[
  {
    "path": ".github/FUNDING.yml",
    "content": "# These are supported funding model platforms\n\ngithub: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]\npatreon: # Replace with a single Patreon username\nopen_collective: # Replace with a single Open Collective username\nko_fi:\ntidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel\ncommunity_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry\nliberapay: # Replace with a single Liberapay username\nissuehunt: # Replace with a single IssueHunt username\nlfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry\npolar: # Replace with a single Polar username\nbuy_me_a_coffee: # Replace with a single Buy Me a Coffee username\nthanks_dev: # Replace with a single thanks.dev username\ncustom: ['https://github.com/0x676e67/0x676e67/blob/main/SPONSOR.md']\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/bug_report.md",
    "content": "---\nname: Bug report\nabout: Create a report to help us improve\ntitle: ''\nlabels: ''\nassignees: ''\n\n---\n\n**Describe the bug**\nA clear and concise description of what the bug is.\n\n**To Reproduce**\nSteps to reproduce the behavior:\n1. Go to '...'\n2. Click on '....'\n3. Scroll down to '....'\n4. See error\n\n**Expected behavior**\nA clear and concise description of what you expected to happen.\n\n**Screenshots**\nIf applicable, add screenshots to help explain your problem.\n\n**Desktop (please complete the following information):**\n - OS: [e.g. iOS]\n - Browser [e.g. chrome, safari]\n - Version [e.g. 22]\n\n**Smartphone (please complete the following information):**\n - Device: [e.g. iPhone6]\n - OS: [e.g. iOS8.1]\n - Browser [e.g. stock browser, safari]\n - Version [e.g. 22]\n\n**Additional context**\nAdd any other context about the problem here.\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/feature_request.md",
    "content": "---\nname: Feature request\nabout: Suggest an idea for this project\ntitle: ''\nlabels: ''\nassignees: ''\n\n---\n\n**Is your feature request related to a problem? Please describe.**\nA clear and concise description of what the problem is. Ex. I'm always frustrated when [...]\n\n**Describe the solution you'd like**\nA clear and concise description of what you want to happen.\n\n**Describe alternatives you've considered**\nA clear and concise description of any alternative solutions or features you've considered.\n\n**Additional context**\nAdd any other context or screenshots about the feature request here.\n"
  },
  {
    "path": ".github/compilation-guide/build.yml",
    "content": "name: build\n\non:\n  push:\n    branches: [ \"main\" ]\n  pull_request:\n    branches: [ \"main\" ]\n  workflow_dispatch:\n\nenv:\n  CARGO_TERM_COLOR: always\n\njobs:\n  linux:\n    name: Build Linux (GNU)\n    runs-on: ubuntu-latest\n    strategy:\n      matrix:\n        include:\n          - target: x86_64\n            target_triple: x86_64-unknown-linux-gnu\n            apt_packages: \"\"\n            custom_env: {}\n          - target: i686\n            target_triple: i686-unknown-linux-gnu\n            apt_packages: crossbuild-essential-i386\n            custom_env:\n              CC: i686-linux-gnu-gcc\n              CXX: i686-linux-gnu-g++\n              CARGO_TARGET_I686_UNKNOWN_LINUX_GNU_LINKER: i686-linux-gnu-g++\n              RUSTC_LINKER: i686-linux-gnu-g++\n          - target: aarch64\n            target_triple: aarch64-unknown-linux-gnu\n            apt_packages: crossbuild-essential-arm64\n            custom_env:\n              CFLAGS_aarch64_unknown_linux_gnu: -D__ARM_ARCH=8\n              CC: aarch64-linux-gnu-gcc\n              CXX: aarch64-linux-gnu-g++\n              CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER: aarch64-linux-gnu-g++\n              RUSTC_LINKER: aarch64-linux-gnu-g++\n          - target: armv7\n            target_triple: armv7-unknown-linux-gnueabihf\n            apt_packages: crossbuild-essential-armhf\n            custom_env:\n              CC: arm-linux-gnueabihf-gcc\n              CXX: arm-linux-gnueabihf-g++\n              CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABIHF_LINKER: arm-linux-gnueabihf-g++\n              RUSTC_LINKER: arm-linux-gnueabihf-g++\n    steps:\n      - uses: actions/checkout@v4\n\n      - name: Install base dependencies on Ubuntu\n        run: |\n          sudo apt-get update\n          sudo apt-get install -y build-essential cmake perl pkg-config libclang-dev musl-tools\n\n      - name: Install target-specific APT dependencies\n        if: ${{ matrix.apt_packages != '' }}\n        run: 
|\n          sudo apt-get update\n          sudo apt-get install -y ${{ matrix.apt_packages }}\n\n      - name: Add Rust target\n        run: rustup target add ${{ matrix.target_triple }}\n\n      - name: Build for ${{ matrix.target }}\n        env: ${{ matrix.custom_env }}\n        run: cargo build --release --target ${{ matrix.target_triple }}\n\n      - name: Archive build artifacts\n        run: |\n          cd target/${{ matrix.target_triple }}/release\n          zip -r ../../../build-linux-${{ matrix.target }}.zip *\n        working-directory: ${{ github.workspace }}\n\n      - name: Upload build artifact\n        uses: actions/upload-artifact@v4\n        with:\n          name: build-linux-${{ matrix.target }}\n          path: build-linux-${{ matrix.target }}.zip\n          retention-days: 1\n\n  musllinux:\n    name: Build Linux (musl)\n    runs-on: ubuntu-latest\n    strategy:\n      matrix:\n        include:\n          - target: x86_64\n            target_triple: x86_64-unknown-linux-musl\n            package: x86_64-linux-musl-cross\n            apt_packages: \"\"\n            custom_env:\n              CC: x86_64-linux-musl-gcc\n              CXX: x86_64-linux-musl-g++\n              CARGO_TARGET_X86_64_UNKNOWN_LINUX_MUSL_LINKER: x86_64-linux-musl-g++\n              RUSTC_LINKER: x86_64-linux-musl-g++\n          - target: aarch64\n            target_triple: aarch64-unknown-linux-musl\n            package: aarch64-linux-musl-cross\n            apt_packages: crossbuild-essential-arm64\n            custom_env:\n              CC: aarch64-linux-musl-gcc\n              CXX: aarch64-linux-musl-g++\n              CARGO_TARGET_AARCH64_UNKNOWN_LINUX_MUSL_LINKER: aarch64-linux-musl-g++\n              RUSTC_LINKER: aarch64-linux-musl-g++\n          - target: i686\n            target_triple: i686-unknown-linux-musl\n            package: i686-linux-musl-cross\n            apt_packages: crossbuild-essential-i386\n            custom_env:\n              CC: 
i686-linux-musl-gcc\n              CXX: i686-linux-musl-g++\n              CARGO_TARGET_I686_UNKNOWN_LINUX_MUSL_LINKER: i686-linux-musl-g++\n              RUSTC_LINKER: i686-linux-musl-g++\n          - target: armv7\n            target_triple: armv7-unknown-linux-musleabihf\n            package: armv7l-linux-musleabihf-cross\n            apt_packages: crossbuild-essential-armhf\n            custom_env:\n              CC: armv7l-linux-musleabihf-gcc\n              CXX: armv7l-linux-musleabihf-g++\n              CARGO_TARGET_ARMV7_UNKNOWN_LINUX_MUSLEABIHF_LINKER: armv7l-linux-musleabihf-g++\n              RUSTC_LINKER: armv7l-linux-musleabihf-g++\n\n    steps:\n      - uses: actions/checkout@v4\n\n      - name: Install base dependencies on Ubuntu\n        run: |\n          sudo apt-get update\n          sudo apt-get install -y build-essential cmake perl pkg-config libclang-dev musl-tools\n\n      - name: Install target-specific APT dependencies\n        if: ${{ matrix.apt_packages != '' }}\n        run: |\n          sudo apt-get update\n          sudo apt-get install -y ${{ matrix.apt_packages }}\n\n      - name: Prepare musl cross-compiler\n        run: |\n          wget https://github.com/musl-cc/musl.cc/releases/latest/download/${{ matrix.package }}.tgz\n          tar xzf ${{ matrix.package }}.tgz -C /opt\n          echo \"/opt/${{ matrix.package }}/bin/\" >> $GITHUB_PATH\n\n      - name: Add Rust target\n        run: rustup target add ${{ matrix.target_triple }}\n\n      - name: Build for ${{ matrix.target }}\n        env: ${{ matrix.custom_env }}\n        run: cargo build --release --target ${{ matrix.target_triple }}\n\n      - name: Archive build artifacts\n        run: |\n          cd target/${{ matrix.target_triple }}/release\n          zip -r ../../../build-musllinux-${{ matrix.target }}.zip *\n        working-directory: ${{ github.workspace }}\n\n      - name: Upload build artifact\n        uses: actions/upload-artifact@v4\n        with:\n          name: 
build-musllinux-${{ matrix.target }}\n          path: build-musllinux-${{ matrix.target }}.zip\n          retention-days: 1\n\n  windows:\n    name: Build Windows\n    runs-on: windows-latest\n    strategy:\n      matrix:\n        include:\n          - target: x86_64\n            target_triple: x86_64-pc-windows-msvc\n          - target: i686\n            target_triple: i686-pc-windows-msvc\n    steps:\n      - uses: actions/checkout@v4\n\n      - name: Install dependencies on Windows\n        run: |\n          choco install cmake -y\n          choco install strawberryperl -y\n          choco install pkgconfiglite -y\n          choco install llvm -y\n          choco install nasm -y\n        shell: cmd\n\n      - name: Build on Windows with Static Linking\n        env:\n          RUSTFLAGS: \"-C target-feature=+crt-static\"\n        run: cargo build --release --target ${{ matrix.target_triple }}\n\n      - name: Archive build artifacts\n        shell: pwsh\n        run: |\n          Compress-Archive -Path 'target\\${{ matrix.target_triple }}\\release\\*' -DestinationPath \"build-windows-${{ matrix.target }}.zip\" -CompressionLevel Optimal -Force\n        working-directory: ${{ github.workspace }}\n\n      - name: Upload build artifact\n        uses: actions/upload-artifact@v4\n        with:\n          name: build-windows-${{ matrix.target }}\n          path: build-windows-${{ matrix.target }}.zip\n          retention-days: 1\n\n  macos:\n    name: Build macOS\n    strategy:\n      matrix:\n        include:\n          - target: x86_64\n            runner: macos-latest\n            target_triple: x86_64-apple-darwin\n          - target: aarch64\n            runner: macos-latest\n            target_triple: aarch64-apple-darwin\n    runs-on: ${{ matrix.runner }}\n    steps:\n      - uses: actions/checkout@v4\n\n      - name: Install dependencies on macOS\n        run: |\n          brew update\n          brew install --formula cmake pkg-config llvm\n\n      - name: Add 
Rust target\n        run: rustup target add ${{ matrix.target_triple }}\n\n      - name: Build for ${{ matrix.target }}\n        run: cargo build --release --target ${{ matrix.target_triple }}\n\n      - name: Archive build artifacts\n        run: |\n          cd target/${{ matrix.target_triple }}/release\n          zip -r ../../../build-macos-${{ matrix.target }}.zip *\n        working-directory: ${{ github.workspace }}\n\n      - name: Upload build artifact\n        uses: actions/upload-artifact@v4\n        with:\n          name: build-macos-${{ matrix.target }}\n          path: build-macos-${{ matrix.target }}.zip\n          retention-days: 1\n"
  },
  {
    "path": ".github/dependabot.yml",
    "content": "# To get started with Dependabot version updates, you'll need to specify which\n# package ecosystems to update and where the package manifests are located.\n# Please see the documentation for all configuration options:\n# https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates\n\nversion: 2\nupdates:\n  - package-ecosystem: \"github-actions\"\n    # Workflow files stored in the\n    # default location of `.github/workflows`\n    directory: \"/\"\n    schedule:\n      interval: \"weekly\"\n  - package-ecosystem: \"cargo\"\n    directory: \"/\"\n    schedule:\n      interval: \"weekly\"\n    # todo: if only this worked, see https://github.com/dependabot/dependabot-core/issues/4009\n    # only tell us if there's a new 'breaking' change we could upgrade to\n    # versioning-strategy: increase-if-necessary\n    # disable regular version updates, security updates are unaffected\n    open-pull-requests-limit: 0\n"
  },
  {
    "path": ".github/workflows/ci.yml",
    "content": "name: CI\n\non:\n  push:\n    tags: [\"v*\"]\n  pull_request:\n    paths-ignore:\n      - 'docs/**'\n      - '*.md'\n      - '.github/**'\n      - 'README.md'\n  workflow_dispatch:\n\nconcurrency:\n  group: ${{ github.workflow }}-${{ github.ref_name }}-${{ github.event.pull_request.number || github.sha }}\n  cancel-in-progress: true\n\npermissions:\n  contents: write\n  packages: write\n\njobs:\n  style:\n    name: Style\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v6\n      - uses: actions-rs/toolchain@v1\n        with:\n          toolchain: stable\n          override: true\n          components: rustfmt, clippy\n      - name: Style check\n        run: cargo fmt --all -- --check\n      - name: Clippy check\n        run: cargo clippy --all-targets --all-features -- -D warnings\n\n  docs:\n    name: Docs\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v6\n      - uses: actions-rs/toolchain@v1\n        with:\n          toolchain: stable\n          override: true\n      - name: Build docs\n        run: cargo doc --document-private-items --all-features\n\n  hack:\n    name: Hack\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v6\n      - uses: actions-rs/toolchain@v1\n        with:\n          toolchain: stable\n          override: true\n      - uses: Swatinem/rust-cache@v2\n      - name: Install cargo-hack from crates.io\n        uses: baptiste0928/cargo-install@v3\n        with:\n          crate: cargo-hack\n      - name: Run hack script\n        run: cargo hack check --each-feature\n\n  msrv:\n    name: MSRV\n    needs: [style]\n    runs-on: ubuntu-latest\n    steps:\n      - name: Checkout\n        uses: actions/checkout@v6\n      - uses: dtolnay/rust-toolchain@stable\n      - name: Resolve MSRV aware dependencies\n        run: cargo update\n        env:\n          CARGO_RESOLVER_INCOMPATIBLE_RUST_VERSIONS: fallback\n      - name: Get MSRV package metadata\n        id: 
metadata\n        run: cargo metadata --no-deps --format-version 1 | jq -r '\"msrv=\" + .packages[0].rust_version' >> $GITHUB_OUTPUT\n      - name: Install rust (${{ steps.metadata.outputs.msrv }})\n        uses: dtolnay/rust-toolchain@master\n        with:\n          toolchain: ${{ steps.metadata.outputs.msrv }}\n      - uses: Swatinem/rust-cache@v2\n      - name: Check\n        run: cargo check\n\n  test:\n    name: Test\n    runs-on: ubuntu-latest\n    strategy:\n      fail-fast: false\n      matrix:\n        feature-set:\n          - \"--all-features\"\n          - \"--no-default-features\"\n          - \"--no-default-features --features webpki-roots\"\n          - \"--no-default-features --features form\"\n          - \"--no-default-features --features query\"\n          - \"--features cookies\"\n          - \"--features gzip,stream\"\n          - \"--features brotli,stream\"\n          - \"--features zstd,stream\"\n          - \"--features deflate,stream\"\n          - \"--features json\"\n          - \"--features multipart\"\n          - \"--features stream\"\n          - \"--features hickory-dns\"\n    steps:\n      - uses: actions/checkout@v6\n      - uses: actions-rs/toolchain@v1\n        with:\n          toolchain: stable\n          override: true\n      - uses: Swatinem/rust-cache@v2\n      - uses: taiki-e/install-action@v2\n        with:\n          tool: cargo-nextest\n      - name: Run tests (${{ matrix.feature-set }})\n        run: cargo nextest run --workspace ${{ matrix.feature-set }}\n\n  build:\n    name: Build (${{ matrix.env }})\n    runs-on: ${{ matrix.os }}\n    environment: ${{ matrix.env }}\n    strategy:\n      matrix:\n        include:\n          - os: ubuntu-latest\n            env: Linux\n          - os: windows-latest\n            env: Windows\n          - os: macos-latest\n            env: macOS\n          - os: ubuntu-latest\n            env: Android\n    steps:\n      - uses: actions/checkout@v6\n      - uses: 
actions-rs/toolchain@v1\n        with:\n          toolchain: stable\n          override: true\n      - name: Install NASM (Windows)\n        if: matrix.os == 'windows-latest'\n        run: choco install nasm -y\n      - name: Build\n        if: matrix.env != 'Android'\n        run: cargo build --all-features\n      - name: Add Android targets\n        if: matrix.env == 'Android'\n        run: rustup target add aarch64-linux-android x86_64-linux-android\n      - uses: nttld/setup-ndk@v1.6.0\n        if: matrix.env == 'Android'\n        id: setup-ndk\n        with:\n          ndk-version: r27c\n          add-to-path: true\n      - name: Build with cargo-ndk\n        if: matrix.env == 'Android'\n        env:\n          ANDROID_NDK_HOME: ${{ steps.setup-ndk.outputs.ndk-path }}\n          ANDROID_NDK_ROOT: ${{ steps.setup-ndk.outputs.ndk-path }}\n        run: |\n          cargo install cargo-ndk\n          cargo ndk -t arm64-v8a -t x86_64 build --all-features\n\n  release:\n    name: Release\n    needs: [style, test, docs, hack, msrv, build]\n    runs-on: ubuntu-latest\n    environment: Release\n    if: startsWith(github.ref, 'refs/tags/')\n    steps:\n      - uses: actions/checkout@v6\n      - uses: actions-rs/toolchain@v1\n        with:\n          toolchain: stable\n          override: true\n      - uses: katyo/publish-crates@v2\n        with:\n          registry-token: ${{ secrets.CARGO_REGISTRY_TOKEN }}\n          ignore-unpublished-changes: true\n      - name: Upload binaries to GitHub Release\n        uses: softprops/action-gh-release@v3\n        with:\n          token: ${{ secrets.GITHUB_TOKEN }}\n          prerelease: ${{ contains(github.ref, 'alpha') || contains(github.ref, 'beta') }}\n          generate_release_notes: true\n"
  },
  {
    "path": ".gitignore",
    "content": "# Generated by Cargo\n# will have compiled files and executables\ntarget\nCargo.lock\n*.swp\n.history\n.vscode\n.direnv\nresult\ncurl\n/.DS_Store\nkeylog.txt\n*.json\n.zed\n*.log\n\n# These are backup files generated by rustfmt\n**/*.rs.bk\n\n# MSVC Windows builds of rustc generate these, which store debugging information\n*.pdb\n\n# Generated by cargo mutants\n# Contains mutation testing data\n**/mutants.out*/\n\n# RustRover\n#  JetBrains specific template is maintained in a separate JetBrains.gitignore that can\n#  be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore\n#  and can be added to the global gitignore or merged into this file.  For a more nuclear\n#  option (not recommended) you can uncomment the following to ignore the entire idea folder.\n.idea/\n"
  },
  {
    "path": "Cargo.toml",
    "content": "[package]\nname = \"wreq\"\nversion = \"6.0.0-rc.28\"\ndescription = \"An ergonomic Rust HTTP Client with TLS fingerprint\"\nkeywords = [\"http\", \"client\", \"websocket\", \"ja3\", \"ja4\"]\ncategories = [\"web-programming::http-client\"]\nrepository = \"https://github.com/0x676e67/wreq\"\ndocumentation = \"https://docs.rs/wreq\"\nauthors = [\"0x676e67 <gngppz@gmail.com>\"]\nreadme = \"README.md\"\nlicense = \"Apache-2.0\"\nedition = \"2024\"\nrust-version = \"1.85\"\ninclude = [\"README.md\", \"LICENSE\", \"src/**/*.rs\"]\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\ntargets = [\"x86_64-unknown-linux-gnu\"]\n\n[features]\ndefault = [\"webpki-roots\"]\n\n# Enable support for decoding text.\ncharset = [\"dep:encoding_rs\", \"dep:mime\"]\n\n# Enable cookies store support.\ncookies = [\"dep:cookie\"]\n\n# Enable gzip decompression support.\ngzip = [\"dep:tower-http\", \"tower-http?/decompression-gzip\"]\n\n# Enable brotli decompression support.\nbrotli = [\"dep:tower-http\", \"tower-http?/decompression-br\"]\n\n# Enable zstd decompression support.\nzstd = [\"dep:tower-http\", \"tower-http?/decompression-zstd\"]\n\n# Enable deflate decompression support.\ndeflate = [\"dep:tower-http\", \"tower-http?/decompression-deflate\"]\n\n# Enable URL query string serialization support.\nquery = [\"dep:serde\", \"dep:serde_html_form\"]\n\n# Enable x-www-form-urlencoded form support.\nform = [\"dep:serde\", \"dep:serde_html_form\"]\n\n# Enable JSON support.\njson = [\"dep:serde\", \"dep:serde_json\"]\n\n# Enable multipart/form-data support.\nmultipart = [\"dep:mime_guess\", \"dep:sync_wrapper\", \"sync_wrapper?/futures\"]\n\n# Enable hickory DNS resolver.\nhickory-dns = [\"dep:hickory-resolver\"]\n\n# Enable streaming support.\nstream = [\"tokio/fs\", \"dep:sync_wrapper\", \"sync_wrapper?/futures\"]\n\n# Enable SOCKS/4/5 proxy support.\nsocks = [\"dep:tokio-socks\"]\n\n# Enable WebSocket support.\nws = 
[\"dep:tokio-tungstenite\", \"tokio-tungstenite?/handshake\"]\n\n# Enable webpki-roots for TLS certificate validation.\nwebpki-roots = [\"dep:webpki-root-certs\"]\n\n# Use the system's proxy configuration.\nsystem-proxy = [\"dep:system-configuration\", \"dep:windows-registry\"]\n\n# Enable tracing logging.\ntracing = [\"dep:tracing\", \"http2/tracing\", \"tracing?/std\"]\n\n# Enables the `parking_lot` crate for synchronization primitives.\nparking_lot = [\"dep:parking_lot\", \"http2/parking_lot\"]\n\n# Prefix BoringSSL symbols in libcrypto/libssl to avoid linker conflicts\n# when multiple OpenSSL versions coexist in the same process.\nprefix-symbols = [\"btls/prefix-symbols\"]\n\n[dependencies]\npercent-encoding = \"2.3.2\"\nurl = \"2.5.8\"\nbytes = \"1.11.1\"\nhttp = \"1.4.0\"\nhttp2 = { version = \"0.5.16\", features = [\"unstable\"] }\nhttparse = \"1.10.1\"\nhttp-body = \"1.0.1\"\nhttp-body-util = \"0.1.3\"\nwant = \"0.3.1\"\npin-project-lite = \"0.2.17\"\nfutures-util = { version = \"0.3.32\", default-features = false }\nsmallvec = { version = \"1.15.1\", features = [\"const_generics\", \"const_new\"] }\nsocket2 = { version = \"0.6.3\", features = [\"all\"] }\nipnet = \"2.12.0\"\nlru = \"0.17.0\"\nbtls = \"0.5.6\"\nbtls-sys = \"0.5.6\"\ntokio-btls = \"0.5.6\"\ntokio = { version = \"1.52.1\", default-features = false, features = [\n    \"net\",\n    \"time\",\n    \"rt\",\n] }\ntokio-util = { version = \"0.7.18\", default-features = false }\ntower = { version = \"0.5.3\", default-features = false, features = [\n    \"timeout\",\n    \"util\",\n    \"retry\",\n] }\n\n# Optional deps...\n\n## serde\nserde = { version = \"1.0\", optional = true }\nserde_json = { version = \"1.0\", optional = true }\nserde_html_form = { version = \"0.4.0\", optional = true }\n\n## multipart\nmime_guess = { version = \"2.0\", default-features = false, optional = true }\n\n## charset\nencoding_rs = { version = \"0.8\", optional = true }\nmime = { version = \"0.3.17\", optional = true 
}\n\n## sync wrapper\nsync_wrapper = { version = \"1.0.2\", optional = true }\n\n## webpki root certs\nwebpki-root-certs = { version = \"1.0.7\", optional = true }\n\n## cookies\ncookie = { version = \"0.18\", optional = true }\n\n## tower http\ntower-http = { version = \"0.6.8\", default-features = false, optional = true }\n\n## socks\ntokio-socks = { version = \"0.5.2\", optional = true }\n\n## websocket\ntokio-tungstenite = { version = \"0.29.0\", default-features = false, optional = true }\n\n## hickory-dns\nhickory-resolver = { version = \"0.26.0\", optional = true }\n\n## parking_lot\nparking_lot = { version = \"0.12.5\", optional = true }\n\n## tracing\ntracing = { version = \"0.1\", default-features = false, optional = true }\n\n## windows\n[target.'cfg(windows)'.dependencies]\nwindows-registry = { version = \"0.6.0\", optional = true }\n\n## macOS\n[target.'cfg(target_os = \"macos\")'.dependencies]\nsystem-configuration = { version = \"0.7.0\", optional = true }\n\n## interface binding\n[target.'cfg(unix)'.dependencies]\nlibc = \"0.2.182\"\n\n[dev-dependencies]\nhyper = { version = \"1.7.0\", default-features = false, features = [\n    \"http1\",\n    \"http2\",\n    \"server\",\n] }\nhyper-util = { version = \"0.1.20\", features = [\n    \"http1\",\n    \"http2\",\n    \"server-auto\",\n    \"server-graceful\",\n    \"tokio\",\n] }\nserde = { version = \"1.0\", features = [\"derive\"] }\ntokio = { version = \"1.0\", default-features = false, features = [\n    \"macros\",\n    \"rt-multi-thread\",\n] }\nfutures = { version = \"0.3.0\", default-features = false, features = [\"std\"] }\ntower = { version = \"0.5.2\", default-features = false, features = [\"limit\"] }\ntokio-test = \"0.4.5\"\ntracing = \"0.1\"\ntracing-subscriber = \"0.3.20\"\npretty_env_logger = \"0.5\"\nbrotli = \"8.0.2\"\nflate2 = \"1.1.9\"\nzstd = \"0.13.3\"\n\n# for benchmarks\nsysinfo = { version = \"0.38.2\", default-features = false, features = [\"system\"] }\ncriterion = { version = 
\"0.8.2\", features = [\"async_tokio\"] }\nreqwest = { version = \"0.13\", default-features = false, features = [\"rustls\", \"stream\", \"http2\"] }\n\n[profile.bench]\nopt-level = 3\ncodegen-units = 1\nincremental = false\n\n[[bench]]\nname = \"http1\"\npath = \"bench/http1.rs\"\nharness = false\nrequired-features = [\"stream\"]\n\n[[bench]]\nname = \"http2\"\npath = \"bench/http2.rs\"\nharness = false\nrequired-features = [\"stream\"]\n\n[[bench]]\nname = \"http1_over_tls\"\npath = \"bench/http1_over_tls.rs\"\nharness = false\nrequired-features = [\"stream\"]\n\n[[bench]]\nname = \"http2_over_tls\"\npath = \"bench/http2_over_tls.rs\"\nharness = false\nrequired-features = [\"stream\"]\n\n[[test]]\nname = \"cookie\"\npath = \"tests/cookie.rs\"\nrequired-features = [\"cookies\"]\n\n[[test]]\nname = \"gzip\"\npath = \"tests/gzip.rs\"\nrequired-features = [\"gzip\", \"stream\"]\n\n[[test]]\nname = \"brotli\"\npath = \"tests/brotli.rs\"\nrequired-features = [\"brotli\", \"stream\"]\n\n[[test]]\nname = \"zstd\"\npath = \"tests/zstd.rs\"\nrequired-features = [\"zstd\", \"stream\"]\n\n[[test]]\nname = \"deflate\"\npath = \"tests/deflate.rs\"\nrequired-features = [\"deflate\", \"stream\"]\n\n[[test]]\nname = \"multipart\"\npath = \"tests/multipart.rs\"\nrequired-features = [\"multipart\", \"stream\"]\n\n[[test]]\nname = \"retry\"\npath = \"tests/retry.rs\"\n\n[[example]]\nname = \"json_dynamic\"\npath = \"examples/json_dynamic.rs\"\nrequired-features = [\"json\"]\n\n[[example]]\nname = \"json_typed\"\npath = \"examples/json_typed.rs\"\nrequired-features = [\"json\"]\n\n[[example]]\nname = \"tor_socks\"\npath = \"examples/tor_socks.rs\"\nrequired-features = [\"socks\"]\n\n[[example]]\nname = \"form\"\npath = \"examples/form.rs\"\nrequired-features = [\"form\"]\n\n[[example]]\nname = \"connect_via_lower_priority_tokio_runtime\"\npath = \"examples/connect_via_lower_priority_tokio_runtime.rs\"\nrequired-features = [\"tracing\"]\n\n[[example]]\nname = \"emulate\"\npath = 
\"examples/emulate.rs\"\nrequired-features = [\"gzip\", \"brotli\", \"zstd\", \"deflate\", \"tracing\"]\n\n[[example]]\nname = \"cert_store\"\npath = \"examples/cert_store.rs\"\nrequired-features = [\"webpki-roots\"]\n\n[[example]]\nname = \"request_with_redirect\"\npath = \"examples/request_with_redirect.rs\"\n\n[[example]]\nname = \"request_with_version\"\npath = \"examples/request_with_version.rs\"\n\n[[example]]\nname = \"request_with_proxy\"\npath = \"examples/request_with_proxy.rs\"\nrequired-features = [\"socks\"]\n\n[[example]]\nname = \"request_with_emulate\"\npath = \"examples/request_with_emulate.rs\"\nrequired-features = [\"gzip\", \"brotli\", \"zstd\", \"deflate\", \"tracing\"]\n\n[[example]]\nname = \"request_with_local_address\"\npath = \"examples/request_with_local_address.rs\"\n\n[[example]]\nname = \"request_with_interface\"\npath = \"examples/request_with_interface.rs\"\n\n[[example]]\nname = \"http1_websocket\"\npath = \"examples/http1_websocket.rs\"\nrequired-features = [\"ws\", \"futures-util/std\"]\n\n[[example]]\nname = \"http2_websocket\"\npath = \"examples/http2_websocket.rs\"\nrequired-features = [\"ws\", \"futures-util/std\"]\n\n[[example]]\nname = \"keylog\"\npath = \"examples/keylog.rs\"\n\n[[example]]\nname = \"unix_socket\"\npath = \"examples/unix_socket.rs\"\n\n"
  },
  {
    "path": "LICENSE",
    "content": "                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      
form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. 
(Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright 2016 Sean McArthur\n   Copyright 2026 0x676e67 <gngppz@gmail.com>\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "README.md",
    "content": "# wreq\n\n[![CI](https://github.com/0x676e67/wreq/actions/workflows/ci.yml/badge.svg)](https://github.com/0x676e67/wreq/actions/workflows/ci.yml)\n[![Crates.io License](https://img.shields.io/crates/l/wreq)](https://github.com/0x676e67/wreq/blob/main/LICENSE)\n[![Crates.io MSRV](https://img.shields.io/crates/msrv/wreq?logo=rust)](https://crates.io/crates/wreq)\n[![crates.io](https://img.shields.io/crates/v/wreq.svg?logo=rust)](https://crates.io/crates/wreq)\n[![Discord chat][discord-badge]][discord-url]\n\n[discord-badge]: https://img.shields.io/discord/1486741856397164788.svg?logo=discord\n[discord-url]: https://discord.gg/rfbvyFkgq3\n\n> 🚀 Help me work seamlessly with open source sharing by [sponsoring me on GitHub](https://github.com/0x676e67/0x676e67/blob/main/SPONSOR.md)\n\nAn ergonomic and modular Rust HTTP Client for high-fidelity protocol matching, featuring customizable TLS, JA3/JA4, and HTTP/2 signature capabilities.\n\n## Features\n\n- Plain bodies, JSON, urlencoded, multipart\n- HTTP Trailer\n- Cookie Store\n- Redirect Policy\n- Original Header\n- Rotating Proxies\n- Tower Middleware\n- WebSocket Upgrade\n- HTTPS via BoringSSL\n- HTTP/2 over TLS Parity\n- Certificate Store (CAs & mTLS)\n\n## Example\n\nThe following example uses the [Tokio](https://tokio.rs) runtime with optional features enabled by adding this to your `Cargo.toml`:\n\n```toml\n[dependencies]\ntokio = { version = \"1\", features = [\"full\"] }\nwreq = \"6.0.0-rc.28\"\nwreq-util = \"3.0.0-rc.10\"\n```\n\nAnd then the code:\n\n```rust\nuse wreq::Client;\nuse wreq_util::Emulation;\n\n#[tokio::main]\nasync fn main() -> wreq::Result<()> {\n    // Build a client\n    let client = Client::builder()\n        .emulation(Emulation::Safari26)\n        .build()?;\n\n    // Use the API you're already familiar with\n    let resp = client.get(\"https://tls.peet.ws/api/all\").send().await?;\n    println!(\"{}\", resp.text().await?);\n    Ok(())\n}\n```\n\n## Behavior\n\n- **HTTP/1 over 
TLS**\n\nIn the Rust ecosystem, most HTTP clients rely on the [http](https://github.com/hyperium/http) library, which performs well but does not preserve header case. This causes some **WAFs** to reject **HTTP/1** requests with lowercase headers (see [discussion](https://github.com/seanmonstar/reqwest/discussions/2227)). **wreq** addresses this by fully supporting **HTTP/1** header case sensitivity.\n\n- **HTTP/2 over TLS**\n\nDue to the complexity of **TLS** encryption and the widespread adoption of **HTTP/2**, browser fingerprints such as **JA3**, **JA4**, and **Akamai** cannot be reliably emulated using simple fingerprint strings. Instead of parsing and emulating these string-based fingerprints, **wreq** provides fine-grained control over **TLS** and **HTTP/2** extensions and settings for precise browser behavior emulation.\n\n- **Device Emulation**\n\n**TLS** and **HTTP/2** fingerprints are often identical across various browser models because these underlying protocols evolve slower than browser release cycles. **100+ browser device emulation profiles** are maintained in [wreq-util](https://github.com/0x676e67/wreq-util).\n\n## Building\n\nCompiling alongside **openssl-sys** can cause symbol conflicts with **boringssl** that lead to [link failures](https://github.com/cloudflare/boring/issues/197), and on **Linux** and **Android** this can be avoided by enabling the **`prefix-symbols`** feature.\n\nInstall [BoringSSL build dependencies](https://github.com/google/boringssl/blob/master/BUILDING.md#build-prerequisites) and build with:\n\n```bash\nsudo apt-get install build-essential cmake perl pkg-config libclang-dev musl-tools git -y\ncargo build --release\n```\n\nThis GitHub Actions [workflow](.github/compilation-guide/build.yml) can be used to compile the project on **Linux**, **Windows**, and **macOS**.\n\n## Services\n\nHelp sustain the ongoing development of this open-source project by reaching out for [commercial support](mailto:gngppz@gmail.com). 
Receive private guidance, expert reviews, or direct access to the maintainer, with personalized technical assistance tailored to your needs.\n\n## License\n\nLicensed under either of Apache License, Version 2.0 ([LICENSE](./LICENSE) or http://www.apache.org/licenses/LICENSE-2.0).\n\n## Contribution\n\nUnless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the [Apache-2.0](./LICENSE) license, shall be licensed as above, without any additional terms or conditions.\n\n## Sponsors\n\n<a href=\"https://captcha.fun/?utm_source=github&utm_medium=readme&utm_campaign=wreq\" target=\"_blank\"><img src=\"https://www.captcha.fun/banner.jpg\" height=\"47\" width=\"149\"></a>\n\n**Solve reCAPTCHA in less than 2 seconds**\n\n**[Captcha.fun](https://captcha.fun/?utm_source=github&utm_medium=readme&utm_campaign=wreq)** delivers fast, reliable CAPTCHA solving built for automation at scale.\n\nWith simple API integration, consistent performance, and competitive pricing, it's an easy way to keep your workflows moving without delays—use code **`WREQ`** for **10% bonus credits**.\n\n**[Dashboard](https://dash.captcha.fun/)** | **[Docs](http://docs.captcha.fun/)** | **[Discord](https://discord.gg/captchafun)**\n\n---\n\n<a href=\"https://hypersolutions.co/?utm_source=github&utm_medium=readme&utm_campaign=wreq\" target=\"_blank\"><img src=\"https://raw.githubusercontent.com/0x676e67/wreq/main/.github/assets/hypersolutions.jpg\" height=\"47\" width=\"149\"></a>\n\nTLS fingerprinting alone isn't enough for modern bot protection. **[Hyper Solutions](https://hypersolutions.co?utm_source=github&utm_medium=readme&utm_campaign=wreq)** provides the missing piece - API endpoints that generate valid antibot tokens for:\n\n**Akamai** • **DataDome** • **Kasada** • **Incapsula**\n\nNo browser automation. 
Just simple API calls that return the exact cookies and headers these systems require.\n\n**[Dashboard](https://hypersolutions.co?utm_source=github&utm_medium=readme&utm_campaign=wreq)** | **[Docs](https://docs.justhyped.dev)** | **[Discord](https://discord.gg/akamai)**\n\n## Accolades\n\nA hard fork of [reqwest](https://github.com/seanmonstar/reqwest).\n"
  },
  {
    "path": "RELEASE.md",
    "content": "## [unreleased]\n\n### Features\n\n- *(cookie)* RFC 9113 compliant cookie handling ([#1106](https://github.com/0x676e67/wreq/issues/1106)) - ([81f3adb](https://github.com/0x676e67/wreq/commit/81f3adb85e0fff869439bd4eac48405e78916c9a))\n- *(cookie)* Fill missing domain/path in `get_all` from stored scope ([#1082](https://github.com/0x676e67/wreq/issues/1082)) - ([240d84e](https://github.com/0x676e67/wreq/commit/240d84eab5eb4df8548933ee2c13337d86e1afe1))\n- *(multipart)* Add Form::set_boundary for custom boundaries ([#1094](https://github.com/0x676e67/wreq/issues/1094)) - ([30adda1](https://github.com/0x676e67/wreq/commit/30adda14d21824f5b6c8b7817d0da76a4876b007))\n- *(tls)* Allow pluggable TLS session cache ([#1101](https://github.com/0x676e67/wreq/issues/1101)) - ([98c1306](https://github.com/0x676e67/wreq/commit/98c130643afca83b15811d42466011900d672bc4))\n\n### Bug Fixes\n\n- *(bench)* Fix CPU sysinfo reading in benchmark ([#1080](https://github.com/0x676e67/wreq/issues/1080)) - ([7882497](https://github.com/0x676e67/wreq/commit/78824973f82de07f86528a4e5df1cf99f313d325))\n- *(http2)* Prevent panic when calling to_str on non-UTF8 headers ([#1070](https://github.com/0x676e67/wreq/issues/1070)) - ([2aa4b16](https://github.com/0x676e67/wreq/commit/2aa4b1601ec22aea0ef5eb1b97e566a217194351))\n- *(rt)* Support fake time in legacy client and TokioTimer ([#1064](https://github.com/0x676e67/wreq/issues/1064)) - ([29acebc](https://github.com/0x676e67/wreq/commit/29acebcdc16b1cec24f0547e6d381e512322edd9))\n- *(tcp)* Restore the missing TCP nodelay setting ([#1102](https://github.com/0x676e67/wreq/issues/1102)) - ([7ea12ed](https://github.com/0x676e67/wreq/commit/7ea12ede38a6617772cf5b66342d3b6f9c2ff7cb))\n- Disable Nagle's algorithm to resolve HTTP/2 performance dip ([#1074](https://github.com/0x676e67/wreq/issues/1074)) - ([8f45ef4](https://github.com/0x676e67/wreq/commit/8f45ef41eb5738d07947e6b78917488680332213))\n\n### Refactor\n\n- *(conn)* Modular 
connector component ([#1100](https://github.com/0x676e67/wreq/issues/1100)) - ([6cf1279](https://github.com/0x676e67/wreq/commit/6cf1279d4a0b40075942692687c967b5da4292c7))\n- *(multipart)* Streamline legacy Form implementation - ([45df222](https://github.com/0x676e67/wreq/commit/45df2228715df1ecbe8e35866f1ec3a82cd4e106))\n- *(pool)* Redesign emulation and pool ID strategy ([#1103](https://github.com/0x676e67/wreq/issues/1103)) - ([c12f3a0](https://github.com/0x676e67/wreq/commit/c12f3a0d8e6dfd4536acb46bf2d318b2cd022aac))\n- *(tls)* Decouple TLS backend logic into sub-modules ([#1105](https://github.com/0x676e67/wreq/issues/1105)) - ([c7a7e3c](https://github.com/0x676e67/wreq/commit/c7a7e3c94a40368894d4a63a959eb633c3a292f1))\n- *(tls)* Expose certificate compression APIs ([#1085](https://github.com/0x676e67/wreq/issues/1085)) - ([8429954](https://github.com/0x676e67/wreq/commit/842995411c9262b04260137c588084340e59133e))\n\n### Documentation\n\n- *(hash)* Simplify documentation for `HashMemo` creation ([#1076](https://github.com/0x676e67/wreq/issues/1076)) - ([fe85f5d](https://github.com/0x676e67/wreq/commit/fe85f5d8972322fe76fdea8317563c730cce319f))\n- Remove deprecated doc_cfg feature conditionally - ([29da566](https://github.com/0x676e67/wreq/commit/29da5662789a9ef8092943a29912dbb77cdde275))\n- Clarify symbol conflict with OpenSSL ([#1068](https://github.com/0x676e67/wreq/issues/1068)) - ([ee2f9f0](https://github.com/0x676e67/wreq/commit/ee2f9f0cf0ab1faf6f56f85ca1a582f576c5f56f))\n\n### Performance\n\n- *(bench)* Optimize benchmark server ([#1073](https://github.com/0x676e67/wreq/issues/1073)) - ([bd8cd36](https://github.com/0x676e67/wreq/commit/bd8cd36084b0367a35d949355b12d5224ea800c0))\n- *(buf)* Make `BufList::remaining` O(1) by caching length ([#1091](https://github.com/0x676e67/wreq/issues/1091)) - ([aaed745](https://github.com/0x676e67/wreq/commit/aaed745799bddacd91e62d373e64ab753ea2d8ee))\n- *(error)* Hint compiler to inline trivial error-handling functions 
([#1061](https://github.com/0x676e67/wreq/issues/1061)) - ([7746f74](https://github.com/0x676e67/wreq/commit/7746f74c3749116a3e2148a59771c8219077e94b))\n- *(http1)* Eliminate `ParserConfig` clones on the HTTP/1.1 request hot path ([#1088](https://github.com/0x676e67/wreq/issues/1088)) - ([9edb950](https://github.com/0x676e67/wreq/commit/9edb95002b121b914dd6cc2f8004f55ba6f2e8bf))\n- *(http2)* Backport and apply hyper client's H2 configuration ([#1063](https://github.com/0x676e67/wreq/issues/1063)) - ([6e2f160](https://github.com/0x676e67/wreq/commit/6e2f160e6ddc9b59a8e3de64fb487f5a47f428e8))\n- *(multipart)* Improve memory layout of `multipart::Form` ([#1095](https://github.com/0x676e67/wreq/issues/1095)) - ([ff44181](https://github.com/0x676e67/wreq/commit/ff4418136e8529a5dedbe008d2dee24441ee232a))\n- *(request)* Static init for common content-type header ([#1060](https://github.com/0x676e67/wreq/issues/1060)) - ([1e45fc5](https://github.com/0x676e67/wreq/commit/1e45fc557721de2d0d483cb00ccc38fe59aeb9a0))\n- *(response)* Hint compiler to inline trivial response-handling functions ([#1062](https://github.com/0x676e67/wreq/issues/1062)) - ([be87bb8](https://github.com/0x676e67/wreq/commit/be87bb85646817cdb6c356ae8efa6eec587fac03))\n\n### Styling\n\n- *(bench)* Fmt code - ([c6e6726](https://github.com/0x676e67/wreq/commit/c6e6726f2f70c19dc898110af1a3b2131379036a))\n- *(request)* Fmt imports for request.rs file - ([2c51823](https://github.com/0x676e67/wreq/commit/2c518232f713827bf3be31c4823d76127566c63a))\n\n### Miscellaneous Tasks\n\n- *(bench)* Update mod benchmark comment - ([f987254](https://github.com/0x676e67/wreq/commit/f987254db8d3f44aa4538bc4436ac7daa8aa608d))\n- *(bench)* Format expected error annotations - ([7131366](https://github.com/0x676e67/wreq/commit/71313662072bad0fa18ed8c0a4d921c7ce706499))\n- *(client)* Fmt code - ([21f27bc](https://github.com/0x676e67/wreq/commit/21f27bc22fb1304cb77ffa52acd3d12bdc56dcfe))\n- *(conn)* Optimize `ConnectionId` cloning 
([#1108](https://github.com/0x676e67/wreq/issues/1108)) - ([1a58655](https://github.com/0x676e67/wreq/commit/1a58655420f9b2c771cb433bf2e2a1d0b5158ad5))\n- *(core)* Clear code - ([9411b19](https://github.com/0x676e67/wreq/commit/9411b19d16d1dee6b66657dc681c96c89394fe6f))\n- *(tcp)* Prune redundant local address handling ([#1107](https://github.com/0x676e67/wreq/issues/1107)) - ([6a2f343](https://github.com/0x676e67/wreq/commit/6a2f343d280ecc9e40864a85d9b31d44de84ae36))\n- Fmt code - ([69c7a76](https://github.com/0x676e67/wreq/commit/69c7a76b483695ebeaf8deded1bb74a655d11602))\n- Fmt import - ([e96a759](https://github.com/0x676e67/wreq/commit/e96a7592ad957efd8e0d0cda3d2ccd6406694356))\n- Update comments for compression support dependencies - ([3f154d3](https://github.com/0x676e67/wreq/commit/3f154d323ff71e7b4ad38c44a90373e6a5aa9569))\n- Refactor `Cargo.toml` for clarity and organization - ([b272408](https://github.com/0x676e67/wreq/commit/b27240866ad81f29616186243ac5a49cf0d165b8))\n- Lint core ([#1071](https://github.com/0x676e67/wreq/issues/1071)) - ([6ed8212](https://github.com/0x676e67/wreq/commit/6ed8212248bfd7085b56f3ff4330acb929d066bf))\n- Fix clippy - ([cf29946](https://github.com/0x676e67/wreq/commit/cf2994669b1be87d3fc5555a5a5179acb54d62d5))\n\n### Bench\n\n- Add missing `TokioTimer` to http1 server builder ([#1081](https://github.com/0x676e67/wreq/issues/1081)) - ([cacd004](https://github.com/0x676e67/wreq/commit/cacd0046acb3051e1f227678a17a972a08a841e4))\n- Format benchmark group labels - ([63f9e39](https://github.com/0x676e67/wreq/commit/63f9e3944d358b4fdcf1df73329d43c5632593e4))\n- Improve benchmark test coverage ([#1075](https://github.com/0x676e67/wreq/issues/1075)) - ([ef41eb3](https://github.com/0x676e67/wreq/commit/ef41eb3fc14df26f6e51a979a978c3c8eeb73101))\n- Simplify grouped benchmarks - ([c63ef51](https://github.com/0x676e67/wreq/commit/c63ef51583a463b44c9efce95e80719c5b803070))\n- Include TLS-encrypted scenarios for HTTP/1 and HTTP/2 - 
([10dc7fd](https://github.com/0x676e67/wreq/commit/10dc7fddccf1afc1a30f978b6f976d1cf19007ad))\n- Add benchmarks for full and streaming bodies ([#1069](https://github.com/0x676e67/wreq/issues/1069)) - ([0186719](https://github.com/0x676e67/wreq/commit/01867191c4b78cb179980751508e6d1d4ebd685f))\n- Add benchmarks for HTTP/1.1 and HTTP/2 ([#1065](https://github.com/0x676e67/wreq/issues/1065)) - ([71fb97a](https://github.com/0x676e67/wreq/commit/71fb97a6a19065e6655875ee3811deaa9c3ae429))\n\n### Build\n\n- *(deps)* Bump btls from 0.5.3 to 0.5.4 ([#1090](https://github.com/0x676e67/wreq/issues/1090)) - ([7c901db](https://github.com/0x676e67/wreq/commit/7c901db6ea4cce23500af66059851fd81e9c1d54))\n- *(deps)* Replace `ahash` with `foldhash` in `lru` cache ([#1084](https://github.com/0x676e67/wreq/issues/1084)) - ([5c7b411](https://github.com/0x676e67/wreq/commit/5c7b4110a4b6678276218b7d6e43b6762b957ebe))\n- *(deps)* Migrate from `boring2` to `btls` ([#1083](https://github.com/0x676e67/wreq/issues/1083)) - ([2d45542](https://github.com/0x676e67/wreq/commit/2d45542b230397875bd92fbca65389b24e17ca2f))\n- *(deps)* Replace `raw-cpuid` with `sysinfo` implementation ([#1077](https://github.com/0x676e67/wreq/issues/1077)) - ([1ab8770](https://github.com/0x676e67/wreq/commit/1ab87707bb7939d79bd31d9460a79bece97dce8c))\n- *(deps)* Bump nttld/setup-ndk from 1.5.0 to 1.6.0 ([#1072](https://github.com/0x676e67/wreq/issues/1072)) - ([3757645](https://github.com/0x676e67/wreq/commit/3757645801a260bb0db38cdbd12f26a2cc45ea5c))\n- *(deps)* Replace `schnellru` with `lru`  implementation ([#1066](https://github.com/0x676e67/wreq/issues/1066)) - ([13c9586](https://github.com/0x676e67/wreq/commit/13c9586c0951c881312cdb6036a188a20eb5746c))\n\n## New Contributors ❤️\n\n* @sqdshguy made their first contribution in [#1094](https://github.com/0x676e67/wreq/pull/1094)\n\n## [6.0.0-rc.28](https://github.com/0x676e67/wreq/compare/v6.0.0-rc.27..v6.0.0-rc.28) - 2026-02-11\n\n### Bug Fixes\n\n- *(http1)* Use 
case-insensitive matching for trailer fields ([#1059](https://github.com/0x676e67/wreq/issues/1059)) - ([1b7d57b](https://github.com/0x676e67/wreq/commit/1b7d57bce1fcc7e471ba383a5b0c14fcc926d1de))\n\n### Performance\n\n- *(request)* Reduce overhead by lazy-loading headers for `json`/`form` data ([#1058](https://github.com/0x676e67/wreq/issues/1058)) - ([6992b6f](https://github.com/0x676e67/wreq/commit/6992b6ffd69bf61f710d97d97b436d630e38cbe7))\n\n\n## [6.0.0-rc.27](https://github.com/0x676e67/wreq/compare/v6.0.0-rc.26..v6.0.0-rc.27) - 2026-01-17\n\n### Features\n\n- *(cookie)* Consolidate cookie methods into a unified add() ([#1043](https://github.com/0x676e67/wreq/issues/1043)) - ([59999e6](https://github.com/0x676e67/wreq/commit/59999e613305e8aa8e13150cec858525b9f4cb6f))\n- *(tls)* Add peer certificate chain to `TlsInfo` ([#1049](https://github.com/0x676e67/wreq/issues/1049)) - ([f27cb78](https://github.com/0x676e67/wreq/commit/f27cb789c8db32ca4fd0bc4e6d8e007307639ba6))\n\n### Bug Fixes\n\n- *(verbose)* Correct connection verbose tracing ([#1055](https://github.com/0x676e67/wreq/issues/1055)) - ([22516ae](https://github.com/0x676e67/wreq/commit/22516ae9f1a4becf3827e1ba9889a6add59e38b6))\n\n### Refactor\n\n- *(redirect)* Expose `Attempt` fields as public API ([#1046](https://github.com/0x676e67/wreq/issues/1046)) - ([b97fa4f](https://github.com/0x676e67/wreq/commit/b97fa4fac5530fb455777db986f2f31f8719a6ad))\n\n### Performance\n\n- *(redirect)* Use static `HeaderName` for `cookie2` to avoid allocation ([#1047](https://github.com/0x676e67/wreq/issues/1047)) - ([0211cad](https://github.com/0x676e67/wreq/commit/0211cad5595220095179c0045aff1c3a76690a1e))\n- *(tls)* Use `Bytes` for `peer_certificate` to enable cheap cloning ([#1050](https://github.com/0x676e67/wreq/issues/1050)) - ([27c8e74](https://github.com/0x676e67/wreq/commit/27c8e74936e6eff30761954f3e9f4133b08f611b))\n\n### Styling\n\n- *(cookie)* Prefer `dt <= SystemTime::now()` in expires check 
([#1045](https://github.com/0x676e67/wreq/issues/1045)) - ([5da3114](https://github.com/0x676e67/wreq/commit/5da3114e749b6a7a0aeb0f8cdd72759bc1a216d5))\n- *(cookie)* Prefer `Duration::is_zero()` in Max-Age=0 check ([#1044](https://github.com/0x676e67/wreq/issues/1044)) - ([1e607dd](https://github.com/0x676e67/wreq/commit/1e607dd0b0d9822dfc9873d7a2e0093defc6b445))\n\n### Miscellaneous Tasks\n\n- *(test)* Fix windows tests ([#1042](https://github.com/0x676e67/wreq/issues/1042)) - ([a22ca01](https://github.com/0x676e67/wreq/commit/a22ca01315ab62659a1498f3d157fb767cdeb828))\n\n### Build\n\n- *(deps)* Add `prefix-symbols` to resolve `OpenSSL` symbol conflicts ([#1056](https://github.com/0x676e67/wreq/issues/1056)) - ([9c40d0f](https://github.com/0x676e67/wreq/commit/9c40d0ff294ae6d15477284c205607147361c90a))\n- *(deps)* Bump `url` dependency version to 2.5.8 ([#1053](https://github.com/0x676e67/wreq/issues/1053)) - ([f0ba09e](https://github.com/0x676e67/wreq/commit/f0ba09e08fbd24a4736b256ef87a1f10da3c0754))\n- *(deps)* Update `http2` dependency version to 0.5.11 ([#1051](https://github.com/0x676e67/wreq/issues/1051)) - ([0ccc4e8](https://github.com/0x676e67/wreq/commit/0ccc4e8e6db4885dada569ecf161bf5104d8a37f))\n\n## New Contributors ❤️\n\n* @Abernson made their first contribution in [#1049](https://github.com/0x676e67/wreq/pull/1049)\n\n## [6.0.0-rc.26](https://github.com/0x676e67/wreq/compare/v6.0.0-rc.25..v6.0.0-rc.26) - 2025-12-31\n\n### Features\n\n- Add `query` and `form` crate features ([#1035](https://github.com/0x676e67/wreq/issues/1035)) - ([091b9e9](https://github.com/0x676e67/wreq/commit/091b9e9e93fef8bc838910dc383a3fb6bdcb8778))\n\n### Bug Fixes\n\n- *(proxy)* Skip proxy headers for HTTPS destinations ([#1039](https://github.com/0x676e67/wreq/issues/1039)) - ([972737f](https://github.com/0x676e67/wreq/commit/972737f540150819d9659cb17e8cdc097dbb078f))\n- *(redirect)* Fix redirect `location` encoding ([#1034](https://github.com/0x676e67/wreq/issues/1034)) - 
([f8e2114](https://github.com/0x676e67/wreq/commit/f8e21143abe06f7ae65d26d3ffb979433fcfe394))\n\n### Refactor\n\n- *(header)* Hide internal details of `OrigHeaderName` ([#1036](https://github.com/0x676e67/wreq/issues/1036)) - ([5424935](https://github.com/0x676e67/wreq/commit/5424935235270cead6c5f2e9a7f59a5398ad001c))\n\n### Performance\n\n- *(proxy)* Improve proxy credential handling for concurrent requests ([#1041](https://github.com/0x676e67/wreq/issues/1041)) - ([4016d1b](https://github.com/0x676e67/wreq/commit/4016d1bfeb7b24122ecdc0906129e65841c3700c))\n- *(uri)* Improve `String` to `Uri` conversion performance ([#1038](https://github.com/0x676e67/wreq/issues/1038)) - ([fcd5cc5](https://github.com/0x676e67/wreq/commit/fcd5cc54a7d3d0d0c2d3575af6f8c6ea1f0fdabe))\n\n### Miscellaneous Tasks\n\n- *(redirect)* Remove macros - ([c92fbaf](https://github.com/0x676e67/wreq/commit/c92fbaf87d33c11d681c7d47c09a54d47b2674fb))\n\n## New Contributors ❤️\n\n* @blinjrm made their first contribution in [#1039](https://github.com/0x676e67/wreq/pull/1039)\n\n## [6.0.0-rc.25](https://github.com/0x676e67/wreq/compare/v6.0.0-rc.23..v6.0.0-rc.25) - 2025-12-23\n\n### Features\n\n- *(cookie)* Refactor `CookieStore` cookie compression strategy ([#1005](https://github.com/0x676e67/wreq/issues/1005)) - ([2dc14cd](https://github.com/0x676e67/wreq/commit/2dc14cd9207d0c1cb41583395a7f544acb40aadf))\n- *(error)* Add `is_proxy_connect` for proxy connection errors ([#1014](https://github.com/0x676e67/wreq/issues/1014)) - ([0578465](https://github.com/0x676e67/wreq/commit/0578465eb64a23b2d47fb7080ea372646c4783d6))\n- *(proxy)* Compatibility for sending HTTP requests without HTTPS tunneling ([#991](https://github.com/0x676e67/wreq/issues/991)) - ([bd1d58b](https://github.com/0x676e67/wreq/commit/bd1d58bcf3b87924486b9515f6f678dc8ca36800))\n- *(redirect)* Add async support to redirect policy ([#996](https://github.com/0x676e67/wreq/issues/996)) - 
([bc6f113](https://github.com/0x676e67/wreq/commit/bc6f11376d884dcd614889861bb55157907cdab7))\n- *(response)* Introduce trailers support ([#1021](https://github.com/0x676e67/wreq/issues/1021)) - ([28bcc63](https://github.com/0x676e67/wreq/commit/28bcc63cb0e9083c944d55ca3895ee70a1ed636b))\n\n### Bug Fixes\n\n- *(proxy)* Improve domain matching case insensitivity ([#1031](https://github.com/0x676e67/wreq/issues/1031)) - ([87f9019](https://github.com/0x676e67/wreq/commit/87f90191bbb5fe39174ab2777b4d526145f2e75c))\n- *(proxy)* Fix HTTP requests proxied through an `SOCKS5`/`HTTPS tunnel` ([#990](https://github.com/0x676e67/wreq/issues/990)) - ([7207dd5](https://github.com/0x676e67/wreq/commit/7207dd55989f9ef2d3577261928252b5dc90f206))\n- *(redirect)* Ensure redirect URLs are properly encoded ([#1017](https://github.com/0x676e67/wreq/issues/1017)) - ([8ad5023](https://github.com/0x676e67/wreq/commit/8ad5023932b480c1cf94d8bbddc9bb2b59a83d6c))\n- *(request)* Fix missing `http::Request` conversion extensions ([#1000](https://github.com/0x676e67/wreq/issues/1000)) - ([9df5f14](https://github.com/0x676e67/wreq/commit/9df5f14f3657692ae19691105826d30c23056996))\n- *(test)* Fix decompression test ([#998](https://github.com/0x676e67/wreq/issues/998)) - ([54f5ee6](https://github.com/0x676e67/wreq/commit/54f5ee63877e5ec3ef04167dcdb25b1025a0b2f7))\n\n### Refactor\n\n- *(config)* Simplify extension config type wrappers ([#1009](https://github.com/0x676e67/wreq/issues/1009)) - ([adf84e3](https://github.com/0x676e67/wreq/commit/adf84e38abaa921f10a3994920bbe494bafc608a))\n- *(core)* Use flat module style - ([30a8c13](https://github.com/0x676e67/wreq/commit/30a8c135c26bc4853c24f3a5209b6ad098a4f74a))\n- *(decoder)* Reorder decoder tower layers ([#1026](https://github.com/0x676e67/wreq/issues/1026)) - ([910378d](https://github.com/0x676e67/wreq/commit/910378d9965cd11a9d0c9bf0478428d1f200802d))\n- *(ext)* Remove extension wrapper types ([#999](https://github.com/0x676e67/wreq/issues/999)) - 
([15b4866](https://github.com/0x676e67/wreq/commit/15b48664364a436d863b5f94881d6e36402b7f10))\n- *(mod)* Use flat module style and merge legacy client ([#993](https://github.com/0x676e67/wreq/issues/993)) - ([75db3ea](https://github.com/0x676e67/wreq/commit/75db3eaa3b63d52580cef711cd2b3a5960d3850d))\n- *(proxy)* Use flat module style - ([0925369](https://github.com/0x676e67/wreq/commit/0925369c903046ae745bba8eb7330ae2086fa4b7))\n- *(redirect)* Refactor handling of redirect history ([#1002](https://github.com/0x676e67/wreq/issues/1002)) - ([b1ce184](https://github.com/0x676e67/wreq/commit/b1ce184b901aa5f1d11eb1af4dd6b02dffedfed6))\n\n### Documentation\n\n- *(proxy)* Fix docs prompt ([#1010](https://github.com/0x676e67/wreq/issues/1010)) - ([989e691](https://github.com/0x676e67/wreq/commit/989e6910014124cc579eabd372a34ea665d37c63))\n- Update documentation for `Request` and `RequestBuilder` - ([e30b393](https://github.com/0x676e67/wreq/commit/e30b3932323f23e902ae97d0178d1409ff2ef290))\n- Fix documentation build warning ([#1008](https://github.com/0x676e67/wreq/issues/1008)) - ([303c54e](https://github.com/0x676e67/wreq/commit/303c54eba89e4cd2252da3a986710ad330034da8))\n\n### Performance\n\n- *(client)* Reduce one `HeaderMap` clone during header merge ([#987](https://github.com/0x676e67/wreq/issues/987)) - ([ce030b8](https://github.com/0x676e67/wreq/commit/ce030b8c3ba6bb233775fad271e1ecff49a95a61))\n- *(ext)* Update query handling to avoid copying ([#1007](https://github.com/0x676e67/wreq/issues/1007)) - ([be0366f](https://github.com/0x676e67/wreq/commit/be0366fb656cdffde5504c0354ebff36a65a34b2))\n- *(proxy)* Reduce branch matching ([#992](https://github.com/0x676e67/wreq/issues/992)) - ([ed00aec](https://github.com/0x676e67/wreq/commit/ed00aec00371097810d634901bd648dc990041f5))\n- *(redirect)* Avoid cloning inner service for non-redirect requests ([#1028](https://github.com/0x676e67/wreq/issues/1028)) - 
([7933341](https://github.com/0x676e67/wreq/commit/79333414a4c6a83e35356ab68ea301b0976472f4))\n\n### Styling\n\n- *(connector)* Fmt code - ([8a15bf4](https://github.com/0x676e67/wreq/commit/8a15bf418c902ada7975976d5278d20487535831))\n- *(layer)* Use flat module style ([#1027](https://github.com/0x676e67/wreq/issues/1027)) - ([519e4ca](https://github.com/0x676e67/wreq/commit/519e4ca6c3ceba8e355838fb2ba0a359ddb3feff))\n- Fmt code - ([53df061](https://github.com/0x676e67/wreq/commit/53df061e44f049c38de1d63b1ef2077070eea7fe))\n- Fmt code - ([c15fc08](https://github.com/0x676e67/wreq/commit/c15fc08abc9210bcd98460e112e3fc746b39e748))\n\n### Testing\n\n- *(response)* Remove duplicate tests - ([7c1df27](https://github.com/0x676e67/wreq/commit/7c1df27efecb5f0a5abdaeec33d5f2bf9a885610))\n\n### Miscellaneous Tasks\n\n- *(body)* Remove `Debug` trait implementation for Body - ([72aea5e](https://github.com/0x676e67/wreq/commit/72aea5eb8e48fc2c561b0b4718f8a4654d0d31cf))\n- *(body)* Remove unnecessary `cfg_attr` for stream feature - ([9c698b3](https://github.com/0x676e67/wreq/commit/9c698b38088529c9d79c293f41b3697a784b5b7a))\n- *(body)* Simplify body construction ([#1020](https://github.com/0x676e67/wreq/issues/1020)) - ([7116f11](https://github.com/0x676e67/wreq/commit/7116f11e0e80ad9651b6f19ced93c2ac8a4d3731))\n- *(decoder)* Add debug assertion for decoder presence - ([977a7ba](https://github.com/0x676e67/wreq/commit/977a7ba80ff4080a19460f8c74908eac509084e6))\n- *(layer)* Move body timeout layer to the outermost layer ([#1032](https://github.com/0x676e67/wreq/issues/1032)) - ([294e9d8](https://github.com/0x676e67/wreq/commit/294e9d8b4b257eb69ad23e7f1b0508ff5c6a8442))\n- *(multipart)* Remove custom `Debug` trait implementations - ([4512913](https://github.com/0x676e67/wreq/commit/45129134b0c67dafb26fc2038f8fd9a4dc92b4ca))\n- *(req/resp)* Fmt docs ([#1022](https://github.com/0x676e67/wreq/issues/1022)) - 
([d395827](https://github.com/0x676e67/wreq/commit/d39582730c9d92cdb76e133648a4582511bac647))\n- *(request)* Simplify request construction ([#1018](https://github.com/0x676e67/wreq/issues/1018)) - ([2b044fb](https://github.com/0x676e67/wreq/commit/2b044fbb8b748418b3dfd551c8b9b3ba629b5529))\n- *(request)* Fmt code - ([32fa617](https://github.com/0x676e67/wreq/commit/32fa61771646a1c1c22cb205e94016006b87232a))\n- *(response)* Remove `Debug` implementation for `Response` - ([51f86a5](https://github.com/0x676e67/wreq/commit/51f86a56bb35ca317a108796430e97cfe386bb0f))\n- *(response)* Simplify response construction ([#1016](https://github.com/0x676e67/wreq/issues/1016)) - ([08a8066](https://github.com/0x676e67/wreq/commit/08a8066d690a2b902017a5ed9598c4e6972ca57c))\n- *(style)* Fmt code - ([9f1fd12](https://github.com/0x676e67/wreq/commit/9f1fd12f4af694be89ca2c4e0a8f054ab4e6a310))\n- Add MSRV job to CI workflow - ([681a763](https://github.com/0x676e67/wreq/commit/681a763eeac5bd75f29868d5907f72d0d8033e8e))\n- Use `http_body_util::BodyDataStream` ([#1015](https://github.com/0x676e67/wreq/issues/1015)) - ([75baf44](https://github.com/0x676e67/wreq/commit/75baf44b84bccb3236e8d1b13249d61e344a4b44))\n- Remove cmake pinning from Windows CI step - ([87fc1f6](https://github.com/0x676e67/wreq/commit/87fc1f69989101ac412e4e8e585a4d2a5dfb1073))\n- Add Android NDK tests ([#1011](https://github.com/0x676e67/wreq/issues/1011)) - ([adab15a](https://github.com/0x676e67/wreq/commit/adab15ac1c02411470f914311e299fc84ee3772f))\n\n### Revert\n\n- *(request)* Restore upstream header insertion strategy ([#995](https://github.com/0x676e67/wreq/issues/995)) - ([00c1d6d](https://github.com/0x676e67/wreq/commit/00c1d6d98d760512885270fa5211769ce311fc2a))\n\n### Build\n\n- *(deps)* Update `system-configuration` version to 0.7.0 ([#1024](https://github.com/0x676e67/wreq/issues/1024)) - ([040fc99](https://github.com/0x676e67/wreq/commit/040fc9942ab677a56d9432910db181ec181904f6))\n- *(deps)* Bump 
actions/checkout from 5 to 6 ([#1023](https://github.com/0x676e67/wreq/issues/1023)) - ([814b9c8](https://github.com/0x676e67/wreq/commit/814b9c880727def1f6cf1586526971d91a473a4f))\n- Cargo diet - ([f0d1ea1](https://github.com/0x676e67/wreq/commit/f0d1ea18226b46185106e9d096acd542ee39a454))\n\n\n## [6.0.0-rc.23](https://github.com/0x676e67/wreq/compare/v6.0.0-rc.22..v6.0.0-rc.23) - 2025-11-28\n\n### Bug Fixes\n\n- *(client)* Handle multi-value default headers without overriding requests ([#986](https://github.com/0x676e67/wreq/issues/986)) - ([745fa26](https://github.com/0x676e67/wreq/commit/745fa265a99a857c394226f4d2b64f7783813d17))\n- *(test)* Fix decompression empty body test ([#979](https://github.com/0x676e67/wreq/issues/979)) - ([9e11af1](https://github.com/0x676e67/wreq/commit/9e11af143fc452e65a42cd720138b96c7433ffd4))\n\n### Refactor\n\n- *(http1)* Replace many args of `Chunked::step` with struct - ([6ffef6c](https://github.com/0x676e67/wreq/commit/6ffef6ca138f341340aa4f2086fdbca009ca301e))\n- Change fast_random from xorshift to siphash a counter ([#983](https://github.com/0x676e67/wreq/issues/983)) - ([a386091](https://github.com/0x676e67/wreq/commit/a38609107949bc88e2dd38a0978bde91f8684b38))\n\n### Build\n\n- *(deps)* Bump actions/checkout from 5 to 6 ([#978](https://github.com/0x676e67/wreq/issues/978)) - ([81d8d82](https://github.com/0x676e67/wreq/commit/81d8d82f811d60a71f6a5e0eff712134dfd15f80))\n\n### Deps\n\n- Update tokio-tungstenite version to 0.28.0 ([#982](https://github.com/0x676e67/wreq/issues/982)) - ([cf8a71e](https://github.com/0x676e67/wreq/commit/cf8a71ea6957ccd40beda136678954787fcab9db))\n\n\n## [6.0.0-rc.22](https://github.com/0x676e67/wreq/compare/v6.0.0-rc.21..v6.0.0-rc.22) - 2025-11-21\n\n### Features\n\n- *(rt)* Add Timer::now() method to allow overriding the instant returned ([#976](https://github.com/0x676e67/wreq/issues/976)) - ([7cf3b95](https://github.com/0x676e67/wreq/commit/7cf3b95f8f445aff46ddd6455e0afaadb72bba36))\n\n### Bug 
Fixes\n\n- *(http1)* Fix rare missed write wakeup on connections ([#974](https://github.com/0x676e67/wreq/issues/974)) - ([d6bccef](https://github.com/0x676e67/wreq/commit/d6bccefe0e7d474e9bb1a375a3707326fa5db9a4))\n- *(proxy)* Fix 407 proxy auth failures for HTTP requests ([#975](https://github.com/0x676e67/wreq/issues/975)) - ([df67842](https://github.com/0x676e67/wreq/commit/df6784232b9f3b146c872ecb8606336ad2a06256))\n\n### Performance\n\n- *(uri)* Avoid double copying during URI percent encoding ([#977](https://github.com/0x676e67/wreq/issues/977)) - ([6a1a406](https://github.com/0x676e67/wreq/commit/6a1a406d6f12eb3baf320a435330256b71bf8cf3))\n\n### Miscellaneous Tasks\n\n- *(client)* Refactor proxy auth handling logic - ([e54df35](https://github.com/0x676e67/wreq/commit/e54df351be60c6957759f82c3ca6861aca31db33))\n\n\n## [6.0.0-rc.21](https://github.com/0x676e67/wreq/compare/v6.0.0-rc.20..v6.0.0-rc.21) - 2025-11-07\n\n### Features\n\n- *(uri)* Percent-encode spaces when building request URLs ([#972](https://github.com/0x676e67/wreq/issues/972)) - ([de1c937](https://github.com/0x676e67/wreq/commit/de1c9379c101764e1dc5f32d300154edec7f89f6))\n\n### Bug Fixes\n\n- *(proxy)* Support proxy auth where password is omitted ([#971](https://github.com/0x676e67/wreq/issues/971)) - ([f7ffd56](https://github.com/0x676e67/wreq/commit/f7ffd565b8129007b2ee8ccd756f0ccf248decef))\n\n### Refactor\n\n- *(dns)* Redesign DNS API for improved ergonomics and functionality ([#968](https://github.com/0x676e67/wreq/issues/968)) - ([9c3c3f5](https://github.com/0x676e67/wreq/commit/9c3c3f50fe4249be3a1a878d5ad24506bf7778f1))\n- *(proxy)* Consolidate platform-specific modules into mod.rs ([#956](https://github.com/0x676e67/wreq/issues/956)) - ([99d3ed7](https://github.com/0x676e67/wreq/commit/99d3ed74ce0c520baba77301a3a6da20701b550c))\n\n### Documentation\n\n- *(retry)* Fix typo ([#957](https://github.com/0x676e67/wreq/issues/957)) - 
([ed5fef2](https://github.com/0x676e67/wreq/commit/ed5fef2a18f473b770799abfa64c092529ebf74d))\n\n### Performance\n\n- *(connector)* Disable Nagle's algorithm for TLS handshake ([#955](https://github.com/0x676e67/wreq/issues/955)) - ([35f4265](https://github.com/0x676e67/wreq/commit/35f426502dada4e4fb245048feccd3b6762f0ea0))\n\n### Testing\n\n- *(redirect)* Improve redirect cookie tests ([#963](https://github.com/0x676e67/wreq/issues/963)) - ([852f280](https://github.com/0x676e67/wreq/commit/852f28059719f3e485e58e9b92f2591466d0f342))\n\n### Miscellaneous Tasks\n\n- *(connector)* Fmt code - ([00fa021](https://github.com/0x676e67/wreq/commit/00fa021349eec058456e2e51ed6b01ab72eedecf))\n- *(dcos)* Improve API docs ([#954](https://github.com/0x676e67/wreq/issues/954)) - ([10eabd7](https://github.com/0x676e67/wreq/commit/10eabd775aacce16a8e0a616c5919124bb5456ef))\n- Update docs - ([9c08747](https://github.com/0x676e67/wreq/commit/9c0874711a10b5d68ee6710218dac4ee3a07d982))\n- Fix style check ([#959](https://github.com/0x676e67/wreq/issues/959)) - ([6c3c02b](https://github.com/0x676e67/wreq/commit/6c3c02bab811893de65b599a8fc75fd50dadd103))\n\n### Build\n\n- *(deps)* Update windows-registry requirement from 0.5.0 to 0.6.0 ([#962](https://github.com/0x676e67/wreq/issues/962)) - ([b51a8fb](https://github.com/0x676e67/wreq/commit/b51a8fbfb5b9f6e3c235ce389926021236e57386))\n\n\n## [6.0.0-rc.20](https://github.com/0x676e67/wreq/compare/v6.0.0-rc.16..v6.0.0-rc.20) - 2025-09-19\n\n### Refactor\n\n- *(tls)* Replace `prefer_chacha20` with `preserve_tls13_cipher_list` ([#953](https://github.com/0x676e67/wreq/issues/953)) - ([3d4f61d](https://github.com/0x676e67/wreq/commit/3d4f61d1135c066df07073899c1cfe81c1fcf961))\n\n\n## [6.0.0-rc.16](https://github.com/0x676e67/wreq/compare/v6.0.0-rc.15..v6.0.0-rc.16) - 2025-09-17\n\n### Features\n\n- *(ws)* Implement `FusedStream` trait for WebSocket ([#949](https://github.com/0x676e67/wreq/issues/949)) - 
([d292ef7](https://github.com/0x676e67/wreq/commit/d292ef799a4dfac4500f5ccd785e3fdebeecbe7c))\n\n### Bug Fixes\n\n- *(client)* Allow absolute-form if is_proxied is set even on HTTPS ([#945](https://github.com/0x676e67/wreq/issues/945)) - ([0df02e1](https://github.com/0x676e67/wreq/commit/0df02e1c8db43cd94e32541ce0e24b3966441804))\n- *(error)* Drop leftover debug logging ([#948](https://github.com/0x676e67/wreq/issues/948)) - ([3f73ae6](https://github.com/0x676e67/wreq/commit/3f73ae688bd7acd8a7292eb2a5a6ab7b9892de3b))\n- *(http2)* Fix chained calls ([#952](https://github.com/0x676e67/wreq/issues/952)) - ([a1765dc](https://github.com/0x676e67/wreq/commit/a1765dce6403ea037769331bf51e520f13b7f024))\n\n### Refactor\n\n- *(ws)* Improve close method API ergonomics ([#947](https://github.com/0x676e67/wreq/issues/947)) - ([de9e36b](https://github.com/0x676e67/wreq/commit/de9e36b98e1d372d658c55eeb2cc324d67177b06))\n\n### Miscellaneous Tasks\n\n- *(client)* Fmt code - ([ccc54f7](https://github.com/0x676e67/wreq/commit/ccc54f7cb0805749fac896d3e388383916cf1200))\n- *(examples)* Remove tracing logs from examples - ([dae70b4](https://github.com/0x676e67/wreq/commit/dae70b4320372c00387a2090ba34099ca1e22246))\n- *(examples)* Change HTTP client to use wreq with proxy - ([ba92b95](https://github.com/0x676e67/wreq/commit/ba92b95a913811f7979ff8e51239390c2c62f3d4))\n\n\n## [6.0.0-rc.15](https://github.com/0x676e67/wreq/compare/v6.0.0-rc.14..v6.0.0-rc.15) - 2025-09-12\n\n### Features\n\n- *(http1)* Remove `preserve_header_case` support ([#943](https://github.com/0x676e67/wreq/issues/943)) - ([fd59127](https://github.com/0x676e67/wreq/commit/fd59127a8afebc42adf4e7eb40faaf792377e62b))\n- *(retry)* Introduce configurable retry policy ([#935](https://github.com/0x676e67/wreq/issues/935)) - ([f4644d8](https://github.com/0x676e67/wreq/commit/f4644d8a08545de19638abd80484210190f123f2))\n\n### Refactor\n\n- *(ext)* Introduce ergonomic and functional API 
([#942](https://github.com/0x676e67/wreq/issues/942)) - ([52709b3](https://github.com/0x676e67/wreq/commit/52709b3dc3b3c7a756bb370c8efc31dba86f2fc9))\n- *(keylog)* Redesign API for better ergonomics and functionality ([#941](https://github.com/0x676e67/wreq/issues/941)) - ([7845b9b](https://github.com/0x676e67/wreq/commit/7845b9b9d6c3c31cda3c52f573a1446e710710d7))\n\n### Testing\n\n- *(client)* Update header tests and examples ([#939](https://github.com/0x676e67/wreq/issues/939)) - ([bfb8739](https://github.com/0x676e67/wreq/commit/bfb8739b0c0a03e06e54d9c68f7783ca1415b0a3))\n\n### Miscellaneous Tasks\n\n- *(internal)* Remove unnecessary `Debug` bounds - ([4aa1088](https://github.com/0x676e67/wreq/commit/4aa1088888ba8fe4e64a2ff7cf874b1d0174b154))\n- *(response)* Drop `Uri::try_from` in From<http::Response<T>> - ([9e16fba](https://github.com/0x676e67/wreq/commit/9e16fba5e1be1bf95b9b06ad16e0a9858c0b60c2))\n- *(retry)* Remove unused code - ([147fe60](https://github.com/0x676e67/wreq/commit/147fe60d5c62048b064e7896d90e96011383ffa9))\n- *(sync)* Remove unused code ([#940](https://github.com/0x676e67/wreq/issues/940)) - ([a17f799](https://github.com/0x676e67/wreq/commit/a17f79957e722589b6e122f54fae2f1a82893c5b))\n\n\n## [6.0.0-rc.14](https://github.com/0x676e67/wreq/compare/v6.0.0-rc.13..v6.0.0-rc.14) - 2025-09-05\n\n### Bug Fixes\n\n- *(client)* Ensure `Accept-Encoding` header is applied correctly ([#928](https://github.com/0x676e67/wreq/issues/928)) - ([f9f9331](https://github.com/0x676e67/wreq/commit/f9f9331ca28f07fd1d5ad4067d297c66dfe013c1))\n\n### Refactor\n\n- *(client)* Enforce `ClientBuilder` initialization via `Client::builder()` ([#932](https://github.com/0x676e67/wreq/issues/932)) - ([513e6f5](https://github.com/0x676e67/wreq/commit/513e6f56169ba357c8d830d77745092d1a90750c))\n- *(response)* Accept AsRef<str> for charset for better ([#934](https://github.com/0x676e67/wreq/issues/934)) - 
([b95e3b5](https://github.com/0x676e67/wreq/commit/b95e3b5791b983b436c892569a1d3a678999ed26))\n\n### Performance\n\n- *(client)* Prevent header duplication by reordering layers ([#930](https://github.com/0x676e67/wreq/issues/930)) - ([ca72a53](https://github.com/0x676e67/wreq/commit/ca72a5341e0ca7d0afe187d1fcd63e1ce1895596))\n- *(client)* Avoid redundant header copy ([#929](https://github.com/0x676e67/wreq/issues/929)) - ([c0d8df7](https://github.com/0x676e67/wreq/commit/c0d8df7c1b8d4dfb002dc6bf6ff417ba67f2d587))\n\n### Miscellaneous Tasks\n\n- *(client)* Speed up client initialization ([#931](https://github.com/0x676e67/wreq/issues/931)) - ([be90796](https://github.com/0x676e67/wreq/commit/be90796bda2c481c773c9c93e26420da92faa932))\n- *(test)* Fmt code - ([f5ab83c](https://github.com/0x676e67/wreq/commit/f5ab83cfb4d28518dab06e63d28c6f234bfd590f))\n- *(tests)* Fmt code ([#933](https://github.com/0x676e67/wreq/issues/933)) - ([86ee4e3](https://github.com/0x676e67/wreq/commit/86ee4e3343466f0284837d4bec6429f28620fc1a))\n\n\n## [6.0.0-rc.13](https://github.com/0x676e67/wreq/compare/v6.0.0-rc.12..v6.0.0-rc.13) - 2025-09-02\n\n### Bug Fixes\n\n- *(cookie)* Normalize host handling with port ([#926](https://github.com/0x676e67/wreq/issues/926)) - ([66368be](https://github.com/0x676e67/wreq/commit/66368be48fd8437c1f2c8cd3ef9e7f0f8432a245))\n\n### Styling\n\n- *(redirect)* Fmt code - ([db195ef](https://github.com/0x676e67/wreq/commit/db195efaedd4232cf27c4161414de64c4898b1fe))\n\n\n## [6.0.0-rc.12](https://github.com/0x676e67/wreq/compare/v6.0.0-rc.11..v6.0.0-rc.12) - 2025-09-02\n\n### Features\n\n- *(lib)* Introduce request shortcut ([#924](https://github.com/0x676e67/wreq/issues/924)) - ([ad6b79d](https://github.com/0x676e67/wreq/commit/ad6b79d0042df52e0e1c418a66a66760308837ac))\n\n\n## [6.0.0-rc.11](https://github.com/0x676e67/wreq/compare/v6.0.0-rc.9..v6.0.0-rc.11) - 2025-08-31\n\n### Features\n\n- *(tls)* Allow custom ALPN configuration 
([#921](https://github.com/0x676e67/wreq/issues/921)) - ([9edfd54](https://github.com/0x676e67/wreq/commit/9edfd54732bae3fd98510d307c4320f48bf44a6d))\n\n### Bug Fixes\n\n- *(cookie)* Fix cookie deletion and lookup logic ([#923](https://github.com/0x676e67/wreq/issues/923)) - ([e6014ef](https://github.com/0x676e67/wreq/commit/e6014ef049826062e305e475e10e4c142980a3d5))\n\n### Documentation\n\n- *(tls)* Refine `TlsOptions` field documentation ([#922](https://github.com/0x676e67/wreq/issues/922)) - ([2b42c9c](https://github.com/0x676e67/wreq/commit/2b42c9c3b43b3aabaed6d1c66b0f0bc21070cd48))\n- *(tls)* Update module docs ([#920](https://github.com/0x676e67/wreq/issues/920)) - ([04c1258](https://github.com/0x676e67/wreq/commit/04c12583c67f0205e5dfd049db19316acbc32cce))\n\n### Miscellaneous Tasks\n\n- *(tls)* Streamline conn module type re-exports - ([362c12a](https://github.com/0x676e67/wreq/commit/362c12a50956eb3955a5a6735ebd0bfac39b1e8b))\n- *(tls)* Remove ext & cert compression wrappers ([#918](https://github.com/0x676e67/wreq/issues/918)) - ([d9c3e84](https://github.com/0x676e67/wreq/commit/d9c3e8420075f8f6feca0f1725728f0cc25603aa))\n\n\n## [6.0.0-rc.9](https://github.com/0x676e67/wreq/compare/v6.0.0-rc.8..v6.0.0-rc.9) - 2025-08-30\n\n### Features\n\n- *(cookie)* Improve `cookie_provider` for better ergonomics and flexibility ([#895](https://github.com/0x676e67/wreq/issues/895)) - ([70dd6d9](https://github.com/0x676e67/wreq/commit/70dd6d9d13181b252ce8b69ba807fd5b7b9a15a4))\n- *(dns)* Export `IntoResolve` as public API ([#913](https://github.com/0x676e67/wreq/issues/913)) - ([b1b6278](https://github.com/0x676e67/wreq/commit/b1b6278830e20496e965cdbb9adca7d03974f928))\n- *(lib)* Add shortcut request methods ([#903](https://github.com/0x676e67/wreq/issues/903)) - ([03cce5e](https://github.com/0x676e67/wreq/commit/03cce5e87dfc9fc77d7ce8eb10bcb60069a3114e))\n- *(proxy)* Add Unix socket proxy support ([#900](https://github.com/0x676e67/wreq/issues/900)) - 
([d60a6f3](https://github.com/0x676e67/wreq/commit/d60a6f30b0d299f2f1e44f79ec5f9b6cdf94bddf))\n- *(redirect)* Support accessing redirect history in response  ([#917](https://github.com/0x676e67/wreq/issues/917)) - ([46278eb](https://github.com/0x676e67/wreq/commit/46278eb6a38b48a75803cf7b49161690d0b90161))\n- *(redirect)* Allow custom redirects to access response headers ([#916](https://github.com/0x676e67/wreq/issues/916)) - ([7a1c86a](https://github.com/0x676e67/wreq/commit/7a1c86abab7d835a5da92b2573d7e5ef71ff6980))\n- *(response)* Preserve URL when converting `Response` to `http::Response` ([#897](https://github.com/0x676e67/wreq/issues/897)) - ([72b24c7](https://github.com/0x676e67/wreq/commit/72b24c7284d21af2bfbfcc0bcdbac9bc20a5feac))\n- *(ws)* Remove Utf8Bytes::from_bytes_unchecked, unsafe UTF-8 ([#912](https://github.com/0x676e67/wreq/issues/912)) - ([e6b8bcf](https://github.com/0x676e67/wreq/commit/e6b8bcfd33ec6a70cf705da1665ca6d15cae520e))\n\n### Refactor\n\n- *(connect)* Safely convert `socket2::Socket` to Tokio `TcpSocket` ([#904](https://github.com/0x676e67/wreq/issues/904)) - ([2461be9](https://github.com/0x676e67/wreq/commit/2461be98fc73e2fd78c396a69c70ce9ab4f7bbf0))\n- *(core)* Replace Tokio I/O abstraction ([#909](https://github.com/0x676e67/wreq/issues/909)) - ([16976b9](https://github.com/0x676e67/wreq/commit/16976b935f01a6464d4c0ae1e3611e45429b351b))\n- *(deps)* Remove dependency on `url::Url` ([#914](https://github.com/0x676e67/wreq/issues/914)) - ([356950d](https://github.com/0x676e67/wreq/commit/356950d2cfbcb9f4f4ff5832ca696a95880171f2))\n- *(h2)* Refactor legacy unsafe wrapper code ([#905](https://github.com/0x676e67/wreq/issues/905)) - ([172f1c5](https://github.com/0x676e67/wreq/commit/172f1c558292b4630875b0e3910ee2cb4337f071))\n- *(io)* Use Pin::as_deref_mut() from std instead of custom polyfill ([#906](https://github.com/0x676e67/wreq/issues/906)) - 
([d3d80f1](https://github.com/0x676e67/wreq/commit/d3d80f16e23e8e1594f2c45041b9403ea2b6be03))\n\n### Documentation\n\n- *(identity)* Update documentation - ([459afd6](https://github.com/0x676e67/wreq/commit/459afd6a90c4da254dd6598f604c3b1fd1841cec))\n- *(proxy)* Remove type export section - ([ae81ef5](https://github.com/0x676e67/wreq/commit/ae81ef533e2439d0398a22b6740521fddcb6cc0d))\n- *(request)* Update docs on request methods with cfg support - ([654e225](https://github.com/0x676e67/wreq/commit/654e2258d8472c3427af09b13c19f70949f38ca9))\n\n### Performance\n\n- *(http1)* Write during header sorting ([#899](https://github.com/0x676e67/wreq/issues/899)) - ([f025e3f](https://github.com/0x676e67/wreq/commit/f025e3fcfce4d8a8d31726b46e92ad8f51dcf46f))\n- *(http2)* Significantly improve http2 multi-core performance ([#892](https://github.com/0x676e67/wreq/issues/892)) - ([2c3f873](https://github.com/0x676e67/wreq/commit/2c3f8736b21589ab4f9f2dec1f56c0a9de321dd0))\n- *(layer)* Inline layer creation for faster client build - ([78e8fc7](https://github.com/0x676e67/wreq/commit/78e8fc7b203ac382a5fb70183564513c7346cbe1))\n\n### Styling\n\n- *(cookie)* Fmt code - ([315bccf](https://github.com/0x676e67/wreq/commit/315bccfc65101642b2a56f583c573b6d11148bb7))\n- *(header)* Simplify header sorting branch match - ([ee23d25](https://github.com/0x676e67/wreq/commit/ee23d25fd258f51eb33b20d72460913c38e7a517))\n- *(proto)* Fmt code - ([02e0bc0](https://github.com/0x676e67/wreq/commit/02e0bc06876a458536268863938a4906354791b9))\n- *(request)* Fmt code - ([d6e56e4](https://github.com/0x676e67/wreq/commit/d6e56e4b9e85ab73d627d72a51ed04198483cf98))\n\n### Miscellaneous Tasks\n\n- *(ci)* Speed up tests with feature matrix in GitHub Actions ([#894](https://github.com/0x676e67/wreq/issues/894)) - ([d66dc66](https://github.com/0x676e67/wreq/commit/d66dc6671fadbd427ea2c1d0e4fa07e61d62b4db))\n- *(proxy)* Debug-print HTTP headers - 
([628e6b4](https://github.com/0x676e67/wreq/commit/628e6b462561a7fd5fe987dff6e14a76b02272de))\n- *(upgrade)* Drop unused code - ([bb26177](https://github.com/0x676e67/wreq/commit/bb261776fe41f1024f3af1d73147fd0440b2f908))\n- Minimize package size - ([938e3f5](https://github.com/0x676e67/wreq/commit/938e3f56c113bd721ceb9216f15c2e8e141f6d50))\n\n### Build\n\n- *(deps)* Bump actions/checkout from 4 to 5 ([#908](https://github.com/0x676e67/wreq/issues/908)) - ([5f6723a](https://github.com/0x676e67/wreq/commit/5f6723a7a8aad0db11f27ff9aa8e5b208f5f6cb4))\n- *(deps)* Minimize out-of-the-box dependencies ([#902](https://github.com/0x676e67/wreq/issues/902)) - ([5b68106](https://github.com/0x676e67/wreq/commit/5b68106bcda7ae78209afb35925704f13765717b))\n- *(deps)* Bump actions/checkout from 3 to 5 ([#893](https://github.com/0x676e67/wreq/issues/893)) - ([9877ed6](https://github.com/0x676e67/wreq/commit/9877ed6c177c139719bf35245027399e39a7cae7))\n\n\n## [6.0.0-rc.8](https://github.com/0x676e67/wreq/compare/v6.0.0-rc.7..v6.0.0-rc.8) - 2025-08-12\n\n### Features\n\n- *(dns)* Improve `dns_resolver` for better ergonomics and flexibility ([#891](https://github.com/0x676e67/wreq/issues/891)) - ([9e3f974](https://github.com/0x676e67/wreq/commit/9e3f97450af724abba62cc1ee586c292b16e8498))\n\n### Bug Fixes\n\n- *(deps)* Upgrade url to v2.5.4 to address CVE-2024-12224 ([#887](https://github.com/0x676e67/wreq/issues/887)) - ([7038272](https://github.com/0x676e67/wreq/commit/70382725752d44682b5e684d7af3522614941f94))\n- *(pool)* Prevent failure when registering the waker with this oneshot ([#888](https://github.com/0x676e67/wreq/issues/888)) - ([f7d914d](https://github.com/0x676e67/wreq/commit/f7d914d96712bb3f20403d1dce1c30c4d03c7586))\n\n### Refactor\n\n- *(client)* Remove `no_keepalive` method ([#890](https://github.com/0x676e67/wreq/issues/890)) - ([0c15943](https://github.com/0x676e67/wreq/commit/0c159431a296163eb52cf95d0ea9f1e9fc84e3c0))\n\n### Documentation\n\n- *(README)* Update 
example - ([b620408](https://github.com/0x676e67/wreq/commit/b6204085abbfba933e6bfb368f7a8579b4bea417))\n- *(service)* Update service docs - ([a644502](https://github.com/0x676e67/wreq/commit/a64450253447a8a4287c89e28c66cbd5f9a8c689))\n\n### Testing\n\n- *(common)* Add missing assertion in full_rewind test ([#889](https://github.com/0x676e67/wreq/issues/889)) - ([c84746a](https://github.com/0x676e67/wreq/commit/c84746af284f4b0c2ec72f4d01150cb53de30ac9))\n\n### Build\n\n- *(deps)* Update async-tungstenite requirement from 0.30.0 to 0.31.0 ([#884](https://github.com/0x676e67/wreq/issues/884)) - ([d484f71](https://github.com/0x676e67/wreq/commit/d484f71b1ba2ad26ee9fa28b230d6c4ce5f63df8))\n\n\n## [6.0.0-rc.7](https://github.com/0x676e67/wreq/compare/v6.0.0-rc.5..v6.0.0-rc.7) - 2025-08-10\n\n### Features\n\n- *(ws)* Option for `default_headers` method in websocket ([#883](https://github.com/0x676e67/wreq/issues/883)) - ([fd36b7a](https://github.com/0x676e67/wreq/commit/fd36b7a817f3fb8d2b59dea73c34ff4fd3249d87))\n\n### Bug Fixes\n\n- *(request)* Correct `default_headers` method semantics ([#882](https://github.com/0x676e67/wreq/issues/882)) - ([2cbd0ac](https://github.com/0x676e67/wreq/commit/2cbd0ac56813a9e4b022d1747dce512943c31993))\n\n### Refactor\n\n- *(dns)* Make hickory module internal ([#881](https://github.com/0x676e67/wreq/issues/881)) - ([e441048](https://github.com/0x676e67/wreq/commit/e441048a6b5df1af3e715cbeceba7e178bbb22eb))\n\n### Miscellaneous Tasks\n\n- *(client)* Expose additional configuration options - ([65bd959](https://github.com/0x676e67/wreq/commit/65bd95963500af6205f9f06b4cc059b67a0ed740))\n\n\n## [6.0.0-rc.5](https://github.com/0x676e67/wreq/compare/v6.0.0-rc.4..v6.0.0-rc.5) - 2025-08-09\n\n### Features\n\n- *(ws)* Expose the `message` module for external use ([#874](https://github.com/0x676e67/wreq/issues/874)) - ([abed4ac](https://github.com/0x676e67/wreq/commit/abed4ac82d8ad82c72593ad931477acea70557b0))\n\n### Refactor\n\n- *(cookie)* 
Refactor legacy jar cookie implementation ([#871](https://github.com/0x676e67/wreq/issues/871)) - ([ebb1504](https://github.com/0x676e67/wreq/commit/ebb1504400102c71af9d76e9084f8d2ea14c16c7))\n- *(dns)* Consolidate legacy DNS modules ([#876](https://github.com/0x676e67/wreq/issues/876)) - ([f54367c](https://github.com/0x676e67/wreq/commit/f54367cad0d5c699596f80857af234e78ba3d166))\n\n### Documentation\n\n- *(module)* Improve module-level documentation ([#877](https://github.com/0x676e67/wreq/issues/877)) - ([4e2c15f](https://github.com/0x676e67/wreq/commit/4e2c15f39ba0bdf61b0aedb30d43779a4c455d58))\n- *(tls)* Update documentation for configuration fields ([#880](https://github.com/0x676e67/wreq/issues/880)) - ([94c060e](https://github.com/0x676e67/wreq/commit/94c060ed2a3fcc744223ab6a7224e67fae8c9210))\n\n### Performance\n\n- *(upgrade)* Inline hot methods in async IO wrapper ([#875](https://github.com/0x676e67/wreq/issues/875)) - ([8388b52](https://github.com/0x676e67/wreq/commit/8388b5241a253bb8f550435aa9e487d9ce16b44d))\n\n### Styling\n\n- *(internal)* Refactor internal code layout and naming ([#878](https://github.com/0x676e67/wreq/issues/878)) - ([fbf11fd](https://github.com/0x676e67/wreq/commit/fbf11fd588cb773471fb46302405655eb53cafe6))\n\n### Testing\n\n- *(client)* Verify multiple identical headers are appended correctly ([#879](https://github.com/0x676e67/wreq/issues/879)) - ([f245f9c](https://github.com/0x676e67/wreq/commit/f245f9c47965ee4b7682050357f350e05a2ca549))\n\n### Miscellaneous Tasks\n\n- *(retry)* Remove unnecessary clone in request duplication - ([d78568c](https://github.com/0x676e67/wreq/commit/d78568cc6079aaefe3f3b02c3537e21646a1f7f0))\n\n### Build\n\n- *(ws)* Switch to runtime-agnostic WebSocket implementation ([#873](https://github.com/0x676e67/wreq/issues/873)) - ([3fb93ef](https://github.com/0x676e67/wreq/commit/3fb93efb76773d8349ade8f66fe3cabb543faa7b))\n\n\n## 
[6.0.0-rc.4](https://github.com/0x676e67/wreq/compare/v6.0.0-rc.2..v6.0.0-rc.4) - 2025-08-07\n\n### Bug Fixes\n\n- *(cookie)* Store response cookies even with manual `Cookie` header ([#868](https://github.com/0x676e67/wreq/issues/868)) - ([d2f3bf0](https://github.com/0x676e67/wreq/commit/d2f3bf0ec425ad4880dbcba03951f260f8bb1015))\n- *(header)* Preserve multi-value headers in `OrigHeaderMap` sorting ([#867](https://github.com/0x676e67/wreq/issues/867)) - ([b650956](https://github.com/0x676e67/wreq/commit/b6509561c779dde492a1208a2fe5f7c64832419d))\n\n### Refactor\n\n- *(client)* Allow `?Sized` trait objects in `dns_resolver` ([#870](https://github.com/0x676e67/wreq/issues/870)) - ([2baf195](https://github.com/0x676e67/wreq/commit/2baf1953024fdb646e205478d9dc568113ba2ec1))\n\n### Performance\n\n- *(cookie)* Optimize cookie layer to skip unnecessary matching ([#866](https://github.com/0x676e67/wreq/issues/866)) - ([ce9b531](https://github.com/0x676e67/wreq/commit/ce9b531bd4d0211b73fb64211f51a8549c948cfc))\n\n\n## [6.0.0-rc.2](https://github.com/0x676e67/wreq/compare/v6.0.0-rc.1..v6.0.0-rc.2) - 2025-08-04\n\n### Bug Fixes\n\n- *(build)* Resolve build failure on Windows when `default-features` are disabled ([#864](https://github.com/0x676e67/wreq/issues/864)) - ([4418e47](https://github.com/0x676e67/wreq/commit/4418e4773a711bf15a2e86777473f16b0af3d8e3))\n\n### Documentation\n\n- *(options)* Fix `Http2Options` description ([#863](https://github.com/0x676e67/wreq/issues/863)) - ([89b0957](https://github.com/0x676e67/wreq/commit/89b0957a196debafaeef6a6fa271a53b4a3f7964))\n\n\n## [6.0.0-rc.1](https://github.com/0x676e67/wreq/compare/v5.1.0..v6.0.0-rc.1) - 2025-08-03\n\n### Features\n\n- *(client)* Set default values for TCP keepalive and user_timeout ([#852](https://github.com/0x676e67/wreq/issues/852)) - ([f06fe61](https://github.com/0x676e67/wreq/commit/f06fe616b72a8d672c9a6118acfab7b96f18bbb6))\n- *(client)* Expose TCP socket Happy Eyeballs timeout API 
([#844](https://github.com/0x676e67/wreq/issues/844)) - ([bcbfbf8](https://github.com/0x676e67/wreq/commit/bcbfbf802c03b6cf58eaa566d38b4a8c29037635))\n- *(client)* Expose TCP socket send/recv buffer APIs ([#843](https://github.com/0x676e67/wreq/issues/843)) - ([2ea1052](https://github.com/0x676e67/wreq/commit/2ea105290434339cdf84a83afe4d6f65c864e09a))\n- *(client)* Disable redirects by default ([#805](https://github.com/0x676e67/wreq/issues/805)) - ([ecf6019](https://github.com/0x676e67/wreq/commit/ecf60193deaa6951212d862445645e0ba9175cd7))\n- *(client)* Add convenience method for sending `OPTIONS` requests ([#787](https://github.com/0x676e67/wreq/issues/787)) - ([34f1586](https://github.com/0x676e67/wreq/commit/34f158610a52228ad4d0bc665c268714c5b34e0d))\n- *(client)* Make `HTTP`/`TLS` config options publicly accessible ([#783](https://github.com/0x676e67/wreq/issues/783)) - ([a4e7b98](https://github.com/0x676e67/wreq/commit/a4e7b981790942364b07f65333602f4fcbb68a7a))\n- *(client)* Add `SO_REUSEADDR` support for tcp socket ([#762](https://github.com/0x676e67/wreq/issues/762)) - ([8aced63](https://github.com/0x676e67/wreq/commit/8aced637eed476faeb2212930ba91570a0c4cbda))\n- *(client)* Add tower HTTP request middleware layer ([#694](https://github.com/0x676e67/wreq/issues/694)) - ([0ad0021](https://github.com/0x676e67/wreq/commit/0ad0021bde7dd890aa58e3a1d4f422984fab9eec))\n- *(client)* Add tcp_user_timeout builder option ([#688](https://github.com/0x676e67/wreq/issues/688)) - ([d1d0eb4](https://github.com/0x676e67/wreq/commit/d1d0eb459b4859a73fc7b75934804bfa30bc907a))\n- *(client)* Add tcp_keepalive_interval and tcp_keepalive_retries to ClientBuilder ([#643](https://github.com/0x676e67/wreq/issues/643)) - ([32fe31e](https://github.com/0x676e67/wreq/commit/32fe31e0b6aca538909d3e5367d12d58269bf818))\n- *(client)* Drop API for retrieving default headers ([#640](https://github.com/0x676e67/wreq/issues/640)) - 
([1b4a445](https://github.com/0x676e67/wreq/commit/1b4a4451fb7bd28e610431a12b2a427b3da64e9b))\n- *(client)* Add identity to be used for client certificate authentication ([#617](https://github.com/0x676e67/wreq/issues/617)) - ([55c2490](https://github.com/0x676e67/wreq/commit/55c249067c267099c400119e12491441e4c0e63a))\n- *(client)* Adds support for SSLKEYLOGFILE output from client ([#605](https://github.com/0x676e67/wreq/issues/605)) - ([dc0c40b](https://github.com/0x676e67/wreq/commit/dc0c40bed2faa7b743d5a22496e83029a9b84dcf))\n- *(client)* ClientBuilder::interface on Solarish OS ([#594](https://github.com/0x676e67/wreq/issues/594)) - ([c0a7fbc](https://github.com/0x676e67/wreq/commit/c0a7fbcaf98f276de74e9b11dbc23f5bb5ce457c))\n- *(cookie)* Provide access to raw cookie API ([#830](https://github.com/0x676e67/wreq/issues/830)) - ([5c5e3e5](https://github.com/0x676e67/wreq/commit/5c5e3e5ccdc3383c7c2b71fe4954da039882f877))\n- *(emulation)* Expose config fields via accessors while preserving `non_exhaustive` ([#854](https://github.com/0x676e67/wreq/issues/854)) - ([dfdf707](https://github.com/0x676e67/wreq/commit/dfdf707e3fb7ca6f3800e4741572eb51323d06cb))\n- *(error)* Report custom reason phrase in error message ([#767](https://github.com/0x676e67/wreq/issues/767)) - ([b492bc1](https://github.com/0x676e67/wreq/commit/b492bc1d408f1742c1f1e688784707f877cc1d5b))\n- *(error)* Check if the error is an upgrade error ([#623](https://github.com/0x676e67/wreq/issues/623)) - ([ddae516](https://github.com/0x676e67/wreq/commit/ddae516928663b2a9a181eb387dc1ff9aa567c79))\n- *(examples)* Add emulation twitter android  `TLS`/`HTTP2` example ([#612](https://github.com/0x676e67/wreq/issues/612)) - ([40c9a70](https://github.com/0x676e67/wreq/commit/40c9a70ad015e4f8db1e9dac8416c61b25d05318))\n- *(header)* Enhance the usability of `OriginalHeaders` API ([#731](https://github.com/0x676e67/wreq/issues/731)) - 
([99bfc39](https://github.com/0x676e67/wreq/commit/99bfc391dc5e90f439576da282f94be9bb78b1f8))\n- *(headers)* Omit payload length for HTTP/2 `OPTIONS` ([#785](https://github.com/0x676e67/wreq/issues/785)) - ([bb00275](https://github.com/0x676e67/wreq/commit/bb00275602ea2468d42ee4674652315b4ae2dc6d))\n- *(http2)* Upgrade `http2` dependency to 0.5.0 ([#651](https://github.com/0x676e67/wreq/issues/651)) - ([a21827b](https://github.com/0x676e67/wreq/commit/a21827bb30bc656d9ae0e71a5e6fa3cff4d6e94f))\n- *(lib)* Export `EmulationBuilder` as a public API ([#825](https://github.com/0x676e67/wreq/issues/825)) - ([080f85f](https://github.com/0x676e67/wreq/commit/080f85f1021a5555586b986c4e6addaabaeba018))\n- *(pool)* Distinguish connections by request emulation ([#841](https://github.com/0x676e67/wreq/issues/841)) - ([67884ee](https://github.com/0x676e67/wreq/commit/67884eea31720d743c98bca27b8c9fea02a2f555))\n- *(redirect)* Per-request redirect config support ([#710](https://github.com/0x676e67/wreq/issues/710)) - ([265df64](https://github.com/0x676e67/wreq/commit/265df646689eceb08ab020535f756ce055182ec1))\n- *(request)* Support per-request emulation configuration ([#759](https://github.com/0x676e67/wreq/issues/759)) - ([2ec6d21](https://github.com/0x676e67/wreq/commit/2ec6d21ad9ba1c815c81c9152fc35f70744b7211))\n- *(request)* Adjust internal structure and allow skip default headers ([#723](https://github.com/0x676e67/wreq/issues/723)) - ([7be331d](https://github.com/0x676e67/wreq/commit/7be331d2dffc4dbe5226c95c5b8b3dd96897a324))\n- *(request)* Setting headers order at the request level ([#602](https://github.com/0x676e67/wreq/issues/602)) - ([3b280f8](https://github.com/0x676e67/wreq/commit/3b280f845538e12a92f0976b9455604f8260ef90))\n- *(response)* Add `Response.local_addr()` to get local address ([#835](https://github.com/0x676e67/wreq/issues/835)) - ([35652f5](https://github.com/0x676e67/wreq/commit/35652f547fc293a8339b5e475fd5e8b41e2fafd3))\n- *(tls)* Treat different TLS 
configs as distinct sessions ([#779](https://github.com/0x676e67/wreq/issues/779)) - ([e05406d](https://github.com/0x676e67/wreq/commit/e05406d4bfdb93837c9168fecd24a467908ba7a5))\n- *(tls)* Add API to set list of stable curves ([#633](https://github.com/0x676e67/wreq/issues/633)) - ([ea0eb17](https://github.com/0x676e67/wreq/commit/ea0eb17ed425d6477eebe629b7851f0e51a1bc75))\n- *(websocket)* Support per-request emulation configuration ([#764](https://github.com/0x676e67/wreq/issues/764)) - ([468f86f](https://github.com/0x676e67/wreq/commit/468f86fd5811043a1d89437f1bea30c8cfbf93b8))\n- *(ws)* Add support for header order on websocket builder ([#608](https://github.com/0x676e67/wreq/issues/608)) - ([ad9e0b9](https://github.com/0x676e67/wreq/commit/ad9e0b97d5733a800b17c32851c1824da83d05c4))\n\n### Bug Fixes\n\n- *(client)* Fix `HTTP/2` safe retry policy ([#715](https://github.com/0x676e67/wreq/issues/715)) - ([3a5c356](https://github.com/0x676e67/wreq/commit/3a5c35697d12dcf67d30db88abb8d1fe37b638a7))\n- *(client)* Prevent future stack overflow in request handling ([#685](https://github.com/0x676e67/wreq/issues/685)) - ([402ffe3](https://github.com/0x676e67/wreq/commit/402ffe3184362a18696791621261c744a5f413b2))\n- *(client)* Update client to retain tls keylog configuration ([#619](https://github.com/0x676e67/wreq/issues/619)) - ([22c0770](https://github.com/0x676e67/wreq/commit/22c0770d3a123fa2569d9174112fa7c2a309220f))\n- *(client)* Fix `HTTP2` extensions to be applied in retry requests ([#596](https://github.com/0x676e67/wreq/issues/596)) - ([a1f0d32](https://github.com/0x676e67/wreq/commit/a1f0d32ede0bb146230781603d532217ccdc0430))\n- *(core)* Improve client errors details if available ([#665](https://github.com/0x676e67/wreq/issues/665)) - ([fb41f70](https://github.com/0x676e67/wreq/commit/fb41f70c7b70a556c2a97f9b699049a5e1fb58f4))\n- *(dns)* Prefer IPv6 addresses before IPv4 even if resolver ordered differently ([#658](https://github.com/0x676e67/wreq/issues/658)) 
- ([e913768](https://github.com/0x676e67/wreq/commit/e913768cf1be11b277b9b84b2f31b0090e426450))\n- *(error)* Error::is_timeout() checks for crate::core::Error::is_timeout() - ([34e79f1](https://github.com/0x676e67/wreq/commit/34e79f1ea81085c66e9ffb66066c8a35254ebdc1))\n- *(error)* Include request URL in error messages ([#737](https://github.com/0x676e67/wreq/issues/737)) - ([f312645](https://github.com/0x676e67/wreq/commit/f312645c31f53bcbf24d3899132b6fd9af890beb))\n- *(hash)* Fix #780 ([#784](https://github.com/0x676e67/wreq/issues/784)) - ([7b5808d](https://github.com/0x676e67/wreq/commit/7b5808dbb6073cb81e657aedb19ce2f9965875d5))\n- *(http2)* Rename `unknown_setting8` to `enable_connect_protocol` ([#647](https://github.com/0x676e67/wreq/issues/647)) - ([3464105](https://github.com/0x676e67/wreq/commit/34641053a7e2f6737ccf9803cc7ab02cc9d3c103))\n- *(pool)* Cap pool idle interval to a minimum ([#814](https://github.com/0x676e67/wreq/issues/814)) - ([daba062](https://github.com/0x676e67/wreq/commit/daba06298e60ef67a8a57de15aaad0ac071294be))\n- *(pool)* Don't spawn pool idle interval if timeout is 0 ([#806](https://github.com/0x676e67/wreq/issues/806)) - ([a6deeb4](https://github.com/0x676e67/wreq/commit/a6deeb44b8e8d67e322a33759b264bc81a17e7d4))\n- *(proxy)* Restore default port 1080 for SOCKS proxies without explicit port ([#821](https://github.com/0x676e67/wreq/issues/821)) - ([256de2b](https://github.com/0x676e67/wreq/commit/256de2bb5ff60bd0f040277e0020ef84d0ea8b12))\n- *(proxy)* Set https system proxy on windows ([#678](https://github.com/0x676e67/wreq/issues/678)) - ([7111b13](https://github.com/0x676e67/wreq/commit/7111b131db66abdddbbccafa5450f3d1637d229b))\n- *(redirect)* Make the number of redirects of policy matches its maximum limit ([#629](https://github.com/0x676e67/wreq/issues/629)) - ([85bad99](https://github.com/0x676e67/wreq/commit/85bad9996a9d8785feb92cf3d2c3c845bc10a306))\n- *(request)* Fix headers order 
([#603](https://github.com/0x676e67/wreq/issues/603)) - ([9c85532](https://github.com/0x676e67/wreq/commit/9c8553229f62c901a2b739fed413be08fa558d4b))\n- *(tls)* Fix encoding error when multiple ALPS extensions are present ([#861](https://github.com/0x676e67/wreq/issues/861)) - ([6ce6c73](https://github.com/0x676e67/wreq/commit/6ce6c73cd0479a169d0f7e6f90c4073cf6e3fc0a))\n- *(ws)* Improve status code message on WebSocket upgrade failure ([#824](https://github.com/0x676e67/wreq/issues/824)) - ([4f6f6da](https://github.com/0x676e67/wreq/commit/4f6f6da67bc990be1753c4bb8e546c1b7ed35889))\n\n### Refactor\n\n- *(client)* Use `Either` to unify generic and boxed `Client` service types ([#849](https://github.com/0x676e67/wreq/issues/849)) - ([9cb05e7](https://github.com/0x676e67/wreq/commit/9cb05e794a6d5f1421482e15117ece37180099a7))\n- *(client)* Move HTTP/2 safe retry logic into `tower` middleware ([#713](https://github.com/0x676e67/wreq/issues/713)) - ([136c791](https://github.com/0x676e67/wreq/commit/136c7912b54bb74cecc48618415a64f865d7830c))\n- *(client)* Move read timeout logic into `tower` middleware ([#702](https://github.com/0x676e67/wreq/issues/702)) - ([06d5e47](https://github.com/0x676e67/wreq/commit/06d5e47f7dfb6353553d9bf5e99b185f644c19fd))\n- *(client)* Move total timeout logic into Tower middleware ([#701](https://github.com/0x676e67/wreq/issues/701)) - ([ed8b2ea](https://github.com/0x676e67/wreq/commit/ed8b2eab0ee71278fd2f787089026cb66f64dd29))\n- *(client)* Remove legacy HTTP/1 and HTTP/2 tuning options ([#644](https://github.com/0x676e67/wreq/issues/644)) - ([f019267](https://github.com/0x676e67/wreq/commit/f019267dd11fc7dd5ce4ab72b4c85a689a206710))\n- *(client)* Replace header map by key - ([6012542](https://github.com/0x676e67/wreq/commit/60125429e1764e50b064c835a77e009e06a18827))\n- *(client)* Replace header map by key ([#618](https://github.com/0x676e67/wreq/issues/618)) - 
([237b17a](https://github.com/0x676e67/wreq/commit/237b17a649cf201fbac706044bb665e84c514804))\n- *(config)* Replace duplicate types with type aliases ([#740](https://github.com/0x676e67/wreq/issues/740)) - ([6bb210b](https://github.com/0x676e67/wreq/commit/6bb210b95d550ac415e7fea3d142d2296e1d4fa1))\n- *(config)* Unify request extensions config processing ([#712](https://github.com/0x676e67/wreq/issues/712)) - ([fb1b7b2](https://github.com/0x676e67/wreq/commit/fb1b7b2f3aab1e2c02ee9a0927ae5750e0ae740e))\n- *(config)* Remove public config fields and improve backward compatibility ([#614](https://github.com/0x676e67/wreq/issues/614)) - ([6631c5c](https://github.com/0x676e67/wreq/commit/6631c5c9f4489b5b323eab25d953fb9d13b698f8))\n- *(connect)* Modularize components by responsibility ([#819](https://github.com/0x676e67/wreq/issues/819)) - ([c996ec7](https://github.com/0x676e67/wreq/commit/c996ec7b0b6dca703b75b0007f9f36b142c9cc64))\n- *(connect)* Remove `Connect` trait alias wrapper around `tower::Service` ([#807](https://github.com/0x676e67/wreq/issues/807)) - ([947a25b](https://github.com/0x676e67/wreq/commit/947a25b7f158f84c8483fbf18e780dc4747970b2))\n- *(connect)* Streamline connector builder structure ([#705](https://github.com/0x676e67/wreq/issues/705)) - ([eb9308b](https://github.com/0x676e67/wreq/commit/eb9308bb3ded0b9fd6eabd77a947738f9ac78705))\n- *(connect)* Cleanup dead code for `tracing` feature ([#689](https://github.com/0x676e67/wreq/issues/689)) - ([5574786](https://github.com/0x676e67/wreq/commit/5574786b1a2572a13b5dea8c59e554cf9b63acf0))\n- *(connect)* Refactored internal connector builder - ([39f779b](https://github.com/0x676e67/wreq/commit/39f779b90b3a12705f5658f6a3c43a00c721d88e))\n- *(cookie)* Integrate cookie store into `tower` layer ([#695](https://github.com/0x676e67/wreq/issues/695)) - ([c0cf8e3](https://github.com/0x676e67/wreq/commit/c0cf8e396b5b9743e6b19b9b59b60753a3052802))\n- *(cookie)* Remove redundant store abstraction API 
([#635](https://github.com/0x676e67/wreq/issues/635)) - ([8e34a91](https://github.com/0x676e67/wreq/commit/8e34a913e45cf684711c9c5c45a7e62f48d62cee))\n- *(core)* Separate `body` and `proto` responsibilities ([#839](https://github.com/0x676e67/wreq/issues/839)) - ([9e65c9f](https://github.com/0x676e67/wreq/commit/9e65c9f1d4d4a00ded6a3916e74a99486cb41eb6))\n- *(core)* Add socket addr to ConnectError ([#663](https://github.com/0x676e67/wreq/issues/663)) - ([877aa9c](https://github.com/0x676e67/wreq/commit/877aa9c7e791717a8c5ff106a376877d14442211))\n- *(core)* Reduce dependency on `futures-util` ([#636](https://github.com/0x676e67/wreq/issues/636)) - ([87ed77b](https://github.com/0x676e67/wreq/commit/87ed77b02a251b65aed014a9d329a75a6d92e76a))\n- *(core/client)* Remove old body delay_eof code ([#736](https://github.com/0x676e67/wreq/issues/736)) - ([a9d5db1](https://github.com/0x676e67/wreq/commit/a9d5db12aadfc17132c8444acaedb660ae67febe))\n- *(decoder)* Migrate decompression handling to tower-http ([#720](https://github.com/0x676e67/wreq/issues/720)) - ([e2427d8](https://github.com/0x676e67/wreq/commit/e2427d8c60ea370ba092dda766d74ffd119e1655))\n- *(dns)* Disable export of `hickory_resolver` module ([#646](https://github.com/0x676e67/wreq/issues/646)) - ([68fc1e4](https://github.com/0x676e67/wreq/commit/68fc1e4dcd65b0567a3e3b1fa4b485c42652d1b3))\n- *(error)* Use standard library-style error handling ([#722](https://github.com/0x676e67/wreq/issues/722)) - ([97657fd](https://github.com/0x676e67/wreq/commit/97657fd816202dbd8f34a0f0733422dedd27184e))\n- *(future)* Simplify `Client` future types with `Either` ([#851](https://github.com/0x676e67/wreq/issues/851)) - ([b6922d0](https://github.com/0x676e67/wreq/commit/b6922d0902dbe24daa656c06dcc4172b5193e43a))\n- *(header)* Preserve header order and casing in `OrigHeaderMap` redesign ([#860](https://github.com/0x676e67/wreq/issues/860)) - 
([cc0e637](https://github.com/0x676e67/wreq/commit/cc0e637798a115f4fdc41a1fd3799c5bdd10e127))\n- *(http1)* Remove support for `title_case_headers` - ([4501d9a](https://github.com/0x676e67/wreq/commit/4501d9ace91a6adab041cddc8ce6d5e964e278c8))\n- *(http2)* Add descriptive error for non-empty body in CONNECT request ([#634](https://github.com/0x676e67/wreq/issues/634)) - ([fa413e6](https://github.com/0x676e67/wreq/commit/fa413e629687df306d937d3f69e64619c80ad524))\n- *(internally)* Normalize internal module structure ([#790](https://github.com/0x676e67/wreq/issues/790)) - ([8b768e7](https://github.com/0x676e67/wreq/commit/8b768e7579cacf8c85cb580abb47c88b9b7662dd))\n- *(internally)* Backport hyper client ([#624](https://github.com/0x676e67/wreq/issues/624)) - ([4efc5a7](https://github.com/0x676e67/wreq/commit/4efc5a7c227dd257ca866fe1772341803d3c91bc))\n- *(internally)* Refactor internal certificate loading ([#616](https://github.com/0x676e67/wreq/issues/616)) - ([2bf9da8](https://github.com/0x676e67/wreq/commit/2bf9da8b0defd4f805fccbd60d4468b14c9dfcdd))\n- *(io)* Drop duplicated legacy IO code ([#836](https://github.com/0x676e67/wreq/issues/836)) - ([0b22b58](https://github.com/0x676e67/wreq/commit/0b22b585d7da3f7375d4d42d109f056c3769a089))\n- *(layer)* Simplify tower `Service` error conversion ([#850](https://github.com/0x676e67/wreq/issues/850)) - ([e577afc](https://github.com/0x676e67/wreq/commit/e577afc903ff416b6db486b0e7c2fe0112914cf9))\n- *(module)* Separate hash responsibilities ([#856](https://github.com/0x676e67/wreq/issues/856)) - ([a5f5caa](https://github.com/0x676e67/wreq/commit/a5f5caadd3513bd8d70081d20a131bd77fdc8451))\n- *(module)* Separate util responsibilities ([#838](https://github.com/0x676e67/wreq/issues/838)) - ([9756969](https://github.com/0x676e67/wreq/commit/975696987c4e04a570f7af7fbd2f81de7de932b4))\n- *(module)* Separate `proxy` and `client` responsibilities ([#833](https://github.com/0x676e67/wreq/issues/833)) - 
([6b71f74](https://github.com/0x676e67/wreq/commit/6b71f74f70179a3499480b10d19f9bd26f0c5bd9))\n- *(pool)* Simplify idle task using async/await ([#812](https://github.com/0x676e67/wreq/issues/812)) - ([808da8c](https://github.com/0x676e67/wreq/commit/808da8ceda00e88188339fde3477f097ce4d12a3))\n- *(proxy)* Remove duplicated basic auth encoder ([#845](https://github.com/0x676e67/wreq/issues/845)) - ([5b0cf72](https://github.com/0x676e67/wreq/commit/5b0cf72b98be499c9fe4fe8a789d8ac8f9dbf88f))\n- *(proxy)* Replace string comparison with constant comparison ([#820](https://github.com/0x676e67/wreq/issues/820)) - ([d5d60ab](https://github.com/0x676e67/wreq/commit/d5d60ab5c4b445e4584dbec8e65c801f6a6baaf2))\n- *(proxy)* Remove support for `Proxy::custom` ([#756](https://github.com/0x676e67/wreq/issues/756)) - ([1a5a36a](https://github.com/0x676e67/wreq/commit/1a5a36a5b26de80f70f5116cfdf86806f39f2938))\n- *(proxy)* Migrate proxy matcher from hyper-util ([#675](https://github.com/0x676e67/wreq/issues/675)) - ([fafe3a6](https://github.com/0x676e67/wreq/commit/fafe3a615319386ab5a908780996d61ab87dbe61))\n- *(redirect)* Migrate from `iri-string` to `url` crate for URI resolution ([#757](https://github.com/0x676e67/wreq/issues/757)) - ([7b72c18](https://github.com/0x676e67/wreq/commit/7b72c18707c661e3e2eb4256a3d6aa00c6c1dd51))\n- *(redirect)* Redesign redirection logic in `tower` middleware ([#708](https://github.com/0x676e67/wreq/issues/708)) - ([a53ce43](https://github.com/0x676e67/wreq/commit/a53ce43adde765be625fc5e1b176fffdfd5c0975))\n- *(rewind)* Replace manual implementations of `ReadBufCursor` methods ([#595](https://github.com/0x676e67/wreq/issues/595)) - ([e11e214](https://github.com/0x676e67/wreq/commit/e11e214248f8a9bbe1a998f70b823f026035f3f6))\n- *(service)* Eliminate unnecessary URL parsing ([#831](https://github.com/0x676e67/wreq/issues/831)) - ([4339692](https://github.com/0x676e67/wreq/commit/4339692b7333c8be6c6ed779ac8f172aaca12e40))\n- *(socks)* Migrate to 
`tokio-socks` for easier maintenance ([#766](https://github.com/0x676e67/wreq/issues/766)) - ([b405fda](https://github.com/0x676e67/wreq/commit/b405fda0da727f35457a2a8b751be5c27455c50c))\n- *(socks)* Reuse socks module logic from hyper-util ([#686](https://github.com/0x676e67/wreq/issues/686)) - ([ecb1493](https://github.com/0x676e67/wreq/commit/ecb1493d6cc259bf754ffdc0c93fd946c6a47d97))\n- *(timeout)* Simplify `Pin<B>` wrapping ([#732](https://github.com/0x676e67/wreq/issues/732)) - ([40518b6](https://github.com/0x676e67/wreq/commit/40518b6d4488474f098661de1f11393931d9ccdd))\n- *(tls)* Allow setting `ALPN`/`ALPS` protocol preference order ([#743](https://github.com/0x676e67/wreq/issues/743)) - ([7d7f65f](https://github.com/0x676e67/wreq/commit/7d7f65f7e70194a4ec69af143d906302dd587486))\n- *(tls)* Redesign certificate compression API for clarity and consistency ([#742](https://github.com/0x676e67/wreq/issues/742)) - ([7097c8d](https://github.com/0x676e67/wreq/commit/7097c8da26db2b7ac28287de257e0477ca1d0043))\n- *(tls)* Remove unnecessary lazy closure from `TlsConnector` ([#739](https://github.com/0x676e67/wreq/issues/739)) - ([37cd919](https://github.com/0x676e67/wreq/commit/37cd919b41cf74c241071e8b7cc8f6ba29f9864f))\n- *(tls)* Refactor TLS keylog tracking policy ([#655](https://github.com/0x676e67/wreq/issues/655)) - ([d88c83d](https://github.com/0x676e67/wreq/commit/d88c83dd2db6953415a62cb395efa3f07d95e355))\n- *(tls)* Remove configuration not associated with TLS extensions ([#654](https://github.com/0x676e67/wreq/issues/654)) - ([d62475f](https://github.com/0x676e67/wreq/commit/d62475f7d4766e4e2356dc21a39cf244b21c0d36))\n- *(tls)* Refactor certificate compression algorithm configuration API ([#639](https://github.com/0x676e67/wreq/issues/639)) - ([058fc9a](https://github.com/0x676e67/wreq/commit/058fc9a6c9152d088d723369fe408c658b4eea6c))\n- *(tls)* Refactor extension permutation configuration API ([#638](https://github.com/0x676e67/wreq/issues/638)) - 
([da9059b](https://github.com/0x676e67/wreq/commit/da9059b9d44a429dd82abf9d883209662ad5cdbe))\n- *(tls)* Distinguish certificate identity from store ([#621](https://github.com/0x676e67/wreq/issues/621)) - ([89e2c5c](https://github.com/0x676e67/wreq/commit/89e2c5ce687dab7c08c6da7c77c089afd97a3ab8))\n- *(websocket)* Standardize WebSocket module exports ([#645](https://github.com/0x676e67/wreq/issues/645)) - ([f61a89f](https://github.com/0x676e67/wreq/commit/f61a89f0cc0e1279469d70a99c055fea53e8d173))\n- *(ws)* Refactor HTTP2 upgrade to WebSocket ([#802](https://github.com/0x676e67/wreq/issues/802)) - ([e7b7052](https://github.com/0x676e67/wreq/commit/e7b705234e68f9a0e39eb86791931d985506a04b))\n- Restructure the core implementation of the client ([#668](https://github.com/0x676e67/wreq/issues/668)) - ([1d445cb](https://github.com/0x676e67/wreq/commit/1d445cb15f444e8104cb264a0fae2e05091e3b8d))\n- Store request timeout in request extensions instead ([#660](https://github.com/0x676e67/wreq/issues/660)) - ([e666be4](https://github.com/0x676e67/wreq/commit/e666be434af04458a86a4f8ae3d7bd1cf624002c))\n- Remove futures-util unless using stream/multipart/compression/websocket/core ([#653](https://github.com/0x676e67/wreq/issues/653)) - ([e3d0c9f](https://github.com/0x676e67/wreq/commit/e3d0c9f960dd7803e83b2c024d1e5f736bccd50c))\n- Drop futures-util for leaner core ([#648](https://github.com/0x676e67/wreq/issues/648)) - ([f46c161](https://github.com/0x676e67/wreq/commit/f46c1618e6a42722f024acad7db526b121536b44))\n- Backport use `hyper-util` Tunnel ([#642](https://github.com/0x676e67/wreq/issues/642)) - ([446719d](https://github.com/0x676e67/wreq/commit/446719daecf7e4e2479f7c7b5f3785c6da2bddad))\n- Renamed `tls_key_log_file` to `tls_keylog_file` for consistency ([#610](https://github.com/0x676e67/wreq/issues/610)) - ([5d1a85a](https://github.com/0x676e67/wreq/commit/5d1a85a1cc04a2380091398dee43146823590545))\n\n### Documentation\n\n- *(README)* Update for HTTP/3 over QUIC support 
- ([bba899c](https://github.com/0x676e67/wreq/commit/bba899c2b579f97a399b628c71f179ed07236a75))\n- *(client)* Update `tcp_user_timeout` docs - ([1fa4d44](https://github.com/0x676e67/wreq/commit/1fa4d44394bd3fe1efe3bbbbd127a05cfc80d20d))\n- *(client)* Clarify `Client` method usage ([#795](https://github.com/0x676e67/wreq/issues/795)) - ([3f56875](https://github.com/0x676e67/wreq/commit/3f56875e6c58176b7a73ed0464b0e6fcf5e16f8c))\n- *(client)* Update `tower` middleware integration documentation ([#716](https://github.com/0x676e67/wreq/issues/716)) - ([6094176](https://github.com/0x676e67/wreq/commit/60941762e8addec2788c3ae97cf7714eab9967cd))\n- *(connect)* Update docs for `Connector` builder - ([62b3b4a](https://github.com/0x676e67/wreq/commit/62b3b4a7291847faf36b694513ab38970fd3bda2))\n- *(layer)* Update docs - ([ff14827](https://github.com/0x676e67/wreq/commit/ff1482791d218acff48469c04af4bccc8e38e44b))\n- *(middleware)* Update module docs - ([b6b7071](https://github.com/0x676e67/wreq/commit/b6b7071c74844f365ffb4aa137f29be3f73cfd02))\n- *(proxy)* Fix some typos in comment ([#592](https://github.com/0x676e67/wreq/issues/592)) - ([25f85b0](https://github.com/0x676e67/wreq/commit/25f85b06ce72181009e8e2727977557b47df4c68))\n- *(timeout)* Update docs - ([512fa22](https://github.com/0x676e67/wreq/commit/512fa2281e665cb79e459ce6e3b5d6e124aed25a))\n- *(tls)* Update prefer chacha20 documentation - ([9652f46](https://github.com/0x676e67/wreq/commit/9652f46735ad774e956b747fd2c2ff4f3dcb7bd9))\n- *(ws)* Remove redundant comments - ([b401440](https://github.com/0x676e67/wreq/commit/b4014407690c8c73850c2a4299553f336e659fe5))\n- *(x509)* Clarify thread safety and usage of CertStore ([#846](https://github.com/0x676e67/wreq/issues/846)) - ([f1423f8](https://github.com/0x676e67/wreq/commit/f1423f8414ff01bb4b1c3e5318e1f917754ab9ca))\n- Revise and correct API documentation ([#724](https://github.com/0x676e67/wreq/issues/724)) - 
([458e473](https://github.com/0x676e67/wreq/commit/458e4731ff25ca2625c4c2c53921be66c8a6bb8b))\n- Improve formatting in documentation ([#696](https://github.com/0x676e67/wreq/issues/696)) - ([867a8a2](https://github.com/0x676e67/wreq/commit/867a8a273028926a838f71e7b6ead728c3234d11))\n- Fix package docs - ([da20f76](https://github.com/0x676e67/wreq/commit/da20f766e9a25cad0cff7c128be8f7f1c0f2099e))\n- Cleanup legacy server documentation - ([7a0b11c](https://github.com/0x676e67/wreq/commit/7a0b11cf8f013e644d63d70d07a0e21289c86bb9))\n- Update documentation build ([#609](https://github.com/0x676e67/wreq/issues/609)) - ([eb06ebc](https://github.com/0x676e67/wreq/commit/eb06ebc81ac24ae821c4756196b58585303b723a))\n\n### Performance\n\n- *(client)* Avoid full `ClientRef` clone by matching and cloning service in-place ([#758](https://github.com/0x676e67/wreq/issues/758)) - ([8e547ad](https://github.com/0x676e67/wreq/commit/8e547ad17a504a230eb770a1ce3b92b6d0765186))\n- *(client)* Replace `Box<dyn Trait>` with generic type for `Box<T>` ([#755](https://github.com/0x676e67/wreq/issues/755)) - ([eb07a2a](https://github.com/0x676e67/wreq/commit/eb07a2af08afb8be084f6623625af0f13a87745a))\n- *(client)* Optimize dyn trait response to reduce runtime overhead ([#746](https://github.com/0x676e67/wreq/issues/746)) - ([0d5cbaf](https://github.com/0x676e67/wreq/commit/0d5cbaf03ff4646c0b81152022fb223a2ffee329))\n- *(client)* Optimize response future wrapping calls ([#726](https://github.com/0x676e67/wreq/issues/726)) - ([e24a0cd](https://github.com/0x676e67/wreq/commit/e24a0cdc422576f68b450f7b96c678fc2655f400))\n- *(client)* Remove redundant execute request calls ([#718](https://github.com/0x676e67/wreq/issues/718)) - ([4285cf7](https://github.com/0x676e67/wreq/commit/4285cf7278813a9c2e6e1de7d77bfe7c9fc82470))\n- *(client)* Avoid redundant box of `tower` layers ([#717](https://github.com/0x676e67/wreq/issues/717)) - 
([0ae67f8](https://github.com/0x676e67/wreq/commit/0ae67f8b6aed1b956d1314fa2dc03f310f430286))\n- *(connect)* Simplify complex `TokioIo` wrapper ([#763](https://github.com/0x676e67/wreq/issues/763)) - ([807c33b](https://github.com/0x676e67/wreq/commit/807c33b0a2e47ef5da081b475c584541f27a54d0))\n- *(connect)* Embed single timeout layer directly to avoid `Box::pin` ([#725](https://github.com/0x676e67/wreq/issues/725)) - ([9d24080](https://github.com/0x676e67/wreq/commit/9d2408034372617a49f863f4fab9be381e46f1d7))\n- *(cookie)* Avoid redundant conditional checks ([#730](https://github.com/0x676e67/wreq/issues/730)) - ([574ab8e](https://github.com/0x676e67/wreq/commit/574ab8ef32b8fd91007681d987336e518802c27e))\n- *(cookie)* Avoid unnecessary URL parsing in cookie handling ([#699](https://github.com/0x676e67/wreq/issues/699)) - ([fa07991](https://github.com/0x676e67/wreq/commit/fa079912830a947df50632dd98751f7f351d5b4d))\n- *(decoder)* Avoid unnecessary clone of decompression service ([#828](https://github.com/0x676e67/wreq/issues/828)) - ([ce78205](https://github.com/0x676e67/wreq/commit/ce78205750b08ae9c2565118870c9974681dd95e))\n- *(ext)* Avoid deep calls when inlining is disabled ([#799](https://github.com/0x676e67/wreq/issues/799)) - ([e14a159](https://github.com/0x676e67/wreq/commit/e14a1592f68e235af88a275d52ce7b21f7a3306e))\n- *(hash)* Improve hashing efficiency for large structures ([#780](https://github.com/0x676e67/wreq/issues/780)) - ([7a7730e](https://github.com/0x676e67/wreq/commit/7a7730e2c71b0005a31ce94298236691be5a5750))\n- *(proxy)* Remove unnecessary sorting from `HeaderMap` ([#857](https://github.com/0x676e67/wreq/issues/857)) - ([2de64fe](https://github.com/0x676e67/wreq/commit/2de64fe14c591d07c07cf28d582dc8bebe7069d5))\n- *(proxy)* Remove unnecessary call wrapping ([#855](https://github.com/0x676e67/wreq/issues/855)) - ([2472d39](https://github.com/0x676e67/wreq/commit/2472d39e2128740e437c3d0846f18ea0ff96c148))\n- *(proxy)* Use zero-copy Bytes for 
proxy credentials ([#729](https://github.com/0x676e67/wreq/issues/729)) - ([5bb8e06](https://github.com/0x676e67/wreq/commit/5bb8e06499613d13fab1dc573ce8a1b61b70c23f))\n- *(redirect)* Avoid copy when redirection is unsupported ([#728](https://github.com/0x676e67/wreq/issues/728)) - ([741b81e](https://github.com/0x676e67/wreq/commit/741b81edc5201f79542c7e09eb3d46b0f3440062))\n- *(req/resp)* Inline frequently called accessor methods - ([7dc3424](https://github.com/0x676e67/wreq/commit/7dc3424a807bb5c60481cb0c6fb6551be2cefacd))\n- *(response)* Avoid unnecessary URL cloning ([#747](https://github.com/0x676e67/wreq/issues/747)) - ([95743b3](https://github.com/0x676e67/wreq/commit/95743b37522f8992803427809ed2e0a90ae7902d))\n- *(socks)* Optimize SOCKS connection handling ([#769](https://github.com/0x676e67/wreq/issues/769)) - ([5d3fe85](https://github.com/0x676e67/wreq/commit/5d3fe8530dedf76f4fc937981a29fccfbfb764c1))\n- *(socks)* Optimize DNS resolution with custom non-blocking resolver ([#687](https://github.com/0x676e67/wreq/issues/687)) - ([49520ce](https://github.com/0x676e67/wreq/commit/49520ce80b6211ec85abfda9655b9196e34c0438))\n- *(timeout)* Encapsulate all per-request timeout extensions uniformly ([#804](https://github.com/0x676e67/wreq/issues/804)) - ([dab45fd](https://github.com/0x676e67/wreq/commit/dab45fde9c70e646d576f049e4a46b7c5113fcb3))\n- *(timeout)* Reduce unnecessary `as_mut` calls ([#719](https://github.com/0x676e67/wreq/issues/719)) - ([fa9570c](https://github.com/0x676e67/wreq/commit/fa9570c35220963e2c17a0741edaebf0fc340974))\n- *(tls)* Inline builder hot path code - ([bc2ff43](https://github.com/0x676e67/wreq/commit/bc2ff43c1b4c39293426cce42724db1b2afd789f))\n- *(tls)* Flatten TLS info construction for better performance ([#847](https://github.com/0x676e67/wreq/issues/847)) - ([2ab4edd](https://github.com/0x676e67/wreq/commit/2ab4edd01c2c022ae4bda0312c3f6307371916e9))\n- *(tls)* Connect stage reduces call chains - 
([29c9bd8](https://github.com/0x676e67/wreq/commit/29c9bd8d9beae3be15de37693341e192b8225e0a))\n- *(ws)* Inline frequently called accessor methods ([#782](https://github.com/0x676e67/wreq/issues/782)) - ([929d917](https://github.com/0x676e67/wreq/commit/929d91777539911994527ed6d15ebf31e463b689))\n- Inline hotspot `poll` method to reduce call overhead ([#714](https://github.com/0x676e67/wreq/issues/714)) - ([8c26d8b](https://github.com/0x676e67/wreq/commit/8c26d8b8f58de8a00d7e0a17dc63ccdfe1145653))\n\n### Styling\n\n- *(client)* Shorten paths in type aliases ([#733](https://github.com/0x676e67/wreq/issues/733)) - ([c83b8e8](https://github.com/0x676e67/wreq/commit/c83b8e82a4b21d63c79922df09b737066e3f314d))\n- *(connector)* Simplify user-defined timeout layer setup ([#827](https://github.com/0x676e67/wreq/issues/827)) - ([d620a25](https://github.com/0x676e67/wreq/commit/d620a252eb7549b8cdd079897736d2847e1019cc))\n- *(cookie)* Sync upstream API style ([#659](https://github.com/0x676e67/wreq/issues/659)) - ([03041af](https://github.com/0x676e67/wreq/commit/03041af75026269db1763636390a3bf72fe000d4))\n- *(proxy)* Simplify `Matcher` Debug implementation - ([f15f36e](https://github.com/0x676e67/wreq/commit/f15f36e158091bf352fcfc334d9056d84889e6f8))\n- *(proxy)* Simplify path constraint for `http::Uri` - ([171e7b8](https://github.com/0x676e67/wreq/commit/171e7b83b6e8647f05313f9b4bfbf24e6300cc78))\n- *(redirect)* Rename `TowerRedirectPolicy` to `RedirectPolicy` - ([1e4431b](https://github.com/0x676e67/wreq/commit/1e4431b92f765397f89f92f542111bf5e694682f))\n- *(request)* Simplify request config access ([#793](https://github.com/0x676e67/wreq/issues/793)) - ([0f6f523](https://github.com/0x676e67/wreq/commit/0f6f5232510cb9cce4b437a9e81685377f56fae2))\n- *(socks)* Clippy format - ([20c8236](https://github.com/0x676e67/wreq/commit/20c8236d85e87c1693e01e61566f9d6f46652055))\n- *(tunnel)* Fmt code - 
([1a489b5](https://github.com/0x676e67/wreq/commit/1a489b5305512094d43274659173f2625a45ba0c))\n- Format crate imports for consistency ([#709](https://github.com/0x676e67/wreq/issues/709)) - ([777c6e5](https://github.com/0x676e67/wreq/commit/777c6e5e137024d6f09bf1b53eff7434e573cbb4))\n- Fmt code - ([7fb9b1e](https://github.com/0x676e67/wreq/commit/7fb9b1e88df9e088b3920620c84aad1ea0d2a7bb))\n\n### Testing\n\n- *(badssl)* Enable test_aes_hw_override test - ([a37219a](https://github.com/0x676e67/wreq/commit/a37219a47b0903375d033cc9a5c6e3701dcb4b74))\n- *(deps)* Bump `hyper-util` to v0.1.13 ([#667](https://github.com/0x676e67/wreq/issues/667)) - ([862361c](https://github.com/0x676e67/wreq/commit/862361cac33a200bddbdd2c6b3430da36bccadda))\n- *(emulation)* Add tests for additional emulation options ([#823](https://github.com/0x676e67/wreq/issues/823)) - ([e0b76a8](https://github.com/0x676e67/wreq/commit/e0b76a8d40cf0795fc5a81704248edc88b55b439))\n- *(emulation)* Add firefox tests ([#822](https://github.com/0x676e67/wreq/issues/822)) - ([6ed1974](https://github.com/0x676e67/wreq/commit/6ed1974744138b6d6f0cd678d652ec32fcab1751))\n- *(timeout)* Update timeout tests ([#691](https://github.com/0x676e67/wreq/issues/691)) - ([3781cef](https://github.com/0x676e67/wreq/commit/3781cefb547d0052d8b96a781ec6096ce86e2a64))\n- Remove redundant decompression tests ([#734](https://github.com/0x676e67/wreq/issues/734)) - ([8efcd19](https://github.com/0x676e67/wreq/commit/8efcd19925d654ff4cc4a2f61c70672e2890fa60))\n- Tests affected by removal of proxy-related environment variables ([#692](https://github.com/0x676e67/wreq/issues/692)) - ([79648b5](https://github.com/0x676e67/wreq/commit/79648b531199cbe86b1c0db4d570e38cf25ff2da))\n- Switch over from libflate to flate2 in tests to reduce dependency footprint ([#593](https://github.com/0x676e67/wreq/issues/593)) - ([dc74305](https://github.com/0x676e67/wreq/commit/dc74305dc83a19ce0f0320a91d42ee1e76f13860))\n\n### Miscellaneous Tasks\n\n- 
*(body)* Re-expose body mod - ([99e27f2](https://github.com/0x676e67/wreq/commit/99e27f203c2dd766707494f3c76f8f5a4d69b092))\n- *(body)* Re-expose body mod - ([1d9ee72](https://github.com/0x676e67/wreq/commit/1d9ee729de0eacb68167506f456db871146ec85c))\n- *(client)* Remove unused comment - ([efac842](https://github.com/0x676e67/wreq/commit/efac842c9d54bf4e6e7fd83779c4123dab81f48c))\n- *(client)* Defer initialization of internal client ([#811](https://github.com/0x676e67/wreq/issues/811)) - ([f5817c6](https://github.com/0x676e67/wreq/commit/f5817c63aa020faf6146343b002cd912a5dbe6cc))\n- *(client)* Fmt future.rs - ([3a6c265](https://github.com/0x676e67/wreq/commit/3a6c26545ed768e8e7a7ce73427bef538a74604c))\n- *(client)* Eliminate redundant cloning of `tower` middleware ([#698](https://github.com/0x676e67/wreq/issues/698)) - ([c52bb1d](https://github.com/0x676e67/wreq/commit/c52bb1d6b7f5be7158d1cf28c7df41b90dd7fc14))\n- *(client)* Refactor client into responsibility-specific modules ([#683](https://github.com/0x676e67/wreq/issues/683)) - ([d70a9f2](https://github.com/0x676e67/wreq/commit/d70a9f29ce0c6f7b66f60b1c83af16f906b72821))\n- *(config)* Merge standalone `config` into `middleware/config` ([#771](https://github.com/0x676e67/wreq/issues/771)) - ([96168aa](https://github.com/0x676e67/wreq/commit/96168aa679a600d402eb1f4daca124ddcc16dd40))\n- *(connect)* Simplify parameters and improve documentation ([#858](https://github.com/0x676e67/wreq/issues/858)) - ([0eb219b](https://github.com/0x676e67/wreq/commit/0eb219be71f8d005c48fcd80988e758f176f83da))\n- *(connect)* Simplify conditional cfg for TCP keepalive ([#842](https://github.com/0x676e67/wreq/issues/842)) - ([0c40c3a](https://github.com/0x676e67/wreq/commit/0c40c3a09c5d4bf9dec805b2fa1e79fc686afa9a))\n- *(connect)* Relocate `connect` module to `http` ([#818](https://github.com/0x676e67/wreq/issues/818)) - ([77b00be](https://github.com/0x676e67/wreq/commit/77b00be6dd2f3f46704a152bbde9fbdabf787f1e))\n- *(connect)* 
Simplified type import - ([fe10748](https://github.com/0x676e67/wreq/commit/fe10748d88a23979192e5141a119c8d40dc49d22))\n- *(connect)* Fmt code - ([53c9a24](https://github.com/0x676e67/wreq/commit/53c9a24ddc998f0bbde5812725ea869a63707ca7))\n- *(connector)* Fmt code - ([a703915](https://github.com/0x676e67/wreq/commit/a703915da5d5f78ed4887bd868b3ebcf5f9b756c))\n- *(cookie)* Cleanup unused error types - ([81bcf3f](https://github.com/0x676e67/wreq/commit/81bcf3fed32c6f00c133c794c9aa9162a42b0c81))\n- *(cookie)* Fmt code - ([c9e03b1](https://github.com/0x676e67/wreq/commit/c9e03b1f39222c03d49a620189ba9131996189d2))\n- *(core)* Format `http1` and `http2` options wrappers ([#813](https://github.com/0x676e67/wreq/issues/813)) - ([6803663](https://github.com/0x676e67/wreq/commit/680366361c809d226e08ab5e5cfcd9635c88a409))\n- *(core)* Shorten `crate::core::Error` to `Error` via import ([#797](https://github.com/0x676e67/wreq/issues/797)) - ([1bd5666](https://github.com/0x676e67/wreq/commit/1bd5666143dcfcae2b020fb0fea06362375ceffe))\n- *(core)* Remove unused `task` mod - ([121a46b](https://github.com/0x676e67/wreq/commit/121a46bcc0888ea0b7525cd2f0e29020da30da8a))\n- *(core)* Remove unused `rewind` mod - ([e6a6ec0](https://github.com/0x676e67/wreq/commit/e6a6ec03bebf77332d26400104c6045268b87622))\n- *(core)* Remove legacy code duplicated with `tower::util` ([#727](https://github.com/0x676e67/wreq/issues/727)) - ([ed218cf](https://github.com/0x676e67/wreq/commit/ed218cf18c1b6208aee727c77b1627b070f36559))\n- *(core)* Remove duplicate code - ([a22bcf7](https://github.com/0x676e67/wreq/commit/a22bcf73888f5e53a6f77ed535c736c97146a8fb))\n- *(decoder)* Merge standalone `decoder` into `middleware/decoder` ([#770](https://github.com/0x676e67/wreq/issues/770)) - ([b917192](https://github.com/0x676e67/wreq/commit/b917192b6e0e62374bf216974f006ccb52035696))\n- *(dispatch)* Cleanup legacy unused code ([#796](https://github.com/0x676e67/wreq/issues/796)) - 
([4153e07](https://github.com/0x676e67/wreq/commit/4153e07a38bf21c6d3ecfeb366948f1ea6684710))\n- *(emulation)* Derive(Clone) for Emulation ([#862](https://github.com/0x676e67/wreq/issues/862)) - ([1ec7a09](https://github.com/0x676e67/wreq/commit/1ec7a093340c5c2f1c2c5fbc0b2adf60b388019a))\n- *(example)* Format code in examples - ([a0e63c5](https://github.com/0x676e67/wreq/commit/a0e63c54b55ef4cda748609289d02a0caf570f89))\n- *(example)* Update examples - ([b089c6e](https://github.com/0x676e67/wreq/commit/b089c6e892d214e48a43f5c53c896211219039f1))\n- *(examples)* Update examples - ([b8b52ba](https://github.com/0x676e67/wreq/commit/b8b52ba3c86a6e29ec6c4e7f9f6c12d8688a0049))\n- *(ext)* Encapsulate all per-request extensions uniformly ([#801](https://github.com/0x676e67/wreq/issues/801)) - ([d77d340](https://github.com/0x676e67/wreq/commit/d77d340e2d9c0b8c7440805c248b09797fe62d10))\n- *(ext)* Move `http2::ext::Protocol` extension into request config ([#798](https://github.com/0x676e67/wreq/issues/798)) - ([b7cfbe9](https://github.com/0x676e67/wreq/commit/b7cfbe97ea7e9d03b3b9adb32d49ea665ea34566))\n- *(internal)* Normalize internal error handling APIs ([#773](https://github.com/0x676e67/wreq/issues/773)) - ([65b574a](https://github.com/0x676e67/wreq/commit/65b574a2ea6b42b5f5f9c347d49e5a6c89382125))\n- *(internal)* Normalize internal APIs ([#772](https://github.com/0x676e67/wreq/issues/772)) - ([3cfa301](https://github.com/0x676e67/wreq/commit/3cfa301080cb9a0dd97256a170f6801f5ce1b977))\n- *(lib)* Sort module declarations - ([adc8b58](https://github.com/0x676e67/wreq/commit/adc8b58635acb638e1bcccf51801c65be5d07760))\n- *(pool)* Eliminate type duplication with aliases - ([5ea3b07](https://github.com/0x676e67/wreq/commit/5ea3b07a62f85c5beb46ba673d26bf76415c06e6))\n- *(proxy)* Assign proper connector names for `Tunnel` and `Socks` ([#815](https://github.com/0x676e67/wreq/issues/815)) - 
([22d2be1](https://github.com/0x676e67/wreq/commit/22d2be1073988f8f7c76f592b731b0156c72c898))\n- *(rt/tokio)* Cleanup unused code - ([c1c5e34](https://github.com/0x676e67/wreq/commit/c1c5e34a15c484d6829e03a2a903bbbf3357ccd3))\n- *(socks)* Rename 'with_local_dns' to 'with_dns_mode' - ([7430a6a](https://github.com/0x676e67/wreq/commit/7430a6a213b278cb2f6356b0ed8da26d2cd323a2))\n- *(sync)* Simplify lifetime annotations - ([834258c](https://github.com/0x676e67/wreq/commit/834258c75455a35be1c978b43deda8104af73876))\n- *(sync)* Remove dead code - ([a628d1c](https://github.com/0x676e67/wreq/commit/a628d1c17b3748379a493245cf71254ce6800fbb))\n- *(test)* Remove miri exception configs - ([0511365](https://github.com/0x676e67/wreq/commit/0511365422af4d70d045f9c00e34fd77e9c15a8d))\n- *(tls)* Add examples for root and self-signed certificates ([#792](https://github.com/0x676e67/wreq/issues/792)) - ([8691db0](https://github.com/0x676e67/wreq/commit/8691db07db75ea5f6a9dafd9bc0c44cd3dadab20))\n- *(tls)* Remove the legacy curves configuration API ([#637](https://github.com/0x676e67/wreq/issues/637)) - ([2459de9](https://github.com/0x676e67/wreq/commit/2459de97819e3eeb7b0569d99387e63ee099c6a4))\n- *(types)* Merge `GenericClientService` related types - ([303584c](https://github.com/0x676e67/wreq/commit/303584cbfc832c9528439ba320651aada1d10504))\n- *(x509)* Cleanup mixed parsing of pem/der certificates - ([edc4e7d](https://github.com/0x676e67/wreq/commit/edc4e7df38150f899b353b0766653b867a384e05))\n- *(x509)* Cleanup dead code - ([42b741b](https://github.com/0x676e67/wreq/commit/42b741bef019e63b0c05d573a550c6e423458bac))\n- Sort module declarations - ([8699a4b](https://github.com/0x676e67/wreq/commit/8699a4b2cc06990e9848962563289fa4a7b4b059))\n- Remove unused `#[allow]` attributes ([#809](https://github.com/0x676e67/wreq/issues/809)) - ([5bc5cca](https://github.com/0x676e67/wreq/commit/5bc5cca594b4717d343e04b7a7b348b371be7486))\n- Fix typo - 
([7b800c5](https://github.com/0x676e67/wreq/commit/7b800c5efbe48899d9ae7e6f9a129a0d8459a990))\n- Cleanup dead code - ([5be4443](https://github.com/0x676e67/wreq/commit/5be4443f78d0cbad72ffa640414e094d20e0fe09))\n- Cleanup unused macros and format definitions - ([9f925d3](https://github.com/0x676e67/wreq/commit/9f925d384f5723c68001041c00e4865172f68c8e))\n- Cleanup redundant and unused type exports ([#704](https://github.com/0x676e67/wreq/issues/704)) - ([a583a7f](https://github.com/0x676e67/wreq/commit/a583a7ff9e179efccdf6d4877792e09016029e7f))\n- Clean up redundant type exports ([#684](https://github.com/0x676e67/wreq/issues/684)) - ([4af36f5](https://github.com/0x676e67/wreq/commit/4af36f5bbaa0ea1614e70e8a0c60a9bbb2079e93))\n- Fmt example code ([#656](https://github.com/0x676e67/wreq/issues/656)) - ([7ad2496](https://github.com/0x676e67/wreq/commit/7ad24960c352257104d60d5be477631446e8793e))\n- Apply clippy fixes required by CI ([#649](https://github.com/0x676e67/wreq/issues/649)) - ([d1e43d4](https://github.com/0x676e67/wreq/commit/d1e43d4a8019f73cb13c69ddf6e6c0852e95c5d3))\n- Fix clippy module inception - ([6e66dd6](https://github.com/0x676e67/wreq/commit/6e66dd6fd18bb586a24a6988faea0398f7093923))\n- Update examples and clean up dead code - ([636b510](https://github.com/0x676e67/wreq/commit/636b510a63ed4a3ff71abffc6c5eef24245fa889))\n- Remove unused comments - ([ff484b2](https://github.com/0x676e67/wreq/commit/ff484b2888333b5e17adb3bd7681271ba6e6d201))\n\n### Build\n\n- *(deps)* Update tokio requirement from 1 to 1.47.0 - ([e7bab63](https://github.com/0x676e67/wreq/commit/e7bab6356d30115c2251eff8f9b10f7d7de58778))\n- *(deps)* Update dependencies - ([42eaba7](https://github.com/0x676e67/wreq/commit/42eaba7444f6d19bb85200b58c8c837a25fbf146))\n- *(deps)* Reduce dependency on `tokio-util` ([#837](https://github.com/0x676e67/wreq/issues/837)) - ([69c178d](https://github.com/0x676e67/wreq/commit/69c178dc3bb2655ff7dd5e782e957898f8933011))\n- *(deps)* Simplify dev 
dependencies - ([2b6ae59](https://github.com/0x676e67/wreq/commit/2b6ae5925d79c19f6adb13cf1a1bb8c051a33eda))\n- *(deps)* Simplify dev dependencies - ([9743ca7](https://github.com/0x676e67/wreq/commit/9743ca7138723659a5fb7946947c59518c3d0123))\n- *(deps)* Update cookie_store requirement from 0.21 to 0.22 ([#829](https://github.com/0x676e67/wreq/issues/829)) - ([8453aa2](https://github.com/0x676e67/wreq/commit/8453aa272d8756fbd9a69d061803926d86f6774c))\n- *(deps)* Update socket2 requirement from 0.5.10 to 0.6.0 ([#778](https://github.com/0x676e67/wreq/issues/778)) - ([73bd5a0](https://github.com/0x676e67/wreq/commit/73bd5a027e2dddaaa313ae19ed363cbea637cbf6))\n- *(deps)* Drop `tower-service` (redundant with `tower::Service`) ([#800](https://github.com/0x676e67/wreq/issues/800)) - ([9de6cb3](https://github.com/0x676e67/wreq/commit/9de6cb31e417b00210d277ffd8460da5eb8e0eea))\n- *(deps)* Remove redundant `atomic-waker` dependency ([#776](https://github.com/0x676e67/wreq/issues/776)) - ([b0cc9cd](https://github.com/0x676e67/wreq/commit/b0cc9cd6e96108a522b9299aaa8581d5f1780848))\n- *(deps)* Remove redundant `futures-core` dep ([#774](https://github.com/0x676e67/wreq/issues/774)) - ([b04e162](https://github.com/0x676e67/wreq/commit/b04e162995afbabbbbc1dcf47464e2dd372a7574))\n- *(deps)* Replace `lru` with faster `schnellru` implementation ([#754](https://github.com/0x676e67/wreq/issues/754)) - ([100bab9](https://github.com/0x676e67/wreq/commit/100bab9cfebf2df645314f83102fc4fa079e4479))\n- *(deps)* Remove support for `rustls-native-certs` ([#752](https://github.com/0x676e67/wreq/issues/752)) - ([144bc8a](https://github.com/0x676e67/wreq/commit/144bc8abac0a0a38b8ff2c44e5d6edb1ff2b7046))\n- *(deps)* Optionally use `parking_lot` for lock implementation ([#750](https://github.com/0x676e67/wreq/issues/750)) - ([da30d6b](https://github.com/0x676e67/wreq/commit/da30d6beef7182e507659417c4357751866cbdd7))\n- *(deps)* Prepare for Boring 5 upgrade 
([#735](https://github.com/0x676e67/wreq/issues/735)) - ([77cfc8d](https://github.com/0x676e67/wreq/commit/77cfc8dcb7d4aec9df9ed7c3656633a4ffcc407e))\n- *(deps)* Update tokio-tungstenite requirement from 0.26.2 to 0.27.0 ([#721](https://github.com/0x676e67/wreq/issues/721)) - ([63f7a4b](https://github.com/0x676e67/wreq/commit/63f7a4b68881a2e453fdf9413082fac7e5d4021e))\n- *(deps)* Update webpki-root-certs requirement from 0.26.0 to 1.0.0 ([#631](https://github.com/0x676e67/wreq/issues/631)) - ([acb44fe](https://github.com/0x676e67/wreq/commit/acb44fe8c78b98f3c54dab05ab68035a6d449515))\n- *(deps)* Remove `typed-builder` dependency ([#620](https://github.com/0x676e67/wreq/issues/620)) - ([5e037ac](https://github.com/0x676e67/wreq/commit/5e037ac61cbadc98bb37b3d851d3401e78023fb7))\n- *(deps)* Update libc requirement from 2.0.11 to 0.2.172 ([#611](https://github.com/0x676e67/wreq/issues/611)) - ([888ef8a](https://github.com/0x676e67/wreq/commit/888ef8a2b709c113d2dbeac3457d35f61436b741))\n- *(deps)* Update async-compression requirement from 0.4.21 to 0.4.23 ([#606](https://github.com/0x676e67/wreq/issues/606)) - ([6dc0026](https://github.com/0x676e67/wreq/commit/6dc002668c72038b87d1bd5e3edae4b47cc2f125))\n- *(deps)* Update boring2 requirement from 4.15.11 to 4.15.12 ([#607](https://github.com/0x676e67/wreq/issues/607)) - ([bfe8c12](https://github.com/0x676e67/wreq/commit/bfe8c1256eded7b68826993e91d9729074d699f6))\n- *(deps)* Update brotli requirement from 7.0.0 to 8.0.0 ([#601](https://github.com/0x676e67/wreq/issues/601)) - ([86849dd](https://github.com/0x676e67/wreq/commit/86849dd73b5753a3bf0cbddfd613686d6013ab9a))\n- *(deps)* Update socket2 requirement from 0.5.8 to 0.5.9 ([#599](https://github.com/0x676e67/wreq/issues/599)) - ([dec8352](https://github.com/0x676e67/wreq/commit/dec8352ca33a74314e7a671858763ced272bc12f))\n- *(deps)* Update lru requirement from 0.13 to 0.14 ([#597](https://github.com/0x676e67/wreq/issues/597)) - 
([e557749](https://github.com/0x676e67/wreq/commit/e557749d078d50cf2fddf59df094ed5ce591128d))\n- *(feature)* Drop redundant `sync_wrapper` ([#817](https://github.com/0x676e67/wreq/issues/817)) - ([a737f56](https://github.com/0x676e67/wreq/commit/a737f56e3c398378726666851773242470cb40a7))\n- *(feature)* Rename `websocket` feature to `ws` ([#816](https://github.com/0x676e67/wreq/issues/816)) - ([d15b2d5](https://github.com/0x676e67/wreq/commit/d15b2d530e37299dc9e77559f2c2289424a4799d))\n- *(sync)* Remove optional `parking_lot` support - ([b109eb9](https://github.com/0x676e67/wreq/commit/b109eb99c424d92f9912509105b50a26f02bee36))\n- Drop `full` feature ([#803](https://github.com/0x676e67/wreq/issues/803)) - ([12b4d64](https://github.com/0x676e67/wreq/commit/12b4d64eba7c2c5c34f89b1a10247814f01be095))\n- Drop deprecated `macos-system-configuration` feature ([#775](https://github.com/0x676e67/wreq/issues/775)) - ([7caa4ad](https://github.com/0x676e67/wreq/commit/7caa4ad5327d437ece815fda99635f99f9cd062c))\n- Cleanup deprecated feature - ([8d1632b](https://github.com/0x676e67/wreq/commit/8d1632b73e0994091136c7a60a2e801e65e9b440))\n\n### Deps\n\n- *(boring)* Basic support for LoongArch ([#622](https://github.com/0x676e67/wreq/issues/622)) - ([bcc53cf](https://github.com/0x676e67/wreq/commit/bcc53cf260e31605376bc72fb7acae53fb385a4b))\n- Prune unnecessary dependencies ([#681](https://github.com/0x676e67/wreq/issues/681)) - ([d9aecea](https://github.com/0x676e67/wreq/commit/d9aecead61dccb481b8d39744ece30d66d1ea41f))\n\n## New Contributors ❤️\n\n* @incizzle made their first contribution in [#608](https://github.com/0x676e67/wreq/pull/608)\n\n## [5.1.0](https://github.com/0x676e67/wreq/compare/v5.0.0..v5.1.0) - 2025-03-29\n\n### Features\n\n- *(cookie)* Optional enable of sending multiple cookies in `CookieStore` ([#578](https://github.com/0x676e67/wreq/issues/578)) - ([6678fbf](https://github.com/0x676e67/wreq/commit/6678fbfa22aa259a20fe1868bb41d94851765492))\n- *(cookie)* 
Cookies feature optionally preserves order ([#573](https://github.com/0x676e67/wreq/issues/573)) - ([803852b](https://github.com/0x676e67/wreq/commit/803852b43e127f0c89aea2a81e75ad4d04c951bd))\n- *(proxy)* Enhanced websocket level proxy options ([#569](https://github.com/0x676e67/wreq/issues/569)) - ([a6c9a75](https://github.com/0x676e67/wreq/commit/a6c9a75dd68f99095bbf70cb95d2955b89b2271b))\n- *(request)* Optionally allow compression in request ([#581](https://github.com/0x676e67/wreq/issues/581)) - ([dc2c148](https://github.com/0x676e67/wreq/commit/dc2c1483dca066f4bc9b02f3504c5c86edd45438))\n- *(x509)* Support for using a private key and X.509 certificate as a client certificate ([#588](https://github.com/0x676e67/wreq/issues/588)) - ([3fbcc89](https://github.com/0x676e67/wreq/commit/3fbcc89775fe0e65e5c5cfa86319350ab4cada7d))\n- *(x509)* Auto detect and parse `DER`/`PEM` certificate formats ([#584](https://github.com/0x676e67/wreq/issues/584)) - ([3ab1681](https://github.com/0x676e67/wreq/commit/3ab168126ed4fe41c5dbe5e0bc56d2f87734d679))\n- Expose `tls` and `websocket` modules ([#587](https://github.com/0x676e67/wreq/issues/587)) - ([a771463](https://github.com/0x676e67/wreq/commit/a771463508f66314f52a725bca6bb8de042843b7))\n\n### Bug Fixes\n\n- *(client)* Adapt sorting for duplicate headers such as cookies ([#576](https://github.com/0x676e67/wreq/issues/576)) - ([a786a85](https://github.com/0x676e67/wreq/commit/a786a8595079b1647c1d1a6ab571ffb199b11a5d))\n- *(request)* Fix `try_clone` missing protocol extension ([#579](https://github.com/0x676e67/wreq/issues/579)) - ([0e9872d](https://github.com/0x676e67/wreq/commit/0e9872dd370a8a70d38139b30c14113495418b86))\n\n### Documentation\n\n- *(request)* Improve request header parameter docs ([#580](https://github.com/0x676e67/wreq/issues/580)) - ([f03c1c8](https://github.com/0x676e67/wreq/commit/f03c1c8d6aff7e2fba2aeb60a03e991f714e9662))\n- *(response)* Link to `char::REPLACEMENT_CHARACTER` 
([#586](https://github.com/0x676e67/wreq/issues/586)) - ([b0abcb6](https://github.com/0x676e67/wreq/commit/b0abcb636b5c5b86089cfbf1f39ebdc966da1e30))\n- Update certificate store description ([#572](https://github.com/0x676e67/wreq/issues/572)) - ([f1b076f](https://github.com/0x676e67/wreq/commit/f1b076f8321987f9d4ece641b557261277128cbb))\n- Improved emulation description ([#571](https://github.com/0x676e67/wreq/issues/571)) - ([5924815](https://github.com/0x676e67/wreq/commit/5924815a05b4512381815a2f4d66daf4e855f538))\n- Update examples docs ([#570](https://github.com/0x676e67/wreq/issues/570)) - ([591e4b3](https://github.com/0x676e67/wreq/commit/591e4b3e1b63bc5911b6e1f64643c32c7d3475f0))\n\n### Performance\n\n- *(cookie)* Optimize the performance of cookies compression ([#574](https://github.com/0x676e67/wreq/issues/574)) - ([6c2280c](https://github.com/0x676e67/wreq/commit/6c2280c82a252f4de2289e74fc88a9d6058a6941))\n- *(request)* Improve `json`/`form` request performance ([#583](https://github.com/0x676e67/wreq/issues/583)) - ([cce1fcf](https://github.com/0x676e67/wreq/commit/cce1fcfbad9b6f7d519b0c6f629087bded222ae4))\n\n### Styling\n\n- *(client)* Fmt import - ([f509c52](https://github.com/0x676e67/wreq/commit/f509c5298e4f1865f71a862e6882d420b9c06d24))\n- *(client)* Fmt code - ([ca9bc96](https://github.com/0x676e67/wreq/commit/ca9bc96d85cfdd90e6f06c1b59b952a46946d98a))\n- *(x509)* Fmt code - ([cc6fa5d](https://github.com/0x676e67/wreq/commit/cc6fa5d6bed622d569c50c5153d98e96664bac29))\n- *(x509)* Format compatible code ([#589](https://github.com/0x676e67/wreq/issues/589)) - ([a12a414](https://github.com/0x676e67/wreq/commit/a12a414105433151a583a605b9e0a0767639143c))\n\n### Testing\n\n- *(badssl)* Dynamically update peer certificate SSL pinning test ([#582](https://github.com/0x676e67/wreq/issues/582)) - ([a87b95f](https://github.com/0x676e67/wreq/commit/a87b95fbe37318a5e0e3a0c3b2e90c39bde49654))\n\n### Miscellaneous Tasks\n\n- *(client)* Remove dead code - 
([4de2978](https://github.com/0x676e67/wreq/commit/4de29785cd506fedb82ecfbb2355dcb966984d63))\n- *(http)* Rename `ClientInner` to `ClientRef` - ([1d01390](https://github.com/0x676e67/wreq/commit/1d01390103b0e424dfacc211fcb9b56b0c848da6))\n- *(tests)* Update client tests conditional ([#577](https://github.com/0x676e67/wreq/issues/577)) - ([684eb89](https://github.com/0x676e67/wreq/commit/684eb89a42febe7175c4f0fa5a2f2d8204514160))\n\n### Build\n\n- *(deps)* Upgrade dependencies ([#575](https://github.com/0x676e67/wreq/issues/575)) - ([cf6daf0](https://github.com/0x676e67/wreq/commit/cf6daf0662268f5f6d64bb06d4d8ea361cac46aa))\n\n\n## [5.0.0](https://github.com/0x676e67/wreq/compare/v3.0.6..v5.0.0) - 2025-03-23\n\n### Features\n\n- *(client)* Add a straightforward method for SSL pinning setup ([#556](https://github.com/0x676e67/wreq/issues/556)) - ([071d5ed](https://github.com/0x676e67/wreq/commit/071d5ed8ded32e5f40b6d21d2cea39920ddbe355))\n- *(client)* Ignore the requirement to configure tls in order ([#545](https://github.com/0x676e67/wreq/issues/545)) - ([213b0ac](https://github.com/0x676e67/wreq/commit/213b0ac73b0cace1cb70dee443de2de1bcc32b16))\n- *(cookie)* Impl `into_inner` for `Cookie` ([#542](https://github.com/0x676e67/wreq/issues/542)) - ([1f09ed5](https://github.com/0x676e67/wreq/commit/1f09ed5f46bb105618855e7a22f61b0a61454489))\n- *(cookie)* Impl `Display` for `Cookie` ([#541](https://github.com/0x676e67/wreq/issues/541)) - ([729669c](https://github.com/0x676e67/wreq/commit/729669cd23b87e8c303e7ae70c4bf60c9ee0f68c))\n- *(cookie)* Impl `into_owned` for cookie ([#535](https://github.com/0x676e67/wreq/issues/535)) - ([04d11ad](https://github.com/0x676e67/wreq/commit/04d11ada3cfe618927bad83304a886c39e7053bb))\n- *(error)* Added `Error::is_connection_reset()` - ([8a68b1a](https://github.com/0x676e67/wreq/commit/8a68b1a299b3f44108a475e5837d109c635fbf24))\n- *(proxy)* Enhanced client proxy options ([#534](https://github.com/0x676e67/wreq/issues/534)) - 
([4edbfef](https://github.com/0x676e67/wreq/commit/4edbfefadbfec1a797c179d3442a1a7b3345ec3f))\n- *(proxy)* Enhanced request level proxy options ([#533](https://github.com/0x676e67/wreq/issues/533)) - ([a69ac1b](https://github.com/0x676e67/wreq/commit/a69ac1ba37d4828d5f409ac6124497d7a84af42b))\n- *(ws)* Impl `from_bytes_unchecked` of `Utf8Bytes` ([#550](https://github.com/0x676e67/wreq/issues/550)) - ([0663aa5](https://github.com/0x676e67/wreq/commit/0663aa5e44d389d1b34c0ee6efd1d2136c774f57))\n- Remove shortcut for quickly make requests ([#560](https://github.com/0x676e67/wreq/issues/560)) - ([cb43f23](https://github.com/0x676e67/wreq/commit/cb43f23f9885a04b595c1caa4eef6323b63845aa))\n\n### Bug Fixes\n\n- *(client)* Preserve TLS settings when update client ([#552](https://github.com/0x676e67/wreq/issues/552)) - ([6a2e3e6](https://github.com/0x676e67/wreq/commit/6a2e3e60a6ac92977681c4c43308be05989c5dfe))\n- *(client)* Preserve TLS `RootCertStore` settings when update client ([#551](https://github.com/0x676e67/wreq/issues/551)) - ([ad72976](https://github.com/0x676e67/wreq/commit/ad7297660a753a97d614fd9bb657303b04c0eba5))\n- *(client)* Preserve TLS verify settings when update client ([#546](https://github.com/0x676e67/wreq/issues/546)) - ([21ad6e8](https://github.com/0x676e67/wreq/commit/21ad6e8beeeced18e928c35c6fee856047944321))\n- *(proxy)* Re-enable NO_PROXY envs on Windows ([#544](https://github.com/0x676e67/wreq/issues/544)) - ([f5eb6fe](https://github.com/0x676e67/wreq/commit/f5eb6fe28d167485ceec79afee25180e9b268314))\n\n### Refactor\n\n- *(client)* Rename max_retry_count to http2_max_retry_count - ([be29947](https://github.com/0x676e67/wreq/commit/be29947166db5c2ac7bcd3700f6cc50fcc9118dc))\n- *(client)* Delete tls fine-tuning config ([#530](https://github.com/0x676e67/wreq/issues/530)) - ([d7a75e3](https://github.com/0x676e67/wreq/commit/d7a75e393aa8d48b570d15aa66ce600a2ac8691c))\n- *(cookie)* Redesign cookie store API signature 
([#538](https://github.com/0x676e67/wreq/issues/538)) - ([2968839](https://github.com/0x676e67/wreq/commit/2968839c37c01950fd2be037c7bec1d64381f1f9))\n- *(cookie)* `max_age` type conversion fails to avoid panic ([#536](https://github.com/0x676e67/wreq/issues/536)) - ([ceb0bd5](https://github.com/0x676e67/wreq/commit/ceb0bd5d05886fb172a33da2c23f69078ed147a0))\n- *(tls)* Simplify RootCertStore wrapper implementation ([#553](https://github.com/0x676e67/wreq/issues/553)) - ([b24bc40](https://github.com/0x676e67/wreq/commit/b24bc4060e84734b0fa99d35f111c5638ec1bdb7))\n- Unified naming of historical legacy APIs - ([c7c6a0d](https://github.com/0x676e67/wreq/commit/c7c6a0db32445dda27b285e4c7a812f4ca236b39))\n- Unified naming of historical legacy APIs ([#554](https://github.com/0x676e67/wreq/issues/554)) - ([9022641](https://github.com/0x676e67/wreq/commit/902264184d938d8b8cb138dbc28e8eca1e25891d))\n\n### Documentation\n\n- *(client)* Update emulation method documentation - ([5dd33ab](https://github.com/0x676e67/wreq/commit/5dd33aba02be7d6b0136a5d6e839d9974f1303d3))\n- *(client)* Deleting outdated documents ([#532](https://github.com/0x676e67/wreq/issues/532)) - ([2cffe47](https://github.com/0x676e67/wreq/commit/2cffe471deca62c86ed18346cbd7b12caf2e0579))\n- *(cookie)* Delete irrelevant library documents - ([6c44c38](https://github.com/0x676e67/wreq/commit/6c44c38f589057f3a64bb7152a34ca62630b7586))\n- *(response)* Clarify in docs that `Response::content_length()` is not based on the `Content-Length` header ([#558](https://github.com/0x676e67/wreq/issues/558)) - ([5c174c4](https://github.com/0x676e67/wreq/commit/5c174c48b4ec09544de379c5254fc11e74d5bd7b))\n- *(response)* Clarify that content_length() is not based on the Content-Length header in the docs - ([7257f34](https://github.com/0x676e67/wreq/commit/7257f34ca23c7cd0b9f0a1aa6e0da3507ad58956))\n- Update library examples - ([62d6266](https://github.com/0x676e67/wreq/commit/62d6266f425e83ad0998d1b2f290cb56d44df93f))\n- Update 
features description ([#540](https://github.com/0x676e67/wreq/issues/540)) - ([bd18719](https://github.com/0x676e67/wreq/commit/bd1871957df8304a0a55485cc7c2eb3e5add00bc))\n\n### Performance\n\n- *(client)* Fine-tune request performance and testing ([#566](https://github.com/0x676e67/wreq/issues/566)) - ([a07c233](https://github.com/0x676e67/wreq/commit/a07c2332cc751a98d48e0a8cf3fca958e19f09e3))\n- *(http)* Inline hotspot method ([#528](https://github.com/0x676e67/wreq/issues/528)) - ([2038231](https://github.com/0x676e67/wreq/commit/20382318693de4e2aaa4b55c3943c5ad1bd2689c))\n\n### Testing\n\n- *(badssl)* Update ssl pinning test ([#557](https://github.com/0x676e67/wreq/issues/557)) - ([b883d7f](https://github.com/0x676e67/wreq/commit/b883d7fb9b7b6c6f1b5b48271bd4d5c7de9666d8))\n\n### Miscellaneous Tasks\n\n- *(emulation)* Impl `default` for EmulationProvider - ([b726363](https://github.com/0x676e67/wreq/commit/b7263637f23bac976a54fe644b96f89047217647))\n- *(tls)* Simplified `IntoCertStore` macro impl ([#562](https://github.com/0x676e67/wreq/issues/562)) - ([5052342](https://github.com/0x676e67/wreq/commit/505234223f28dd749f10414e1fee9161119e1d98))\n- *(tls)* Simplified `IntoCertCompressionAlgorithm` macro impl ([#561](https://github.com/0x676e67/wreq/issues/561)) - ([a7606d9](https://github.com/0x676e67/wreq/commit/a7606d9d50cc295dfbd5374a55c6841f790ae6c2))\n- Update example documentation crate package name - ([363e98b](https://github.com/0x676e67/wreq/commit/363e98b6b97809f2a6802a131e884cb302430da8))\n- Update apache license copyright - ([50d73a3](https://github.com/0x676e67/wreq/commit/50d73a35afd3c482538a23f34e125bfbd9be6f69))\n\n### Build\n\n- *(action)* Added compression features tests ([#564](https://github.com/0x676e67/wreq/issues/564)) - ([5767ce8](https://github.com/0x676e67/wreq/commit/5767ce81d59b5f1d0e2e702c2200dfd3713b4f0b))\n- *(action)* Added features tests ([#563](https://github.com/0x676e67/wreq/issues/563)) - 
([b8f7968](https://github.com/0x676e67/wreq/commit/b8f7968f0ed52d6fe6282ef189fe8f8514ba1071))\n- *(action)* Added check semver action ([#559](https://github.com/0x676e67/wreq/issues/559)) - ([a58e989](https://github.com/0x676e67/wreq/commit/a58e989819fb29e89823ee764d26df2646a840e2))\n- *(deps)* Pin `async-compression` to version `0.4.21` ([#567](https://github.com/0x676e67/wreq/issues/567)) - ([0be61d7](https://github.com/0x676e67/wreq/commit/0be61d7db8641170ca143220de348b1e423d8f83))\n- *(deps)* Pin `tokio-tungstenite` to version `0.26.2` ([#565](https://github.com/0x676e67/wreq/issues/565)) - ([a5ee2a2](https://github.com/0x676e67/wreq/commit/a5ee2a2d99fcb1c8afab7a2636c7c657132744ed))\n- *(deps)* Update hickory-resolver requirement from 0.24 to 0.25 ([#549](https://github.com/0x676e67/wreq/issues/549)) - ([f7de3f5](https://github.com/0x676e67/wreq/commit/f7de3f5ba54c9bbb4701138a69adeaa563c9b4c0))\n- *(deps)* Update typed-builder requirement from 0.20.0 to 0.21.0 ([#548](https://github.com/0x676e67/wreq/issues/548)) - ([099c257](https://github.com/0x676e67/wreq/commit/099c257ef3d244a464633deb04ccca6cd4a87898))\n\n\n## [3.0.6](https://github.com/0x676e67/wreq/compare/v3.0.5..v3.0.6) - 2025-03-10\n\n### Features\n\n- *(ws)* Improved WebSocket message creation ([#524](https://github.com/0x676e67/wreq/issues/524)) - ([508d869](https://github.com/0x676e67/wreq/commit/508d8695216a1ca28c91fe5d9e04cce745839a67))\n\n### Testing\n\n- *(zstd)* Test connection reuse with new zstd decompression ([#522](https://github.com/0x676e67/wreq/issues/522)) - ([a277f80](https://github.com/0x676e67/wreq/commit/a277f8036da135533efd55bd561941b992cfb1fa))\n\n\n## [3.0.5](https://github.com/0x676e67/wreq/compare/v3.0.3..v3.0.5) - 2025-03-09\n\n### Features\n\n- *(tls)* Allow overriding AES encryption for TLS ECH ([#515](https://github.com/0x676e67/wreq/issues/515)) - ([0045e3d](https://github.com/0x676e67/wreq/commit/0045e3d105a1c38ffb1ceb1cdc15cb2d4265e9ac))\n\n### Bug Fixes\n\n- 
*(decoder)* Handle multi-frame zstd response body decompression ([#517](https://github.com/0x676e67/wreq/issues/517)) - ([bbc02ae](https://github.com/0x676e67/wreq/commit/bbc02ae0a837138054321bfcb8223a3fafd2e286))\n\n### Miscellaneous Tasks\n\n- *(connect)* Remove `ServiceBuilder` dead code ([#518](https://github.com/0x676e67/wreq/issues/518)) - ([8cf0dc4](https://github.com/0x676e67/wreq/commit/8cf0dc4034707e73205cc5849c473e2a6ca87201))\n- Update docs - ([d077c3d](https://github.com/0x676e67/wreq/commit/d077c3d40b43441ddebd8d3049b4d9094b23ec3b))\n\n\n## [3.0.3](https://github.com/0x676e67/wreq/compare/v3.0.1..v3.0.3) - 2025-03-07\n\n### Bug Fixes\n\n- *(decoder)* Fix conditional compilation of decompress features ([#507](https://github.com/0x676e67/wreq/issues/507)) - ([8ffa73b](https://github.com/0x676e67/wreq/commit/8ffa73bdd6a8aea1651f31f2a70c6ed727cd65f3))\n\n### Styling\n\n- Clippy fix example `set_root_cert_store` - ([9b3b49a](https://github.com/0x676e67/wreq/commit/9b3b49ac5172d09369b64a1b3b4cfe3550139fb8))\n\n### Miscellaneous Tasks\n\n- Remove pub(super) visibility from `method_has_defined_payload_semantics` - ([b689112](https://github.com/0x676e67/wreq/commit/b689112bdb1bd60798e264ba43b5d073009df0f1))\n\n### Build\n\n- *(deps)* Update async-compression requirement from 0.4.0 to 0.4.20 ([#505](https://github.com/0x676e67/wreq/issues/505)) - ([71562ce](https://github.com/0x676e67/wreq/commit/71562ce70b0418fbd0a516727bb6107f83585f89))\n- *(deps)* Update bytes requirement from 1.0 to 1.10.1 ([#504](https://github.com/0x676e67/wreq/issues/504)) - ([c10f5e1](https://github.com/0x676e67/wreq/commit/c10f5e15c63660ac33413d6c929a11ac70302e53))\n\n\n## [3.0.1-rc4](https://github.com/0x676e67/wreq/compare/v3.0.1-rc3..v3.0.1-rc4) - 2025-03-05\n\n### Features\n\n- *(cert)* Expose `RootCertStoreBuilder` as public API ([#494](https://github.com/0x676e67/wreq/issues/494)) - 
([849558f](https://github.com/0x676e67/wreq/commit/849558f2607e7b23521193c74e794cc192decf76))\n\n### Refactor\n\n- *(client)* Simplify DNS resolver initialization in ClientBuilder ([#499](https://github.com/0x676e67/wreq/issues/499)) - ([1368d07](https://github.com/0x676e67/wreq/commit/1368d075121a9cb9d2f9ca9cb674264e84c5e4e5))\n- *(client)* `pool_max_size` signature changed from `Into<Option<NonZeroUsize>>` to `usize` ([#498](https://github.com/0x676e67/wreq/issues/498)) - ([57223e2](https://github.com/0x676e67/wreq/commit/57223e2ed4996239b8cfa696c68f550104de9f65))\n\n### Documentation\n\n- *(emulation)* Improve emulation documentation - ([776f2db](https://github.com/0x676e67/wreq/commit/776f2dbd18fa5fb3f635dceb2d22e92af358405d))\n- Update docs ([#496](https://github.com/0x676e67/wreq/issues/496)) - ([a4862e8](https://github.com/0x676e67/wreq/commit/a4862e870d002f71761863bae22ec81de2bc5f52))\n\n### Performance\n\n- *(client)* Reading `user-agent` to avoid full clone ([#495](https://github.com/0x676e67/wreq/issues/495)) - ([89fd750](https://github.com/0x676e67/wreq/commit/89fd750e8f239c0bb31cf8699d7d4a54440933c0))\n- *(decoder)* Statically check compression headers ([#503](https://github.com/0x676e67/wreq/issues/503)) - ([c912d8d](https://github.com/0x676e67/wreq/commit/c912d8d428b6787f4203a06ff9d2fd7abc6fb3d2))\n\n### Styling\n\n- *(network)* Fmt code - ([5941b39](https://github.com/0x676e67/wreq/commit/5941b390b46de184ecb57160cd64d08a7ab708e0))\n\n### Miscellaneous Tasks\n\n- Revert `impl_debug` export - ([3fc3f69](https://github.com/0x676e67/wreq/commit/3fc3f697982cee4fc24e28e10cfba04ceeaf1773))\n\n\n## [3.0.1-rc3](https://github.com/0x676e67/wreq/compare/v3.0.1-rc2..v3.0.1-rc3) - 2025-03-04\n\n### Features\n\n- *(cookie)* Abstract public cookie store trait ([#493](https://github.com/0x676e67/wreq/issues/493)) - ([a565884](https://github.com/0x676e67/wreq/commit/a5658847433928673964b79a7937b35dc4db6296))\n- *(proxy)* Supports `http`/`https` proxy custom headers 
([#490](https://github.com/0x676e67/wreq/issues/490)) - ([02fdc5b](https://github.com/0x676e67/wreq/commit/02fdc5bcd1b40d27538163279f4424a666957eef))\n\n### Testing\n\n- Update badssl test ([#487](https://github.com/0x676e67/wreq/issues/487)) - ([8831a9e](https://github.com/0x676e67/wreq/commit/8831a9e42d67dd5234955fc4594f8d3e564b04cc))\n\n### Miscellaneous Tasks\n\n- Replace `get_or_insert_with(Vec::new)` to `get_or_insert_default()` - ([2ca23a1](https://github.com/0x676e67/wreq/commit/2ca23a17068ef5c1b132029abcb25b47db029db7))\n\n### Build\n\n- `MSRV 1.85` / `edition 2024` ([#488](https://github.com/0x676e67/wreq/issues/488)) - ([f5bcc71](https://github.com/0x676e67/wreq/commit/f5bcc71d70a86e52a19596988c1ed08f71c12769))\n\n\n## [3.0.1-rc2](https://github.com/0x676e67/wreq/compare/v3.0.1-rc1..v3.0.1-rc2) - 2025-03-03\n\n### Refactor\n\n- *(client)* Rename `as_mut` to `update` for clarity and consistency   ([#482](https://github.com/0x676e67/wreq/issues/482)) - ([e8137ec](https://github.com/0x676e67/wreq/commit/e8137ec6448e53124b58d5c7e4bdb7eb1d923bb7))\n\n### Styling\n\n- *(client)* Fmt code - ([897a373](https://github.com/0x676e67/wreq/commit/897a373b460ea3e0c8558e9d72843ef28578e61a))\n\n### Testing\n\n- Add client cloned test ([#485](https://github.com/0x676e67/wreq/issues/485)) - ([4a5419b](https://github.com/0x676e67/wreq/commit/4a5419b56d57a54b1cfde121fee9f41acb6c411f))\n- Add client emulation update test ([#484](https://github.com/0x676e67/wreq/issues/484)) - ([f72648f](https://github.com/0x676e67/wreq/commit/f72648feafe1440dc1ae942b75421faf940fff76))\n- Add client headers update test ([#483](https://github.com/0x676e67/wreq/issues/483)) - ([730fdaa](https://github.com/0x676e67/wreq/commit/730fdaa3b18c7e0d2e2c732a408677ba8c483854))\n\n### Miscellaneous Tasks\n\n- *(client)* Update docs - ([bbcdd1f](https://github.com/0x676e67/wreq/commit/bbcdd1f15843c63aa8fee47ac0507620fb9468e6))\n\n### Build\n\n- Fix docs build 
([#486](https://github.com/0x676e67/wreq/issues/486)) - ([915c36b](https://github.com/0x676e67/wreq/commit/915c36bb4a666be3acd26a4416a39534e661419b))\n\n\n## [3.0.1-rc1](https://github.com/0x676e67/wreq/compare/v2.0.3..v3.0.1-rc1) - 2025-03-03\n\n### Features\n\n- *(client)* Remove cross-origin redirect proxy support ([#477](https://github.com/0x676e67/wreq/issues/477)) - ([3a241ef](https://github.com/0x676e67/wreq/commit/3a241ef4b342b1bd46a8e4cd7ecbeb641d043b4f))\n- *(client)* Added a remove cookie function ([#475](https://github.com/0x676e67/wreq/issues/475)) - ([7142963](https://github.com/0x676e67/wreq/commit/71429634012e03a710793591727cbf4bd5d8de28))\n- *(client)* Remove `set_cookies_by_ref` ([#474](https://github.com/0x676e67/wreq/issues/474)) - ([56de727](https://github.com/0x676e67/wreq/commit/56de72716b1cd89f724f8720dc3fa2fb75ac0399))\n- *(client)* Added a clear cookies function ([#472](https://github.com/0x676e67/wreq/issues/472)) - ([d934716](https://github.com/0x676e67/wreq/commit/d93471631440a28a0dfb63dad85f4acf3768cab2))\n- *(client)* Adapt thread-safe update client configuration ([#404](https://github.com/0x676e67/wreq/issues/404)) - ([e6397d6](https://github.com/0x676e67/wreq/commit/e6397d68f216a86e75b46bb2f7b9345ecf58e08f))\n- *(client)* Apply configuration sequentially ([#391](https://github.com/0x676e67/wreq/issues/391)) - ([775db82](https://github.com/0x676e67/wreq/commit/775db824653b162e4dfc6bb14c79b811206f79c2))\n- *(imp)* Add `chrome 132`/`chrome 133` impersonate ([#423](https://github.com/0x676e67/wreq/issues/423)) - ([3430645](https://github.com/0x676e67/wreq/commit/34306457c0ba01f95e46b5b0bbe443a3abe3fb87))\n- *(pool)* Connection pool distinguishes request versions ([#431](https://github.com/0x676e67/wreq/issues/431)) - ([22b0e92](https://github.com/0x676e67/wreq/commit/22b0e92835a786be030f405fd70ea311cecb6de4))\n- *(proxy)* Add `socks4a` proxy protocol support ([#416](https://github.com/0x676e67/wreq/issues/416)) - 
([1f98b6e](https://github.com/0x676e67/wreq/commit/1f98b6e2578ab55ff4fcfb86c66548a7161469a7))\n- *(tls)* Encapsulate and simplify certificate loading ([#417](https://github.com/0x676e67/wreq/issues/417)) - ([a32207e](https://github.com/0x676e67/wreq/commit/a32207ef84057e042b69068fee2179b0a059cd51))\n- *(tls)* Add ALPS use new endpoint extension ([#396](https://github.com/0x676e67/wreq/issues/396)) - ([20b988c](https://github.com/0x676e67/wreq/commit/20b988c04e4a8a334d702b74a54e46d149b9802a))\n- *(websocket)* Added `read_buffer_size` optional config ([#457](https://github.com/0x676e67/wreq/issues/457)) - ([ccece59](https://github.com/0x676e67/wreq/commit/ccece597da6db3f085acf13718af93ea3acffab9))\n- *(websocket)* Chain call wrapper `RequestBuilder` ([#432](https://github.com/0x676e67/wreq/issues/432)) - ([ea3dfe8](https://github.com/0x676e67/wreq/commit/ea3dfe88c7dbcf4b9f13a70ac29aa306f17fdf91))\n- *(websocket)* Explicitly force the use of ws/wss protocol ([#383](https://github.com/0x676e67/wreq/issues/383)) - ([4fd10a9](https://github.com/0x676e67/wreq/commit/4fd10a951977580b74f60d5ede81833ae0f484cf))\n- Removal of base url feature  ([#411](https://github.com/0x676e67/wreq/issues/411)) - ([16dac1d](https://github.com/0x676e67/wreq/commit/16dac1d122381d27ed3f5948766a1d9a13ca8d9d))\n- Add optional clear method to `CookieStore` implementation ([#400](https://github.com/0x676e67/wreq/issues/400)) - ([a357c9e](https://github.com/0x676e67/wreq/commit/a357c9e1eed9c9d51fd10d3eb98109104928cef5))\n- Serializing impersonate enums uses legacy naming conventions ([#385](https://github.com/0x676e67/wreq/issues/385)) - ([0e3ddb0](https://github.com/0x676e67/wreq/commit/0e3ddb06d3690661806d6f1dc8731e8d337ad4a0))\n- Add `HTTP/2` support for `WebSocket` ([#373](https://github.com/0x676e67/wreq/issues/373)) - ([b46daa9](https://github.com/0x676e67/wreq/commit/b46daa90fd11e475b7b8238e1ab5d573b8a531b2))\n\n### Bug Fixes\n\n- *(deps)* Fix alps use new endpoint negotiation 
([#464](https://github.com/0x676e67/wreq/issues/464)) - ([21c6751](https://github.com/0x676e67/wreq/commit/21c675123e1f117633d604290c94e5aa333ec4ab))\n- *(proxy)* Fix `no_proxy` on Windows ([#470](https://github.com/0x676e67/wreq/issues/470)) - ([16ec933](https://github.com/0x676e67/wreq/commit/16ec933045a707a244eebc98edb17ae1314766a6))\n- Ignore Content-Length for methods without payload semantics ([#429](https://github.com/0x676e67/wreq/issues/429)) - ([bd5420c](https://github.com/0x676e67/wreq/commit/bd5420c4d526f05b4430bd7e60f5f5df27fffa11))\n- Ensure HTTP version negotiation for non-TLS requests ([#397](https://github.com/0x676e67/wreq/issues/397)) - ([dd14d49](https://github.com/0x676e67/wreq/commit/dd14d49a2d579f9d36a49f38c5d9de373901d492))\n\n### Refactor\n\n- *(client)* Simplify client reference handling by removing unnecessary operations ([#476](https://github.com/0x676e67/wreq/issues/476)) - ([529928b](https://github.com/0x676e67/wreq/commit/529928b4bae30b2ec4fadd2c91185f3417919ea8))\n- *(client)* Refactor client `HTTP1`/`HTTP2` configuration API ([#371](https://github.com/0x676e67/wreq/issues/371)) - ([fac8d2d](https://github.com/0x676e67/wreq/commit/fac8d2d9cf6df102e101c4f8d9fda72bd2382935))\n- *(tls)* Refactor TLS connector structure ([#421](https://github.com/0x676e67/wreq/issues/421)) - ([bdd3942](https://github.com/0x676e67/wreq/commit/bdd394210ffa26d0e2956c73606436685bc962da))\n- *(websocket)* Refactor websocket implementation ([#380](https://github.com/0x676e67/wreq/issues/380)) - ([3b91be4](https://github.com/0x676e67/wreq/commit/3b91be4225aa060b43c00103af6fe5fa14a093dd))\n- *(websocket)* Improve error handling, rename APIs, and update API signatures ([#372](https://github.com/0x676e67/wreq/issues/372)) - ([44ec8c6](https://github.com/0x676e67/wreq/commit/44ec8c600119c46112b182b268263aa272139b10))\n- Move device fingerprinting to rquest-util maintenance ([#480](https://github.com/0x676e67/wreq/issues/480)) - 
([5eb8684](https://github.com/0x676e67/wreq/commit/5eb868442018da9e7be15f9844392093ff5baa21))\n- Reduce dependency on `futures-core` / `futures-util` ([#449](https://github.com/0x676e67/wreq/issues/449)) - ([5a4f2be](https://github.com/0x676e67/wreq/commit/5a4f2be065bb1edc3c1e39fe9fe2b8c993078260))\n- Replace `HttpContext` with `EmulationProvider` for clarity and accuracy ([#436](https://github.com/0x676e67/wreq/issues/436)) - ([6a9d80a](https://github.com/0x676e67/wreq/commit/6a9d80a5cfa85b13b0a3b7bd08422ba0c563cf4a))\n- Replace \"impersonate\" with \"emulation\" for clarity and accuracy ([#434](https://github.com/0x676e67/wreq/issues/434)) - ([e2bac75](https://github.com/0x676e67/wreq/commit/e2bac75805fdefd79c3cba32cadd65107060558b))\n- Replace unsafe methods with safe methods for certificate handler ([#399](https://github.com/0x676e67/wreq/issues/399)) - ([bdf1fc5](https://github.com/0x676e67/wreq/commit/bdf1fc57d2150e7e471331abd1d745e7f786dbd7))\n- Replace unsafe methods with safe methods in `ConnectConfiguration` ([#398](https://github.com/0x676e67/wreq/issues/398)) - ([dda0d42](https://github.com/0x676e67/wreq/commit/dda0d42388623c14838396624b2d56a8b572c2f7))\n- Improve client API design and documentation ([#387](https://github.com/0x676e67/wreq/issues/387)) - ([7a63ba6](https://github.com/0x676e67/wreq/commit/7a63ba6e10734b233bbcce87c42a4978fccb7b25))\n- Rename method to accept_key for clarity - ([c32dadd](https://github.com/0x676e67/wreq/commit/c32daddb394d5b35009fc445c1e0f247a5c48ba0))\n\n### Documentation\n\n- *(client)* Update client `cloned` method documentation ([#409](https://github.com/0x676e67/wreq/issues/409)) - ([7d10ce6](https://github.com/0x676e67/wreq/commit/7d10ce6be0b26d7b99f24a720e171f84c8b9e41c))\n- Added backport reference docs ([#382](https://github.com/0x676e67/wreq/issues/382)) - ([7f57bd5](https://github.com/0x676e67/wreq/commit/7f57bd5876020cb827c2ac3161e4ef080e96718d))\n\n### Performance\n\n- *(connect)* Delay connector layer 
initialization to improve performance ([#408](https://github.com/0x676e67/wreq/issues/408)) - ([4903458](https://github.com/0x676e67/wreq/commit/4903458b81b161aac51ded38a562f139e08d94c9))\n- *(connector)* Optimize performance of switching TLS connector ([#406](https://github.com/0x676e67/wreq/issues/406)) - ([26f58e4](https://github.com/0x676e67/wreq/commit/26f58e4e39b1d9d0eb6525862a5ff146fff4ef5c))\n- *(socks)* Socks connection process DNS uses non-blocking query ([#420](https://github.com/0x676e67/wreq/issues/420)) - ([0d40c75](https://github.com/0x676e67/wreq/commit/0d40c75b1edc117fa81431256ca7f6510618ea43))\n- Always inline `into_tungstenite` ([#381](https://github.com/0x676e67/wreq/issues/381)) - ([b5e0b9f](https://github.com/0x676e67/wreq/commit/b5e0b9f0263248669940c702868c5afcdc01cc76))\n\n### Styling\n\n- Fmt code - ([e3ac7a7](https://github.com/0x676e67/wreq/commit/e3ac7a76ccdb98a3b143607f8d3f8f7293421b4e))\n\n### Testing\n\n- *(upgrade)* Add http2 upgrade test ([#384](https://github.com/0x676e67/wreq/issues/384)) - ([0724836](https://github.com/0x676e67/wreq/commit/0724836dbfae85bf118f4caf4de19ae3d878b60e))\n- Add unit test for cookie getter and setter functionality ([#451](https://github.com/0x676e67/wreq/issues/451)) - ([b71032e](https://github.com/0x676e67/wreq/commit/b71032e0229aa86b737426b643fabfaf549a854b))\n- Serialize tests that read/write the same environment variable ([#443](https://github.com/0x676e67/wreq/issues/443)) - ([b7560f9](https://github.com/0x676e67/wreq/commit/b7560f97998e4221472c32688ab7bea5df61edb6))\n\n### Miscellaneous Tasks\n\n- *(client)* Delete unnecessary clone - ([9793bcc](https://github.com/0x676e67/wreq/commit/9793bccbb2f4d6d45dfc90ec028222cdf065f29c))\n- *(client)* Rename client builder http2 timer name from `timer` to `http2_timer` ([#407](https://github.com/0x676e67/wreq/issues/407)) - ([e06d9ce](https://github.com/0x676e67/wreq/commit/e06d9ce8dd4f9f1a5f89c0ff3372869275f526b5))\n- *(connect)* Delete duplicate tls info 
acquisition logic - ([4b7877a](https://github.com/0x676e67/wreq/commit/4b7877a3805afb071931358e0a0f69c42e8b05c0))\n- *(connect)* Delete connector unnecessary keepalive field - ([08b5904](https://github.com/0x676e67/wreq/commit/08b5904ffb0374f6c327442a314615e6893b6c63))\n- *(example)* Update websocket example - ([2479972](https://github.com/0x676e67/wreq/commit/24799723f580badf92e81b3e972ad8cc2b0995f1))\n- *(tls)* Move `conf` to `client/conf` module - ([988e679](https://github.com/0x676e67/wreq/commit/988e67949ca9162e6449d41700e5bbbccdb84d2d))\n- *(tls)* Move `TlsConfig` to conf module - ([ffd1673](https://github.com/0x676e67/wreq/commit/ffd1673e3afa379086bc04b7a744e8733512388b))\n- *(websocket)* Simplify error handling and improve code readability ([#418](https://github.com/0x676e67/wreq/issues/418)) - ([60fa74d](https://github.com/0x676e67/wreq/commit/60fa74dc0abba1862d23adc4965152b1896eb3e4))\n- *(websocket)* Fmt code - ([a313ba0](https://github.com/0x676e67/wreq/commit/a313ba0f2707148e023f0126cc895788e3d42bfe))\n- *(websocket)* Improved version protocol handler - ([81a0183](https://github.com/0x676e67/wreq/commit/81a0183b14dbe9596c6eb4466656247d92563e62))\n- Update examples - ([7cc6b1e](https://github.com/0x676e67/wreq/commit/7cc6b1e5b3a836bcf0e33f9994bb5a162ed76ad2))\n- Add Crates.io MSRV - ([cc8cc28](https://github.com/0x676e67/wreq/commit/cc8cc284e7e7b976622a47271b273fa03a33a82b))\n- Update the compilation guide ([#466](https://github.com/0x676e67/wreq/issues/466)) - ([5ad4de9](https://github.com/0x676e67/wreq/commit/5ad4de96c5938c1d7c8ea399495b1f377ecf8f66))\n- Update compilation-guide ([#456](https://github.com/0x676e67/wreq/issues/456)) - ([723e0c1](https://github.com/0x676e67/wreq/commit/723e0c16d6ac923b8cc51312b2c2424366c0d915))\n- Merge v2 branch - ([8180cbc](https://github.com/0x676e67/wreq/commit/8180cbcc4f60d3ab6916ad07df8f1354e230c39f))\n- Improve Debug implementation ([#422](https://github.com/0x676e67/wreq/issues/422)) - 
([566a33b](https://github.com/0x676e67/wreq/commit/566a33b3102b546f7f7c36161f4f98ae78bf2cb7))\n- Fmt code - ([8b3c8f6](https://github.com/0x676e67/wreq/commit/8b3c8f6b1f5e19400ae33fdce85e3169d98c80ba))\n- Simplified error qualifier types ([#412](https://github.com/0x676e67/wreq/issues/412)) - ([35b4347](https://github.com/0x676e67/wreq/commit/35b4347a35453b531f8339a9efe62b80a0ecd164))\n- Rename `Proxies` internal fields - ([dfe4a00](https://github.com/0x676e67/wreq/commit/dfe4a00c505dcd7ec5802b51dd685f25e6559831))\n- Update docs - ([6eb42e8](https://github.com/0x676e67/wreq/commit/6eb42e83452aab5d7921c56d7c1120cad676d805))\n- Move `http1`/`http2` config to `conf` mod - ([592038f](https://github.com/0x676e67/wreq/commit/592038ff1468ad0a59aff1057410c6cffc8d6e04))\n- Update client docs - ([6a35a0a](https://github.com/0x676e67/wreq/commit/6a35a0aa8ea2ccd4483b160ee1a19f97b539c7c8))\n- Fix `AlpnProtos` non upper case globals warning - ([265d938](https://github.com/0x676e67/wreq/commit/265d9388ae524fbed133136f114835f5175b9bd0))\n- Fix non upper case globals name - ([af02660](https://github.com/0x676e67/wreq/commit/af02660acffa86d48f0246d75de3e291869e86f6))\n- Remove dead code - ([00e939a](https://github.com/0x676e67/wreq/commit/00e939ac1a68950131713575d3eae60d1a1b621c))\n- Fmt code - ([096eef0](https://github.com/0x676e67/wreq/commit/096eef07bea970ef4fff57073e456c8269b992a6))\n- Fmt imports ([#388](https://github.com/0x676e67/wreq/issues/388)) - ([d73d1ac](https://github.com/0x676e67/wreq/commit/d73d1ac0dde1faeda4186aa17051849067e48c63))\n- Fmt code - ([05a9d40](https://github.com/0x676e67/wreq/commit/05a9d406b6bf2beb8066994fcc7269a01f900183))\n- Fmt code - ([ff3ad03](https://github.com/0x676e67/wreq/commit/ff3ad037e5ad4ca83d1928631a9d88d754ef1cb1))\n- Clippy fix - ([895db54](https://github.com/0x676e67/wreq/commit/895db54492677791693f760b6498d4b1eb9b619b))\n- Update websocket examples - 
([4eefefd](https://github.com/0x676e67/wreq/commit/4eefefd464d4d0580651fdbe38c832d3f53b1e59))\n- Improved WebSocket protocols handler ([#370](https://github.com/0x676e67/wreq/issues/370)) - ([2abe066](https://github.com/0x676e67/wreq/commit/2abe06620c5de829db87ce8e7589d9864aa6d2ec))\n\n### Build\n\n- *(deps)* Update windows-registry requirement from 0.4.0 to 0.5.0 ([#471](https://github.com/0x676e67/wreq/issues/471)) - ([288e33a](https://github.com/0x676e67/wreq/commit/288e33aac4cbf0b3d6b51df38eb88952778eb447))\n- *(deps)* Update boring requirement from 4.15.7 to 4.15.8 ([#468](https://github.com/0x676e67/wreq/issues/468)) - ([3488f17](https://github.com/0x676e67/wreq/commit/3488f17e9019735af1ec934027c1ec7c8bd28780))\n- *(deps)* Update boring requirement from 4.15.5 to 4.15.6 - ([04659bb](https://github.com/0x676e67/wreq/commit/04659bbae0f4ded2e4a0f45f69e69c23da2f7e8d))\n- *(deps)* Update boring requirement from 4.15.3 to 4.15.5 ([#437](https://github.com/0x676e67/wreq/issues/437)) - ([b172177](https://github.com/0x676e67/wreq/commit/b1721771a8f1cfa5af7aa9006484b9bfd1c2fff2))\n- *(deps)* Update boring requirement from 4.15.2 to 4.15.3 ([#425](https://github.com/0x676e67/wreq/issues/425)) - ([aff379e](https://github.com/0x676e67/wreq/commit/aff379e045dc1c8bda0eeec9d091c08e9f5db86b))\n- *(deps)* Apple platform dependencies are minimized as much as possible ([#414](https://github.com/0x676e67/wreq/issues/414)) - ([858d911](https://github.com/0x676e67/wreq/commit/858d91196299e9a8f2851981d50b5421b530b580))\n- *(deps)* MacOS platform dependency is minimized ([#413](https://github.com/0x676e67/wreq/issues/413)) - ([f85c7ee](https://github.com/0x676e67/wreq/commit/f85c7ee337a74ef2686a0cc01870cc05eee031fc))\n- *(deps)* Update brotli requirement from 6.0.0 to 7.0.0 ([#401](https://github.com/0x676e67/wreq/issues/401)) - ([50614a7](https://github.com/0x676e67/wreq/commit/50614a74a02991124cf0a20ba09de993b79e1223))\n- *(deps)* Update lru requirement from 0.12 to 0.13 
([#393](https://github.com/0x676e67/wreq/issues/393)) - ([b3cda7d](https://github.com/0x676e67/wreq/commit/b3cda7d7f9efd9b7c35a5cd0c5a8a8588bb54897))\n- *(feature)* `apple-bindable-device` rename to `apple-network-device-binding` ([#426](https://github.com/0x676e67/wreq/issues/426)) - ([05a1adb](https://github.com/0x676e67/wreq/commit/05a1adb626a0614fd13a04fbeb7ae3d5304e4d8b))\n- Fix no default feature build - ([8ed417d](https://github.com/0x676e67/wreq/commit/8ed417df8fbbb14ec9f319219d6ca750200bd192))\n- Visualize macro conditional compilation ([#415](https://github.com/0x676e67/wreq/issues/415)) - ([01f1387](https://github.com/0x676e67/wreq/commit/01f138738785dd1391a06d1ff015ea7eacc727c1))\n- Update compilation guide ([#395](https://github.com/0x676e67/wreq/issues/395)) - ([96c75a4](https://github.com/0x676e67/wreq/commit/96c75a4be224d2be0275d101d43eb219489d7494))\n\n### Deps\n\n- *(ipnet)* Bump version to v2.11.0 ([#390](https://github.com/0x676e67/wreq/issues/390)) - ([2022b25](https://github.com/0x676e67/wreq/commit/2022b256d1d88dd991a3ed48f7c4678eb0f60f7c))\n- *(tokio)* Remove unused `rt` feature ([#389](https://github.com/0x676e67/wreq/issues/389)) - ([545e245](https://github.com/0x676e67/wreq/commit/545e2456db7353b2909c85d9b3186dbe6d8100e2))\n\n### Workflow\n\n- Update workflows check - ([321fba2](https://github.com/0x676e67/wreq/commit/321fba2939253f51637b5b18dd1dfc9990dc0d2d))\n\n## New Contributors ❤️\n\n* @tahmid-23 made their first contribution in [#423](https://github.com/0x676e67/wreq/pull/423)\n\n## [2.0.3](https://github.com/0x676e67/wreq/compare/v2.0.2..v2.0.3) - 2025-01-25\n\n### Documentation\n\n- Enhance documentation for `ImpersonateBuilder` methods ([#367](https://github.com/0x676e67/wreq/issues/367)) - ([d0dd33f](https://github.com/0x676e67/wreq/commit/d0dd33f22325b16138d743b03a39674daf8d89c8))\n\n### Miscellaneous Tasks\n\n- Update examples ([#368](https://github.com/0x676e67/wreq/issues/368)) - 
([477e864](https://github.com/0x676e67/wreq/commit/477e864673d5e684070b54f44b48896760a05ef5))\n\n\n## [2.0.2](https://github.com/0x676e67/wreq/compare/v2.0.1..v2.0.2) - 2025-01-25\n\n### Features\n\n- Add implementations for `IntoCertCompressionAlgorithm` ([#363](https://github.com/0x676e67/wreq/issues/363)) - ([3e09a3f](https://github.com/0x676e67/wreq/commit/3e09a3f5fbea1f0a400ab3eaf9ca9832c4d595a4))\n- Expose `ClientMut` as public API ([#362](https://github.com/0x676e67/wreq/issues/362)) - ([455cf51](https://github.com/0x676e67/wreq/commit/455cf51ba37c10a57f00ad6310f87aae8d3f2af3))\n\n### Refactor\n\n- Simplify `IntoStreamDependency` implementations using macros ([#364](https://github.com/0x676e67/wreq/issues/364)) - ([9322f05](https://github.com/0x676e67/wreq/commit/9322f0594d0b1cf74bef110bdd113c7267ae1707))\n\n### Miscellaneous Tasks\n\n- Remove unnecessary type conversions - ([9d9bb4f](https://github.com/0x676e67/wreq/commit/9d9bb4fce39f3f6c7b6cbf24e06041a714ec1898))\n\n\n## [2.0.1](https://github.com/0x676e67/wreq/compare/v2.0.0..v2.0.1) - 2025-01-24\n\n### Features\n\n- Implement `IntoStreamDependency` for tuple and `StreamDependency` ([#359](https://github.com/0x676e67/wreq/issues/359)) - ([d7724f7](https://github.com/0x676e67/wreq/commit/d7724f753e4375a68603ee781be0f010bb329de9))\n\n### Documentation\n\n- Update performance information - ([2cb8a46](https://github.com/0x676e67/wreq/commit/2cb8a4689422c8cddf51f09620d699f56e9d8111))\n\n### Miscellaneous Tasks\n\n- Update owner ([#358](https://github.com/0x676e67/wreq/issues/358)) - ([4ee1438](https://github.com/0x676e67/wreq/commit/4ee143824e5726a8bfaf1bcec14c2d59802ad71d))\n\n\n## [2.0.0](https://github.com/0x676e67/wreq/compare/v2.0.0-rc.1..v2.0.0) - 2025-01-23\n\n### Testing\n\n- *(badssl)* Update cipher list - ([6b01366](https://github.com/0x676e67/wreq/commit/6b0136632b5241fad5fcb9620c54eac98f237ee9))\n\n### Miscellaneous Tasks\n\n- *(tls)* Load and wrap the certificate into `RootCertStore` 
([#356](https://github.com/0x676e67/wreq/issues/356)) - ([adddada](https://github.com/0x676e67/wreq/commit/adddada9037b09ccb38a6eeea67f7adac328a38c))\n- *(tls)* Move `tls/ext/cert` to `tls/cert` ([#355](https://github.com/0x676e67/wreq/issues/355)) - ([eae2d93](https://github.com/0x676e67/wreq/commit/eae2d9364063ab5585b34e137eedb90fb5da18dd))\n- Move macros to lib mod ([#354](https://github.com/0x676e67/wreq/issues/354)) - ([6209589](https://github.com/0x676e67/wreq/commit/6209589bdd23cf38227745100c43d744f0c030b8))\n\n\n## [2.0.0-rc.1](https://github.com/0x676e67/wreq/compare/v1.5.0..v2.0.0-rc.1) - 2025-01-22\n\n### Features\n\n- *(mimic)* Added possibility to choose Client and OS to impersonate ([#290](https://github.com/0x676e67/wreq/issues/290)) - ([63cb5c5](https://github.com/0x676e67/wreq/commit/63cb5c53a735f172114afcab6c816762faedd934))\n- Rename `RootCertsStore` to `RootCertStore` ([#353](https://github.com/0x676e67/wreq/issues/353)) - ([152142f](https://github.com/0x676e67/wreq/commit/152142f00caf25b6d9c198155f417a84a6eead90))\n- `Impersonate`/`ImpersonateOS` impl serde ([#352](https://github.com/0x676e67/wreq/issues/352)) - ([98c61c8](https://github.com/0x676e67/wreq/commit/98c61c885478f1d0d1f81ae1f9cff75bbbe0e95e))\n- Add tests for `3DES` and `DH2048` cipher support ([#351](https://github.com/0x676e67/wreq/issues/351)) - ([bd73ddc](https://github.com/0x676e67/wreq/commit/bd73ddcb58bcfb936297cd338c8be589d2ce8c95))\n- Remove impersonate from str feature ([#350](https://github.com/0x676e67/wreq/issues/350)) - ([96387ec](https://github.com/0x676e67/wreq/commit/96387ec22c009883f1486e3c09586cbbc7f94477))\n- Add `read_timeout` option with override support in Request ([#334](https://github.com/0x676e67/wreq/issues/334)) - ([5d115a5](https://github.com/0x676e67/wreq/commit/5d115a5b5145213d3ec9f8408d88609aa43bf00a))\n- Disable boring module exports - ([bb63196](https://github.com/0x676e67/wreq/commit/bb631960f9326a1c60e3300fd7f2425af1faef4b))\n- Disable boring 
module exports ([#319](https://github.com/0x676e67/wreq/issues/319)) - ([7d30324](https://github.com/0x676e67/wreq/commit/7d3032433b561c0452c7b22a6fc5d5ba2ca37e84))\n- Remove internal headers cache ([#318](https://github.com/0x676e67/wreq/issues/318)) - ([846ad15](https://github.com/0x676e67/wreq/commit/846ad15348c5a7767a3c3c6d971a0a6e430b24e6))\n- Send `json` to avoid repeated query of `CONTENT_TYPE` ([#311](https://github.com/0x676e67/wreq/issues/311)) - ([bd2c519](https://github.com/0x676e67/wreq/commit/bd2c519156c66482ddd34b8aa4bf50fd36d3a213))\n\n### Bug Fixes\n\n- *(network)* Fix `NetworkScheme` debug format ([#332](https://github.com/0x676e67/wreq/issues/332)) - ([d0df934](https://github.com/0x676e67/wreq/commit/d0df93457dd100e75ffbf4fb8b61581cd24d79f6))\n\n### Refactor\n\n- Refactor client and impersonate configurations ([#321](https://github.com/0x676e67/wreq/issues/321)) - ([513f196](https://github.com/0x676e67/wreq/commit/513f1962503c32cdfeb748780cca26d3965be840))\n- Simplify client internal settings ([#320](https://github.com/0x676e67/wreq/issues/320)) - ([b7763cf](https://github.com/0x676e67/wreq/commit/b7763cf75e01b119cf96cd8cc02bb52888295052))\n\n### Documentation\n\n- *(websocket)* Update docs - ([5028926](https://github.com/0x676e67/wreq/commit/5028926e889c38ac72c36e1c4cad79926efc07cb))\n- Update network scheme docs - ([2ae744c](https://github.com/0x676e67/wreq/commit/2ae744cb185c2fbb512b72ac1d607c4be11408b1))\n- Update `Client` docs - ([8af9f1a](https://github.com/0x676e67/wreq/commit/8af9f1ad4e07ca62f9ea1bbf2c9e54d82869da0a))\n\n### Performance\n\n- Improve network scheme to avoid unnecessary clone ([#333](https://github.com/0x676e67/wreq/issues/333)) - ([a1cb889](https://github.com/0x676e67/wreq/commit/a1cb88944ea6d537349f4d5d3af50f00bb6beaa6))\n\n### Styling\n\n- Destructive updates, standard naming style ([#315](https://github.com/0x676e67/wreq/issues/315)) - 
([247a26f](https://github.com/0x676e67/wreq/commit/247a26f1b883f4ebe95e4df1815e44472387b317))\n- Format code style - ([bd1a837](https://github.com/0x676e67/wreq/commit/bd1a83742e35a88e83c1e7d05f8b74080e67025d))\n- Format code style ([#314](https://github.com/0x676e67/wreq/issues/314)) - ([509977f](https://github.com/0x676e67/wreq/commit/509977f22846d8f22ad0b9588dbb1f4272121143))\n\n### Miscellaneous Tasks\n\n- *(http)* Fmt code - ([d66b156](https://github.com/0x676e67/wreq/commit/d66b156a2a21d29c4d4f1c02cd04fa8f44feb72c))\n- *(rewin)* Inline hotspot code - ([23cc53b](https://github.com/0x676e67/wreq/commit/23cc53b04f1825d0a729aeedd9dc93bcaebe0561))\n- *(rt)* Inline hotspot code - ([8cd9199](https://github.com/0x676e67/wreq/commit/8cd9199ea680c59bcbc4681cec8e8a962b37e37f))\n- Optional enable http2 tracing ([#335](https://github.com/0x676e67/wreq/issues/335)) - ([83918e1](https://github.com/0x676e67/wreq/commit/83918e1dcc1922a1989b7a5f0070081b0efe3c49))\n- Fmt code - ([2feee9c](https://github.com/0x676e67/wreq/commit/2feee9c1da1004530f563a30bfd6e43eb88bd7c0))\n- Simplify dependency version settings - ([f4f1e76](https://github.com/0x676e67/wreq/commit/f4f1e761166887b12cc192a22c29d685eb4046eb))\n- Update examples - ([dece4f0](https://github.com/0x676e67/wreq/commit/dece4f093c5842b5387f0ab2da9aa2bff27db699))\n- Format code - ([85b6795](https://github.com/0x676e67/wreq/commit/85b67951cee90ad3a98a9fceafd5382728c3a98f))\n- Fmt code - ([269d11d](https://github.com/0x676e67/wreq/commit/269d11dfe3356ac97ed73d31f4690417ad3f3a65))\n\n### Deps\n\n- *(boring2)* Pin 4.13.0 version ([#331](https://github.com/0x676e67/wreq/issues/331)) - ([9272524](https://github.com/0x676e67/wreq/commit/9272524fc73e6a32a682e00bec39ff1474ed1703))\n- *(hyper2)* Pin 1.5.0 version ([#330](https://github.com/0x676e67/wreq/issues/330)) - ([a638cd3](https://github.com/0x676e67/wreq/commit/a638cd3a2c248f9bb3eb39f5a077da1b2610e7d9))\n- *(tower)* Pin version v0.5.2 - 
([0973fef](https://github.com/0x676e67/wreq/commit/0973fefe13bd2d8656a0d5ca66bba8f398eed0f9))\n- *(tower-layer)* Remove unused deps ([#322](https://github.com/0x676e67/wreq/issues/322)) - ([e446b61](https://github.com/0x676e67/wreq/commit/e446b61015076209c8b882bb01b2d92eda54cc2e))\n\n### Workflows\n\n- *(linux)* Remove unused deps install - ([4fe26e8](https://github.com/0x676e67/wreq/commit/4fe26e8d7fcbf3dcbabae77d51f4ca37be15573e))\n- Add `rc` version check - ([708e77b](https://github.com/0x676e67/wreq/commit/708e77b697b546bb59b8b777b51a65dc88c9da24))\n\n## New Contributors ❤️\n\n* @bkn9hs made their first contribution in [#328](https://github.com/0x676e67/wreq/pull/328)\n* @UwUDev made their first contribution in [#290](https://github.com/0x676e67/wreq/pull/290)\n\n## [1.5.0](https://github.com/0x676e67/wreq/compare/v1.3.6..v1.5.0) - 2025-01-11\n\n### Features\n\n- *(client)* Add chain settings of client - ([42b08a1](https://github.com/0x676e67/wreq/commit/42b08a15c669573b6e955967e9218b20ee869960))\n- *(client)* Optional cross-origin redirect proxy authentication ([#304](https://github.com/0x676e67/wreq/issues/304)) - ([fcdac5d](https://github.com/0x676e67/wreq/commit/fcdac5d643e65e53597a9d7de6a21bffddb6032c))\n- *(client)* Expose default headers as public API ([#296](https://github.com/0x676e67/wreq/issues/296)) - ([00e4199](https://github.com/0x676e67/wreq/commit/00e419908cc16376015be20ffc426a57ec327b40))\n- *(multipart)* Expose a Form::into_stream() method on async multipart forms ([#303](https://github.com/0x676e67/wreq/issues/303)) - ([f46563f](https://github.com/0x676e67/wreq/commit/f46563f294239bd6924ca4d01ee9c3a07df8a515))\n- *(proxy)* Remove system proxy cache ([#309](https://github.com/0x676e67/wreq/issues/309)) - ([7992c93](https://github.com/0x676e67/wreq/commit/7992c9321979d2f61bc96bbb54a84248a1bb566b))\n- *(tls)* Optional disable SSL renegotiation ([#306](https://github.com/0x676e67/wreq/issues/306)) - 
([c9c0dd3](https://github.com/0x676e67/wreq/commit/c9c0dd301301003e206ff9f3230532b879e2c994))\n\n### Bug Fixes\n\n- Fix `Request` `try_clone` missing variables ([#301](https://github.com/0x676e67/wreq/issues/301)) - ([ca1c0fa](https://github.com/0x676e67/wreq/commit/ca1c0fa19c8d15b153e5e021f851e73c1489f23f))\n\n### Refactor\n\n- *(websocket)* Change parameters to `Cow` types for improved flexibility ([#298](https://github.com/0x676e67/wreq/issues/298)) - ([aff5af9](https://github.com/0x676e67/wreq/commit/aff5af9a6ab7e64269d7b113fe42b1c40325282f))\n- Rename mod `scheme` with `network` - ([dceb375](https://github.com/0x676e67/wreq/commit/dceb37573b65ac172d367b8a5bcd3dd891a34431))\n\n### Documentation\n\n- *(tls)* Update docs - ([f7b564b](https://github.com/0x676e67/wreq/commit/f7b564b4ed115a67a3db5c260a53f93bf27bcb48))\n\n### Performance\n\n- *(pool)* Reduce lock scope to decrease contention ([#308](https://github.com/0x676e67/wreq/issues/308)) - ([6b0c27c](https://github.com/0x676e67/wreq/commit/6b0c27ce0b6d6bb123dde3fc114496b37ad3536f))\n\n### Miscellaneous Tasks\n\n- *(websocket)* Simplify URL scheme matching and error handling logic ([#302](https://github.com/0x676e67/wreq/issues/302)) - ([901b397](https://github.com/0x676e67/wreq/commit/901b397c87dfffaf80e250492d6c3b73022066f4))\n- *(websocket)* Remove deprecated function ([#297](https://github.com/0x676e67/wreq/issues/297)) - ([427edf6](https://github.com/0x676e67/wreq/commit/427edf6e5dbaa0969239bf6073d4c5a4d56baf7a))\n- Annotating default values ​​improves maintainability - ([a043290](https://github.com/0x676e67/wreq/commit/a043290c1e925a002cbbf4c6d2848a6e3073a909))\n- Update websocket bad url handler - ([38eee48](https://github.com/0x676e67/wreq/commit/38eee48b0948c95cd1e3f24eb66284f787545ad0))\n- Add `#[inline]` to `cookie_store_mut` - ([6fc11c5](https://github.com/0x676e67/wreq/commit/6fc11c5f4ad81ded8d37cff685e79476b603a888))\n- Simplify template macro usage for platform-specific config 
([#299](https://github.com/0x676e67/wreq/issues/299)) - ([675f198](https://github.com/0x676e67/wreq/commit/675f1985acf54eb27834393e80e3b0fa2c170aca))\n\n### Build\n\n- *(deps)* Update windows-registry requirement from 0.3.0 to 0.4.0 ([#295](https://github.com/0x676e67/wreq/issues/295)) - ([5a6fab4](https://github.com/0x676e67/wreq/commit/5a6fab4f3a50765afc155f1641cd2558af5c8693))\n- *(deps)* Update env_logger requirement from 0.10.0 to 0.11.6 ([#294](https://github.com/0x676e67/wreq/issues/294)) - ([a483462](https://github.com/0x676e67/wreq/commit/a483462cd97e6ebf6a6df932b39c44578b48bfb8))\n- Fix conditional compilation ([#307](https://github.com/0x676e67/wreq/issues/307)) - ([358a6ec](https://github.com/0x676e67/wreq/commit/358a6ecec2e59bb91ac962ffe7423041b1cb5ce4))\n\n\n## [1.3.6](https://github.com/0x676e67/wreq/compare/v1.3.5..v1.3.6) - 2025-01-08\n\n### Features\n\n- *(websocket)* Add `with_builder` method to modify request builder before sending ([#288](https://github.com/0x676e67/wreq/issues/288)) - ([ff9e9f2](https://github.com/0x676e67/wreq/commit/ff9e9f2cb5f1817c6b0187aaa6095a87e386a3d2))\n- Support `Apple` devices to bind device interface ([#293](https://github.com/0x676e67/wreq/issues/293)) - ([a71a460](https://github.com/0x676e67/wreq/commit/a71a46065b4f96200decc47891333ce699631b3f))\n\n### Bug Fixes\n\n- *(test)* Resolve test failures due to invalid upstream certificate site - ([1897e3a](https://github.com/0x676e67/wreq/commit/1897e3aa51b38f032bf246f57e04df3e3aa5f434))\n\n### Performance\n\n- *(pool)* Reduce `Dst` cloning overhead with `Arc` for `PoolKey` ([#289](https://github.com/0x676e67/wreq/issues/289)) - ([1946826](https://github.com/0x676e67/wreq/commit/194682691d448d1196cf37a34b3e89a3a4af76e9))\n\n### Testing\n\n- *(connector-layer)* Sync upstream connector layers tests ([#285](https://github.com/0x676e67/wreq/issues/285)) - ([9d772f0](https://github.com/0x676e67/wreq/commit/9d772f03cac1c9679afe134fb8e5926df1db199b))\n\n### Miscellaneous 
Tasks\n\n- Remove unused crate path prefix - ([d0ca971](https://github.com/0x676e67/wreq/commit/d0ca971ca58b93c3d1a1f90174a7abd633404eda))\n- Sync upstream `From<http::Response<T>> for Response` - ([954a807](https://github.com/0x676e67/wreq/commit/954a80789bc4fb69fefaa74a2db19767fe2f5bce))\n- Fmt code - ([f3aeb61](https://github.com/0x676e67/wreq/commit/f3aeb61a72943abb33ce33bb1824d46545c3230b))\n- Improved type convert ([#284](https://github.com/0x676e67/wreq/issues/284)) - ([7ab1f2f](https://github.com/0x676e67/wreq/commit/7ab1f2f25734b9af78607b66e0406d644c39fb49))\n\n### Revert\n\n- Remove `From<http::Response<T>> for Response` ([#282](https://github.com/0x676e67/wreq/issues/282)) - ([1e69245](https://github.com/0x676e67/wreq/commit/1e69245677517daaa8ec10ca64d347457925cb38))\n\n## New Contributors ❤️\n\n* @honeyspoon made their first contribution in [#282](https://github.com/0x676e67/wreq/pull/282)\n\n## [1.3.5](https://github.com/0x676e67/wreq/compare/v1.3.3..v1.3.5) - 2025-01-06\n\n### Features\n\n- *(multipart)* Sync upstream file multipart ([#278](https://github.com/0x676e67/wreq/issues/278)) - ([49a3f06](https://github.com/0x676e67/wreq/commit/49a3f06c40942c8b0a600058e769c21dc9d7200a))\n- *(request)* Insert header differentiates between append and overwrite ([#274](https://github.com/0x676e67/wreq/issues/274)) - ([c0026ca](https://github.com/0x676e67/wreq/commit/c0026caaa69ead0d42efba051308c87be21f4ab7))\n- *(request)* Add general HTTP authentication method ([#270](https://github.com/0x676e67/wreq/issues/270)) - ([5c3facb](https://github.com/0x676e67/wreq/commit/5c3facb9c575658b2171e154b8386d54921b0af6))\n\n### Bug Fixes\n\n- *(redirect)* Fix redirect test - ([9f4bd3f](https://github.com/0x676e67/wreq/commit/9f4bd3fc241aaec158b4cd4e7377fb959459f9c6))\n- *(test)* Fix proxy test - ([475752e](https://github.com/0x676e67/wreq/commit/475752e49e438ab3100c9e54082ea9b18bfdb33a))\n- *(timeout)* Fix timeout test - 
([0bf0422](https://github.com/0x676e67/wreq/commit/0bf0422a6b950e9c72ad642927a1781531f17e03))\n- Fix migration hyper1 missing `TokioTimer` ([#275](https://github.com/0x676e67/wreq/issues/275)) - ([a2e8b47](https://github.com/0x676e67/wreq/commit/a2e8b47a80a3272bc621a7d83fd7c8262be6a6d1))\n\n### Documentation\n\n- Update `http2`/`network` docs ([#273](https://github.com/0x676e67/wreq/issues/273)) - ([5edaa93](https://github.com/0x676e67/wreq/commit/5edaa9311c255ceb1204c7bb6c90d2f716f4628b))\n\n### Testing\n\n- *(timeout)* Ignore the test in Tunnel VPN environment ([#279](https://github.com/0x676e67/wreq/issues/279)) - ([156fd1b](https://github.com/0x676e67/wreq/commit/156fd1b6b4f2b8a495dc6b446bd612881bacf3a5))\n- Ignore doc test ([#276](https://github.com/0x676e67/wreq/issues/276)) - ([5275c6b](https://github.com/0x676e67/wreq/commit/5275c6b1eee50108061682758d67524c7a40547f))\n- Remove unused wasm test - ([25166c9](https://github.com/0x676e67/wreq/commit/25166c977aceb05e752d7b973af6ef3a72cbca4e))\n\n### Miscellaneous Tasks\n\n- *(cookie)* Use `RwLock` types that do not poison themselves ([#268](https://github.com/0x676e67/wreq/issues/268)) - ([dcbd79d](https://github.com/0x676e67/wreq/commit/dcbd79dd324483442ccb715ac277b7ec82be93d3))\n- Add all features tests - ([138c43a](https://github.com/0x676e67/wreq/commit/138c43aacb7d753c1ebde15effa6a457a8260dd1))\n- Sync upstream tests - ([b782282](https://github.com/0x676e67/wreq/commit/b78228289d86fb93c1e301bf5b367a0f698b15d8))\n- Remove unused feature - ([668009d](https://github.com/0x676e67/wreq/commit/668009d641294f8ad227083318447455f3995c00))\n- Cargo clippy fix all-features - ([1e45f60](https://github.com/0x676e67/wreq/commit/1e45f60d23d8d03a0567ba2c9bb0b1e414714b4e))\n- Remove unused code - ([aa427f5](https://github.com/0x676e67/wreq/commit/aa427f5ecf01762c5cd45ae1690f6654eb20dc46))\n\n### Build\n\n- Fix linux build ([#277](https://github.com/0x676e67/wreq/issues/277)) - 
([014e026](https://github.com/0x676e67/wreq/commit/014e02647a4c1f2264f7151576c7350425e59cb7))\n\n### Deps\n\n- Replace `futures_core` with `futures_util` ([#269](https://github.com/0x676e67/wreq/issues/269)) - ([ce9ac8d](https://github.com/0x676e67/wreq/commit/ce9ac8d36ba901b3271ddb879dc34bc65e1dd723))\n\n\n## [1.3.3](https://github.com/0x676e67/wreq/compare/v1.3.2..v1.3.3) - 2025-01-05\n\n### Features\n\n- *(mimic)* Add Tor browser `Firefox 128` mimic ([#267](https://github.com/0x676e67/wreq/issues/267)) - ([f69f660](https://github.com/0x676e67/wreq/commit/f69f6605de49c13f44006355d31ad9abaac3e060))\n- *(mimic)* Optional mimic http2 ([#262](https://github.com/0x676e67/wreq/issues/262)) - ([6e44e17](https://github.com/0x676e67/wreq/commit/6e44e17695f91336a19b69cd0ec12843d9a8ca7a))\n\n### Miscellaneous Tasks\n\n- Simplify http2 configuration - ([34700d1](https://github.com/0x676e67/wreq/commit/34700d1ccae4977f2a0a5b34cd4e9a10b68d6ecc))\n\n### Deps\n\n- *(pool)* Replace `futures_channel::mpsc` with `tokio::sync::mpsc` in Hyper ([#264](https://github.com/0x676e67/wreq/issues/264)) - ([f4895fb](https://github.com/0x676e67/wreq/commit/f4895fb8dbb47d7d10563259a500aae57fcf7bb6))\n\n\n## [1.3.2](https://github.com/0x676e67/wreq/compare/v1.3.0..v1.3.2) - 2025-01-04\n\n### Miscellaneous Tasks\n\n- Fix typo - ([0a095ce](https://github.com/0x676e67/wreq/commit/0a095cef2ff9443898c11531be32aa18984a10e2))\n- Rename and update access scope - ([607da50](https://github.com/0x676e67/wreq/commit/607da5005d9e2020582d961e0f0906b90b658681))\n\n\n## [1.3.0](https://github.com/0x676e67/wreq/compare/v1.2.6..v1.3.0) - 2025-01-04\n\n### Refactor\n\n- *(tls)* Refactor Application-layer protocol settings ([#260](https://github.com/0x676e67/wreq/issues/260)) - ([bc8b824](https://github.com/0x676e67/wreq/commit/bc8b8246779509209077506511ad2e8ccd580ba5))\n- Rename `HttpVersionPref` to `AlpnProtos` ([#258](https://github.com/0x676e67/wreq/issues/258)) - 
([e99ec7a](https://github.com/0x676e67/wreq/commit/e99ec7a8aaf8047a726293099cedf8919bf622ba))\n\n### Documentation\n\n- *(tls)* Update docs - ([db3ee6c](https://github.com/0x676e67/wreq/commit/db3ee6c8418afabc05659c76626f775931537369))\n- *(tls)* Update docs - ([ad389e5](https://github.com/0x676e67/wreq/commit/ad389e5c92327e41eb4a3aa239c63d17bd51ec9d))\n- *(tls)* Update docs ([#261](https://github.com/0x676e67/wreq/issues/261)) - ([309e62f](https://github.com/0x676e67/wreq/commit/309e62f47bdd68b5f89cb41bcfa8629517a00e79))\n\n### Miscellaneous Tasks\n\n- *(mimic)* Always inline settings module - ([630e28f](https://github.com/0x676e67/wreq/commit/630e28f529baa21a2d5bf780be2003c3dfac6618))\n- *(tls)* Always inline alps proto len - ([5b33bc5](https://github.com/0x676e67/wreq/commit/5b33bc560cf394ef8022a14acd2602307a7f9535))\n- *(tls)* Cleaner bind calls - ([3ddbb64](https://github.com/0x676e67/wreq/commit/3ddbb64d0f2c7492fc1a6a9a8ff81f23f4e152d1))\n- *(tls)* Renaming cumbersome API names - ([1021cb1](https://github.com/0x676e67/wreq/commit/1021cb10eb0338685b313cb606a1576153ad07cf))\n- Improve verbose certificate configuration ([#256](https://github.com/0x676e67/wreq/issues/256)) - ([67eb333](https://github.com/0x676e67/wreq/commit/67eb333f965724cf1fd40c6314c274aa1ab08c72))\n\n\n## [1.2.6](https://github.com/0x676e67/wreq/compare/v1.2.5..v1.2.6) - 2025-01-03\n\n### Miscellaneous Tasks\n\n- *(tls/ext)* Clearer naming - ([a0f5e64](https://github.com/0x676e67/wreq/commit/a0f5e643dc55379b193e3d644038c79ef81c7a7b))\n- Inline suggestions - ([978198d](https://github.com/0x676e67/wreq/commit/978198d4154c80052f7d889d99fbc6de2435a07b))\n- Simplify method signatures - ([9bdc01d](https://github.com/0x676e67/wreq/commit/9bdc01d75cc8d767470cbacb09980792907d86f2))\n- Internal request for redundant method boundary ([#253](https://github.com/0x676e67/wreq/issues/253)) - ([a252cd1](https://github.com/0x676e67/wreq/commit/a252cd1784c982b378da0afb32793684558326ac))\n\n### Performance\n\n- Build 
request failures return errors instead of panic ([#254](https://github.com/0x676e67/wreq/issues/254)) - ([1dbc67c](https://github.com/0x676e67/wreq/commit/1dbc67c1eed981da6c81f02f535df286f43c571a))\n\n\n## [1.2.5](https://github.com/0x676e67/wreq/compare/v1.2.1..v1.2.5) - 2025-01-02\n\n### Features\n\n- *(client)* Improved set cookie operation ([#252](https://github.com/0x676e67/wreq/issues/252)) - ([e94d742](https://github.com/0x676e67/wreq/commit/e94d74253a3f2b603c82db95343ceca3ec8ff812))\n- *(tls)* Expose `CertCompressionAlgorithm` as public API ([#247](https://github.com/0x676e67/wreq/issues/247)) - ([0a6cbc6](https://github.com/0x676e67/wreq/commit/0a6cbc6660d3b3321d3df219bc5d807c2652c553))\n- *(tls)* Expose `TlsExtension` as public API ([#246](https://github.com/0x676e67/wreq/issues/246)) - ([98a18b3](https://github.com/0x676e67/wreq/commit/98a18b347568ff20db485e78a577ac812c9be38f))\n\n### Bug Fixes\n\n- Align the cfg compilation with the socket2 ([#245](https://github.com/0x676e67/wreq/issues/245)) - ([3122a32](https://github.com/0x676e67/wreq/commit/3122a329f4bfc1acafd8b6b0ad323c6e23db29e5))\n- Fix default TLS configuration hostname not set ([#244](https://github.com/0x676e67/wreq/issues/244)) - ([44b8216](https://github.com/0x676e67/wreq/commit/44b8216858fb1386ca1104b4d56234455e934e2d))\n\n### Refactor\n\n- Rename verbose identifiers for clarity - ([f1ebb79](https://github.com/0x676e67/wreq/commit/f1ebb7906f3f81e7047ad6bbc1387c12ccfe5ef5))\n- Responsibility-based module division - ([c3129ca](https://github.com/0x676e67/wreq/commit/c3129cad6b7405b2c52d4750e337060e4c1175c3))\n\n### Documentation\n\n- Update docs ([#243](https://github.com/0x676e67/wreq/issues/243)) - ([18d8934](https://github.com/0x676e67/wreq/commit/18d89342d4194ab37f5dfe00a3ba65509bc4ff7a))\n\n### Performance\n\n- Improve HTTP request in HTTPS connector ([#242](https://github.com/0x676e67/wreq/issues/242)) - 
([2a99fd4](https://github.com/0x676e67/wreq/commit/2a99fd4ed667a77a8f9fba9607372750202a5c70))\n\n### Miscellaneous Tasks\n\n- *(client)* Avoid explicit type declarations - ([44d22ef](https://github.com/0x676e67/wreq/commit/44d22ef2de58cbd92720505c216e7490498be36b))\n- *(tls)* Simplify certificate loading configuration ([#249](https://github.com/0x676e67/wreq/issues/249)) - ([87275fc](https://github.com/0x676e67/wreq/commit/87275fc96d0cb6f7dee38f4945377a43d95ba377))\n- Add build all features - ([1148155](https://github.com/0x676e67/wreq/commit/114815563c007d56f343d4d55e92005ce487f309))\n- Some insignificant update - ([ad20677](https://github.com/0x676e67/wreq/commit/ad20677e88a0d13cec44f5e2690d0e0c9df506fa))\n- Rename  to - ([a97be9f](https://github.com/0x676e67/wreq/commit/a97be9fdaaf708adb4fc165c1ec8ba5cb11f4a47))\n- Fix closure capture ownership - ([e0c55f0](https://github.com/0x676e67/wreq/commit/e0c55f0bd11a1061dfe9f7f422fada7e87cc08d9))\n\n## New Contributors ❤️\n\n* @sudorf0 made their first contribution\n\n## [1.2.1](https://github.com/0x676e67/wreq/compare/v1.2.0..v1.2.1) - 2024-12-31\n\n### Miscellaneous Tasks\n\n- Using normal array storage - ([3ce9040](https://github.com/0x676e67/wreq/commit/3ce9040e791ab31ea9a8992e9219c771e56863ca))\n\n## New Contributors ❤️\n\n* @coutureone made their first contribution\n* @8176917 made their first contribution\n\n## [1.2.0](https://github.com/0x676e67/wreq/compare/v1.1.2..v1.2.0) - 2024-12-31\n\n### Features\n\n- *(client)* Add HTTP2 `Priority` frame configuration ([#238](https://github.com/0x676e67/wreq/issues/238)) - ([8c75d75](https://github.com/0x676e67/wreq/commit/8c75d7507a35e6dd7ad7d045c7e5ae1e772598dd))\n- Add `Firefox 117` impersonate ([#239](https://github.com/0x676e67/wreq/issues/239)) - ([cae2f6d](https://github.com/0x676e67/wreq/commit/cae2f6df217780ecaa4fd073ef12af597913e321))\n\n\n## [1.1.2](https://github.com/0x676e67/wreq/compare/v1.1.1..v1.1.2) - 2024-12-31\n\n### Features\n\n- Add verify hostname 
configuration ([#237](https://github.com/0x676e67/wreq/issues/237)) - ([3478e11](https://github.com/0x676e67/wreq/commit/3478e1110bc5d4819eec4d66bf2a09369199ca29))\n\n### Miscellaneous Tasks\n\n- Update comment - ([2252652](https://github.com/0x676e67/wreq/commit/22526524f0ccf36763fd2bd90a439c5e95efafd3))\n\n\n## [1.1.1](https://github.com/0x676e67/wreq/compare/v1.1.0..v1.1.1) - 2024-12-30\n\n### Bug Fixes\n\n- *(decoder)* Fix decoding extra empty frame ([#234](https://github.com/0x676e67/wreq/issues/234)) - ([d8118bc](https://github.com/0x676e67/wreq/commit/d8118bc3d141726d2f5e7a8232c8a07f5865efa2))\n\n### Performance\n\n- *(tls)* Use `Bytes` to optimize session key storage space ([#231](https://github.com/0x676e67/wreq/issues/231)) - ([1bd9db0](https://github.com/0x676e67/wreq/commit/1bd9db0d8aceb128a899ad5a0c0a651e10632b10))\n- Improve unnecessary convert when setting cookies ([#233](https://github.com/0x676e67/wreq/issues/233)) - ([2720bc4](https://github.com/0x676e67/wreq/commit/2720bc4e231530825051faf945f67b4d6fe9bb06))\n- `default_headers` will swap default headers ([#232](https://github.com/0x676e67/wreq/issues/232)) - ([3a737f0](https://github.com/0x676e67/wreq/commit/3a737f0eb5cdf40d178c72863a2148a2119b2cca))\n\n### Miscellaneous Tasks\n\n- Remove escape characters - ([0de340c](https://github.com/0x676e67/wreq/commit/0de340cbc495eacb733658e4f249797bda5f32b3))\n- Remove unused import - ([ab0ea9c](https://github.com/0x676e67/wreq/commit/ab0ea9cffccaec71080898ed6fd8ad7432ad2dc3))\n- Cargo clippy --fix - ([7c5369d](https://github.com/0x676e67/wreq/commit/7c5369dc4ee32cebaf7ecb77f946df541fa2eee9))\n- Remove unused code - ([aa9c7d8](https://github.com/0x676e67/wreq/commit/aa9c7d872fff06e5abe3eb5ffbc98c80ca481930))\n\n\n## [1.1.0](https://github.com/0x676e67/wreq/compare/v1.0.1..v1.1.0) - 2024-12-27\n\n### Features\n\n- *(request)* Insert when `json`/`form` does not have `CONTENT_TYPE` header ([#230](https://github.com/0x676e67/wreq/issues/230)) - 
([80c338a](https://github.com/0x676e67/wreq/commit/80c338a835ed9b7015bc63415a44905aa64c61b2))\n- Without compression enabled, no compression header is sent ([#229](https://github.com/0x676e67/wreq/issues/229)) - ([79355d7](https://github.com/0x676e67/wreq/commit/79355d752334955eb27994f8e2c2acef9e828d66))\n\n### Bug Fixes\n\n- Username in URL plus basic_auth() results in two Authorization headers ([#228](https://github.com/0x676e67/wreq/issues/228)) - ([8398835](https://github.com/0x676e67/wreq/commit/8398835855dfd07fe162a8747b703a82aef4ee84))\n\n\n## [1.0.1](https://github.com/0x676e67/wreq/compare/v1.0.0..v1.0.1) - 2024-12-27\n\n### Miscellaneous Tasks\n\n- Cargo clippy --fix - ([389e32a](https://github.com/0x676e67/wreq/commit/389e32a05f97f6dcdbecf8235049da5ce8e37914))\n- Update alpn protocol order ([#226](https://github.com/0x676e67/wreq/issues/226)) - ([d920df3](https://github.com/0x676e67/wreq/commit/d920df3a9bbf02678664f90fab2b815f49c9c067))\n\n\n## [1.0.0](https://github.com/0x676e67/wreq/compare/v1.0.0-rc.3..v1.0.0) - 2024-12-25\n\n### Features\n\n- *(client)* Add `no-keepalive` for `Client` ([#221](https://github.com/0x676e67/wreq/issues/221)) - ([20ac5bf](https://github.com/0x676e67/wreq/commit/20ac5bfc17712dc703e479c6e88ac071ae760bdd))\n- Request specific `address`/`interface` override ([#223](https://github.com/0x676e67/wreq/issues/223)) - ([7ea06e1](https://github.com/0x676e67/wreq/commit/7ea06e1ac1b0073311596c643f1d92dbafeffa2b))\n\n### Miscellaneous Tasks\n\n- Cargo clippy --fix - ([8d766f6](https://github.com/0x676e67/wreq/commit/8d766f6601503d7a2a2ad62e7d416c67ae6d46f8))\n\n\n## [1.0.0-rc.3](https://github.com/0x676e67/wreq/compare/v1.0.0-rc.2..v1.0.0-rc.3) - 2024-12-25\n\n### Features\n\n- Optional to enable impersonate customization ([#217](https://github.com/0x676e67/wreq/issues/217)) - ([f68de0b](https://github.com/0x676e67/wreq/commit/f68de0b6d5048014b83d887005a5c838f5eb1d31))\n\n### Performance\n\n- Avoiding Unnecessary Copies 
([#219](https://github.com/0x676e67/wreq/issues/219)) - ([6f6c660](https://github.com/0x676e67/wreq/commit/6f6c6609aaf78d508d5e7184fd92ce99d6d0f70e))\n\n### Miscellaneous Tasks\n\n- *(util/client)* Remove extra clones - ([72697ca](https://github.com/0x676e67/wreq/commit/72697ca2455487bf856ab256433b3b7779dea433))\n- Fix clippy accidentally deleted code ([#220](https://github.com/0x676e67/wreq/issues/220)) - ([200e3f4](https://github.com/0x676e67/wreq/commit/200e3f4e487c8010a37c929c2ceefaf2dc61996d))\n- Update macros ([#218](https://github.com/0x676e67/wreq/issues/218)) - ([2f977a1](https://github.com/0x676e67/wreq/commit/2f977a19196a67893b9dd4d74daf6b76632187fe))\n- Remove unnecessary `Arc` wrapper from `redirect`/`base_url` ([#216](https://github.com/0x676e67/wreq/issues/216)) - ([3787346](https://github.com/0x676e67/wreq/commit/3787346539188082a8bf58536cf26baae32780e1))\n\n\n## [1.0.0-rc.2](https://github.com/0x676e67/wreq/compare/v1.0.0-rc.1..v1.0.0-rc.2) - 2024-12-24\n\n### Features\n\n- Allow pluggable tower layers in connector service stack ([#214](https://github.com/0x676e67/wreq/issues/214)) - ([4b07f13](https://github.com/0x676e67/wreq/commit/4b07f139570e3f072b68c654bfd5b29a5ea47341))\n\n### Bug Fixes\n\n- Propagate Body::size_hint when wrapping bodies ([#213](https://github.com/0x676e67/wreq/issues/213)) - ([e05a781](https://github.com/0x676e67/wreq/commit/e05a781a7b2be9a39cb6c9a8689c389e9a8f92ec))\n\n### Miscellaneous Tasks\n\n- Remove `clone` from `Dst` - ([9885d91](https://github.com/0x676e67/wreq/commit/9885d91c7cc199b4edfb1296581b27bda368b148))\n- Remove `new` method for `InnerRequestBuilder` ([#212](https://github.com/0x676e67/wreq/issues/212)) - ([6b64a60](https://github.com/0x676e67/wreq/commit/6b64a6010c9ae3835427aace4c25ea25eaee4588))\n- Cargo clippy --fix - ([908b284](https://github.com/0x676e67/wreq/commit/908b2842c27b3179f8f9509715c8a0ee46f0cb77))\n\n\n## [1.0.0-rc.1](https://github.com/0x676e67/wreq/compare/v0.33.5..v1.0.0-rc.1) - 
2024-12-24\n\n### Features\n\n- *(body)* Improve interop with hyper for `Body` type - ([ef73639](https://github.com/0x676e67/wreq/commit/ef7363920143efec31b5400d0ea408699f1053e7))\n- *(client)* Request specific proxy override ([#211](https://github.com/0x676e67/wreq/issues/211)) - ([a547b0e](https://github.com/0x676e67/wreq/commit/a547b0e4c11bdd9ce990af891eaaed9d1c004ab1))\n- *(client)* Add impl `Service<http::Request<Body>>` for `Client` ([#202](https://github.com/0x676e67/wreq/issues/202)) - ([88dcf59](https://github.com/0x676e67/wreq/commit/88dcf59056c16d8b6fc6bec3a082d1be1c4e3df7))\n- *(client)* Export `http1`/`http2` Builder as public API - ([2ce96f6](https://github.com/0x676e67/wreq/commit/2ce96f6f61daa5a08a055ebfc05d4cf231126323))\n- *(client)* Export `http1`/`http2` Builder as public API ([#199](https://github.com/0x676e67/wreq/issues/199)) - ([fb3d72b](https://github.com/0x676e67/wreq/commit/fb3d72b78deca6f51e201ab803a7e3644c9286a7))\n- *(client)* Add the maximum safe retry count for HTTP/2 connections ([#196](https://github.com/0x676e67/wreq/issues/196)) - ([2f8ff8c](https://github.com/0x676e67/wreq/commit/2f8ff8ca783f1ef88950f391b29034aa03636cff))\n- Support request setting HTTP override ALPN ([#188](https://github.com/0x676e67/wreq/issues/188)) - ([f3af980](https://github.com/0x676e67/wreq/commit/f3af9801761915ac2f031314e9d46ff31538050e))\n- Hyper v1 upgrade ([#187](https://github.com/0x676e67/wreq/issues/187)) - ([3441ee7](https://github.com/0x676e67/wreq/commit/3441ee76640b3d9273e7b3617972ef683655cc3a))\n\n### Bug Fixes\n\n- *(http2)* Fix http2 header frame initial `stream_id` settings ([#185](https://github.com/0x676e67/wreq/issues/185)) - ([2f773be](https://github.com/0x676e67/wreq/commit/2f773be0da6e963ca823ddbe0e2d9583a8b62aa7))\n- Fix http protocol auto-negotiation ([#189](https://github.com/0x676e67/wreq/issues/189)) - ([d144b63](https://github.com/0x676e67/wreq/commit/d144b6356a01b561d50f774243fa3555ab9d7b52))\n\n### Miscellaneous Tasks\n\n- 
*(pool)* Use `Mutex` types that do not poison themselves ([#192](https://github.com/0x676e67/wreq/issues/192)) - ([dec4d82](https://github.com/0x676e67/wreq/commit/dec4d8265356a065ff8a406344898ebc19895e71))\n- *(tls)* Disable custom TLS builder ([#208](https://github.com/0x676e67/wreq/issues/208)) - ([bb12473](https://github.com/0x676e67/wreq/commit/bb12473723a73139226c6b4845acc85815b543c7))\n- *(tls)* Compile-time calculation of extended permutation ([#207](https://github.com/0x676e67/wreq/issues/207)) - ([871ab3b](https://github.com/0x676e67/wreq/commit/871ab3bc4838842d60c300291e1c6c4f83d1b58c))\n- Refactor connect network request extension ([#210](https://github.com/0x676e67/wreq/issues/210)) - ([f4e67ef](https://github.com/0x676e67/wreq/commit/f4e67ef76340c6b5d21944385339b723829c697a))\n- By default, impersonate from a string is disabled ([#206](https://github.com/0x676e67/wreq/issues/206)) - ([35f7f11](https://github.com/0x676e67/wreq/commit/35f7f11c67638af54e79565d679d55068f162f7a))\n- Removed TLS config examples to prevent misconfigurations by inexperienced users ([#205](https://github.com/0x676e67/wreq/issues/205)) - ([48d1f5b](https://github.com/0x676e67/wreq/commit/48d1f5b86a885a86a3be2af1694d6328b360f1f9))\n- Disable the exposure of internal connect dst API ([#203](https://github.com/0x676e67/wreq/issues/203)) - ([35994c2](https://github.com/0x676e67/wreq/commit/35994c25ded24cfcb57877cf4e1b859e39b989f7))\n- Remove unused code - ([663e346](https://github.com/0x676e67/wreq/commit/663e346bce7bfd0090374f99df1b6152ca7eb644))\n- Remove unused code - ([0d4f06f](https://github.com/0x676e67/wreq/commit/0d4f06f7ab5769aa80828004f7da74df6a63afe9))\n- Deleted permutation storage - ([39e1ef6](https://github.com/0x676e67/wreq/commit/39e1ef6ccd2382c8b9a00873341092df4876df7f))\n- Use shorter feature name - ([4246a0f](https://github.com/0x676e67/wreq/commit/4246a0fd6d72ee68d0441de57b3c84f2e9c5b879))\n- Remove dead code - 
([f516b0a](https://github.com/0x676e67/wreq/commit/f516b0a85e48edfd08848fa1be3af7451fd2a7fd))\n- Refactor connect layer detail handle ([#198](https://github.com/0x676e67/wreq/issues/198)) - ([eff1fee](https://github.com/0x676e67/wreq/commit/eff1fee3489f01d47bd406f8632a303863dc1522))\n- Refactor connect mod - ([7ecbd25](https://github.com/0x676e67/wreq/commit/7ecbd25f2611161a539bd57e8d6b4945f6ab433a))\n- Remove unused code - ([4ef7db6](https://github.com/0x676e67/wreq/commit/4ef7db685884d91aad7221753a9280f6fd1e5891))\n- Cleaned up some unnecessary code ([#194](https://github.com/0x676e67/wreq/issues/194)) - ([1304ec1](https://github.com/0x676e67/wreq/commit/1304ec14e003c96a3d9815a43502d0d886e0ca61))\n- Simplified TLS TCP stream abstraction ([#193](https://github.com/0x676e67/wreq/issues/193)) - ([273ca6c](https://github.com/0x676e67/wreq/commit/273ca6cdc732419703162a178526fa899db9087c))\n- Remove unused code ([#191](https://github.com/0x676e67/wreq/issues/191)) - ([d586d56](https://github.com/0x676e67/wreq/commit/d586d563add0343cd4172974afc035b563c1897a))\n- Cargo fmt --all - ([6a114f9](https://github.com/0x676e67/wreq/commit/6a114f974593e95cb21b917f54716b779a4a41d3))\n- Static calc extension permutation ([#184](https://github.com/0x676e67/wreq/issues/184)) - ([1da5d42](https://github.com/0x676e67/wreq/commit/1da5d42ebbcff2eaf304dacd58e90f9b6412023f))\n- Macros simplify some debug implement ([#183](https://github.com/0x676e67/wreq/issues/183)) - ([5a92fa5](https://github.com/0x676e67/wreq/commit/5a92fa58714b635c4cbc53299b8b49b9b9d11155))\n- Remove dead code ([#182](https://github.com/0x676e67/wreq/issues/182)) - ([65391fb](https://github.com/0x676e67/wreq/commit/65391fb83729bcfd39ef548ebd9d24218c86c4f3))\n\n### Deps\n\n- *(tokio-util)* V0.7.0 ([#190](https://github.com/0x676e67/wreq/issues/190)) - ([303abf6](https://github.com/0x676e67/wreq/commit/303abf64952d97aabd21243b9824c9d345c25343))\n\n## New Contributors ❤️\n\n* @invalid-email-address made their first 
contribution\n\n## [0.33.5](https://github.com/0x676e67/wreq/compare/v0.33.3..v0.33.5) - 2024-12-19\n\n### Features\n\n- *(client)* Http1 sends lowercase request headers by default to improve performance ([#179](https://github.com/0x676e67/wreq/issues/179)) - ([b296e0e](https://github.com/0x676e67/wreq/commit/b296e0eab4b4213516830471cf1b42de2481049f))\n- Add `firefox 133` impersonate ([#181](https://github.com/0x676e67/wreq/issues/181)) - ([6710421](https://github.com/0x676e67/wreq/commit/6710421bc53916f6762053e27f1103e7f54cdd06))\n\n\n## [0.33.3](https://github.com/0x676e67/wreq/compare/v0.33.1..v0.33.3) - 2024-12-16\n\n### Bug Fixes\n\n- *(proxy)* Fix `ws`/`wss` upgrade support for `http`/`https` proxy ([#176](https://github.com/0x676e67/wreq/issues/176)) - ([8c3881c](https://github.com/0x676e67/wreq/commit/8c3881c87a7cbfb91701f37eb697c04b2863649d))\n\n\n## [0.33.1](https://github.com/0x676e67/wreq/compare/v0.33.0..v0.33.1) - 2024-12-16\n\n### Miscellaneous Tasks\n\n- Avoiding setup bloat when customizing your DNS resolver ([#174](https://github.com/0x676e67/wreq/issues/174)) - ([bc870c5](https://github.com/0x676e67/wreq/commit/bc870c542710ec548c2292ba3440490357b76e33))\n- Show clear errors when TLS connector build fails ([#173](https://github.com/0x676e67/wreq/issues/173)) - ([f722ce6](https://github.com/0x676e67/wreq/commit/f722ce6578d872008a4a7c64fbbba8ddddb14db4))\n\n\n## [0.33.0] - 2024-12-15\n\n### Features\n\n- *(async/client)* Add try get user agent - ([c72eed6](https://github.com/0x676e67/wreq/commit/c72eed679d380693e39155d63b63284f51bccc7a))\n- *(client)* Request specific cookie store override ([#171](https://github.com/0x676e67/wreq/issues/171)) - ([1357a3c](https://github.com/0x676e67/wreq/commit/1357a3ccfd09b874c2937dde5c0988281a3747c9))\n- *(client)* Add support for base URL parameter - ([6101905](https://github.com/0x676e67/wreq/commit/610190586a67b54ea5feb88d2cdbbc215bc8b9fa))\n- *(client)* Add support for base URL parameter 
([#159](https://github.com/0x676e67/wreq/issues/159)) - ([30530ce](https://github.com/0x676e67/wreq/commit/30530ce80149abb2da1c00d6ef8f752aea963d06))\n- *(client)* Request specific redirect policy override ([#147](https://github.com/0x676e67/wreq/issues/147)) - ([cfedb58](https://github.com/0x676e67/wreq/commit/cfedb583f0df0f28c12799b2cc0e93ab2d86b10c))\n- *(client)* Set `content-length` in advance for header sorting ([#144](https://github.com/0x676e67/wreq/issues/144)) - ([755cabd](https://github.com/0x676e67/wreq/commit/755cabde8c4edf91c7822ef4c08e7ce95bc2f3fe))\n- *(client)* Add proxy management APIs: set, append, and clear proxies ([#132](https://github.com/0x676e67/wreq/issues/132)) - ([966fb0f](https://github.com/0x676e67/wreq/commit/966fb0f05c514b5c11c8ad18b158444a5b882f2e))\n- *(client)* Add address/interface level connection pool ([#123](https://github.com/0x676e67/wreq/issues/123)) - ([877c30f](https://github.com/0x676e67/wreq/commit/877c30fc6c308fc116062622ea48f5e2568d9c19))\n- *(client)* Support proxy-level connection pool ([#122](https://github.com/0x676e67/wreq/issues/122)) - ([6e4aff1](https://github.com/0x676e67/wreq/commit/6e4aff11a5268d9c39f91bd42585b610fe3f51db))\n- *(client)* Limit number of connections in pool ([#118](https://github.com/0x676e67/wreq/issues/118)) - ([326d415](https://github.com/0x676e67/wreq/commit/326d41536b07592b2ba0b591b57aa7cd77e5108f))\n- *(client)* Greatly improve the speed of creating clients ([#108](https://github.com/0x676e67/wreq/issues/108)) - ([27e8a55](https://github.com/0x676e67/wreq/commit/27e8a55f698fda9d0e4c42964f1bc5d580bd539b))\n- *(client)* Added async client creation to reduce blocking of async runtime ([#105](https://github.com/0x676e67/wreq/issues/105)) - ([b7f36dd](https://github.com/0x676e67/wreq/commit/b7f36dd1961304bf332780b4ec04330cb9fcb975))\n- *(client)* Optional configuration of Client TLS extension ([#78](https://github.com/0x676e67/wreq/issues/78)) - 
([bab6cb6](https://github.com/0x676e67/wreq/commit/bab6cb6b766806096e083832c837f1353a22b99b))\n- *(client)* Default send header names as title case (only http1) ([#61](https://github.com/0x676e67/wreq/issues/61)) - ([bf91fff](https://github.com/0x676e67/wreq/commit/bf91fffcbd91f4d92a53c5ad5bb1c5acf48606ee))\n- *(client)* Adaptively select and upgrade the websocket connector ([#48](https://github.com/0x676e67/wreq/issues/48)) - ([b76070c](https://github.com/0x676e67/wreq/commit/b76070c4c3d0f48909a0be8e686ef7bd95093341))\n- *(client)* Add `impersonate_with_headers` allows optionally setting request headers ([#128](https://github.com/0x676e67/wreq/issues/128)) - ([eca7cd4](https://github.com/0x676e67/wreq/commit/eca7cd4abbf030da57b92e5eb2dfa0b35ad153ee))\n- *(client)* Suggest `inline` to the compiler ([#122](https://github.com/0x676e67/wreq/issues/122)) - ([532ca84](https://github.com/0x676e67/wreq/commit/532ca84a96f085ad04fc7706c310198317ad5ed0))\n- *(client)* Simplify client configuration ([#110](https://github.com/0x676e67/wreq/issues/110)) - ([c12dce6](https://github.com/0x676e67/wreq/commit/c12dce66658ba610d670090744d0397ff0068c07))\n- *(client)* Simplify the header configuration process - ([4a3f544](https://github.com/0x676e67/wreq/commit/4a3f54414892313eeabc5f3e602e844d1978c8aa))\n- *(client)* Allow binding interface ([#92](https://github.com/0x676e67/wreq/issues/92)) - ([3156086](https://github.com/0x676e67/wreq/commit/31560869cc1d02323bc8c330b3415fe3f02ad389))\n- *(client)* Add custom header order support ([#83](https://github.com/0x676e67/wreq/issues/83)) - ([4680b8a](https://github.com/0x676e67/wreq/commit/4680b8a69c7d9a33d07b13d44ddfa92a2df28c2a))\n- *(client)* Add ability to set proxies/address after client has been initialised ([#34](https://github.com/0x676e67/wreq/issues/34)) - ([837266d](https://github.com/0x676e67/wreq/commit/837266dcb80a0b8b5670675b851b580206ae78a1))\n- *(client)* Support client proxy settings 
([#32](https://github.com/0x676e67/wreq/issues/32)) - ([30c0e2a](https://github.com/0x676e67/wreq/commit/30c0e2a6e4bfd1327b0ac1ad6f9e9c35e69dc632))\n- *(client)* Support impersonate webSocket - ([d3c6dbf](https://github.com/0x676e67/wreq/commit/d3c6dbf272e7b6778b37b10f13cd71df67c1e791))\n- *(client)* Optional enable permute_extensions - ([1aa849f](https://github.com/0x676e67/wreq/commit/1aa849fd4ad77c30815a9a9cd71838a0274f628f))\n- *(client)* Optional enable_ech_grease, only effective for Chrome - ([335e038](https://github.com/0x676e67/wreq/commit/335e03848228292cfc74d3dc90695bc68db8a7d4))\n- *(client)* Support configured IPv4 or IPv6 address (depending on host's preferences) before connection - ([b1f6203](https://github.com/0x676e67/wreq/commit/b1f620332640b57cc71a5cfbe718b1e81f93a1e5))\n- *(connect)* Reduce unnecessary connection overhead ([#62](https://github.com/0x676e67/wreq/issues/62)) - ([225ffb9](https://github.com/0x676e67/wreq/commit/225ffb9ef3834e78570f53b62e62e9c6df451d34))\n- *(connect)* Add PSK extension ([#52](https://github.com/0x676e67/wreq/issues/52)) - ([04a95ab](https://github.com/0x676e67/wreq/commit/04a95ab8d3f2feac429df28cb2ad258edd8ad45e))\n- *(connector)* Using session cache to delay initialization of connector ([#78](https://github.com/0x676e67/wreq/issues/78)) - ([8bdb826](https://github.com/0x676e67/wreq/commit/8bdb8264d1fe039d3366e78880005470c3fb98fb))\n- *(connector)* Enable encrypted client hello - ([4a577a1](https://github.com/0x676e67/wreq/commit/4a577a18a06b2fb930e1c2b13cd92ec0c6b05e24))\n- *(dns)* Export dns resolver `HickoryDnsResolver` ([#55](https://github.com/0x676e67/wreq/issues/55)) - ([6907f48](https://github.com/0x676e67/wreq/commit/6907f48ae16f538164c3550802a9a269eeeca2d1))\n- *(dns)* Optional `LookupIpStrategy` for `hickory_dns` ([#33](https://github.com/0x676e67/wreq/issues/33)) - ([7e6847a](https://github.com/0x676e67/wreq/commit/7e6847af02f8c8fb38ac0b38e80ca233b9b0d243))\n- *(dns)* Enable happy eyeballs when using 
hickory-dns ([#115](https://github.com/0x676e67/wreq/issues/115)) - ([e300a2d](https://github.com/0x676e67/wreq/commit/e300a2d314364a8cf4a269891c065f01a9f2b99b))\n- *(extension)* Set application protocol (ALPN) for http1 ([#104](https://github.com/0x676e67/wreq/issues/104)) - ([9ba260f](https://github.com/0x676e67/wreq/commit/9ba260f5dd0e818f9ec1acc176606ff4bd527d10))\n- *(feature)* Optional enable websocket - ([28270bf](https://github.com/0x676e67/wreq/commit/28270bf02cb26513c36f927497ff5ef898d373a9))\n- *(http2)* Exposing Http2Settings fields ([#75](https://github.com/0x676e67/wreq/issues/75)) - ([15ead8e](https://github.com/0x676e67/wreq/commit/15ead8ec5bd32e1bf47844bd6c87c463ace103db))\n- *(http2)* Add `http2_max_frame_size` settings ([#73](https://github.com/0x676e67/wreq/issues/73)) - ([9a69087](https://github.com/0x676e67/wreq/commit/9a6908756613fdd00b65895958998bbb1e73e493))\n- *(http2)* Add headers frame default priority ([#106](https://github.com/0x676e67/wreq/issues/106)) - ([e1927dc](https://github.com/0x676e67/wreq/commit/e1927dcb05af5db69221cf60b6f6156c25e5e97d))\n- *(http2)* Optimize http2 frame order settings ([#80](https://github.com/0x676e67/wreq/issues/80)) - ([e381f66](https://github.com/0x676e67/wreq/commit/e381f66b4e4289a867d1dd9ce1b7981b32a07f21))\n- *(impersonate)* Add Chrome 130 impersonate ([#65](https://github.com/0x676e67/wreq/issues/65)) - ([ebeba7d](https://github.com/0x676e67/wreq/commit/ebeba7de534dc1da6c772bafca3af0f208fc9c42))\n- *(impersonate)* Add `Safari iPad 18` impersonate ([#10](https://github.com/0x676e67/wreq/issues/10)) - ([304b1bd](https://github.com/0x676e67/wreq/commit/304b1bd5f1d9561b190f283be89a7f15ef587f53))\n- *(impersonate)* Add Safari 18 impersonate - ([acbcbf8](https://github.com/0x676e67/wreq/commit/acbcbf8c578fdb8aff077036ade0b12f403df2df))\n- *(impersonate)* Add Chrome 128 impersonate ([#130](https://github.com/0x676e67/wreq/issues/130)) - 
([c787890](https://github.com/0x676e67/wreq/commit/c78789056b64e7f383f3a73b6913398b3d9857c4))\n- *(impersonate)* Add `Safari17_0` impersonate ([#71](https://github.com/0x676e67/wreq/issues/71)) - ([62f998e](https://github.com/0x676e67/wreq/commit/62f998e89766714def861e732308096dba8da1a4))\n- *(impersonate)* Reuse Safari cipher list in groups ([#65](https://github.com/0x676e67/wreq/issues/65)) - ([06efa36](https://github.com/0x676e67/wreq/commit/06efa366832a579bc389378d5af955ab0f226eed))\n- *(impersonate)* Export the Impersonate custom extension configuration ([#64](https://github.com/0x676e67/wreq/issues/64)) - ([9233546](https://github.com/0x676e67/wreq/commit/9233546c429ffa590e7e6143e07c7769cef45ef3))\n- *(impersonate)* Optimize reuse of impersonate configuration ([#61](https://github.com/0x676e67/wreq/issues/61)) - ([f369748](https://github.com/0x676e67/wreq/commit/f3697488aa0896bb68a8da496dc52242f9a98aa5))\n- *(impersonate)* Add Edge_127 impersonate ([#59](https://github.com/0x676e67/wreq/issues/59)) - ([c9f8861](https://github.com/0x676e67/wreq/commit/c9f8861d1e46e7526c6d8fac22126e74ed5987f0))\n- *(impersonate)* Optimize TLS connector context handle ([#37](https://github.com/0x676e67/wreq/issues/37)) - ([dc3aadc](https://github.com/0x676e67/wreq/commit/dc3aadc2b897404569f2d2b3c34312788834acb2))\n- *(impersonate)* Add Safari_17_5 impersonate - ([bb44019](https://github.com/0x676e67/wreq/commit/bb44019174143d9277c1743668f1a194d32e022e))\n- *(impersonate)* Add Safari_17_5 impersonate ([#28](https://github.com/0x676e67/wreq/issues/28)) - ([aa975df](https://github.com/0x676e67/wreq/commit/aa975df80a7515d629471dea3da9c1b50bfe9448))\n- *(impersonate)* Add Safari_IOS_17_4_1 impersonate - ([8be0f37](https://github.com/0x676e67/wreq/commit/8be0f37945360ef0e835afb351502a3385e03d39))\n- *(impersonate)* Add Safari_IOS_16_5 impersonate - ([ebfb961](https://github.com/0x676e67/wreq/commit/ebfb9616b7b3f0e9d89b5e320f6997414853f383))\n- *(impersonate)* Specification version 
number match - ([0c23082](https://github.com/0x676e67/wreq/commit/0c23082929fadf77dcc0dab6b668a541655c4994))\n- *(impersonate)* Add Chrome124 impersonate - ([f63d081](https://github.com/0x676e67/wreq/commit/f63d081b24b6820e13e63b867f3306387780e181))\n- *(impersonate)* Add Safari_17_4_1 impersonate - ([bd9f4c1](https://github.com/0x676e67/wreq/commit/bd9f4c129c24088261aff358943f74db1c27067a))\n- *(impersonate)* Add Safari_IOS_17_2 impersonate - ([e84fb19](https://github.com/0x676e67/wreq/commit/e84fb1970565701d6b838c3e80b0e9288a98122c))\n- *(impersonate)* Add Chrome123 impersonate - ([eb6744b](https://github.com/0x676e67/wreq/commit/eb6744b785424609cd1079d06164badf583199c8))\n- *(impersonate)* Improve fingerprint OkHttp fingerprint UserAgent - ([4ce6850](https://github.com/0x676e67/wreq/commit/4ce68504b73b3c57388e2e818cc81fcd3525c06a))\n- *(impersonate)* Optimize the overhead of parsing request headers at runtime - ([b0af7fa](https://github.com/0x676e67/wreq/commit/b0af7fa875310144a783298e39ad6c08a844efd2))\n- *(impersonate)* Add Edge122 impersonate - ([2e73827](https://github.com/0x676e67/wreq/commit/2e73827ac1c935e423741f620874f1c997c2cf97))\n- *(impersonate)* Optimize the overhead of parsing request headers at runtime - ([63b4dbf](https://github.com/0x676e67/wreq/commit/63b4dbf1b2db96476ab003077572c75321f01a40))\n- *(impersonate)* Add Safari17_2_1 impersonate - ([44f5933](https://github.com/0x676e67/wreq/commit/44f593391b3097e07ef8c64382f33451a07e201d))\n- *(impersonate)* Add Edge101 impersonate - ([5e66c0d](https://github.com/0x676e67/wreq/commit/5e66c0da426f21f73d42e2fbf79113bdbc039a8f))\n- *(impersonate)* Add Edge99 impersonate - ([ea51acf](https://github.com/0x676e67/wreq/commit/ea51acf5cee796f50fce1c39f9d0b3d52fc197c5))\n- *(impersonate)* Add Safari16_5 impersonate - ([9a919ff](https://github.com/0x676e67/wreq/commit/9a919ff72b6baf750949a35cf10e0eab961dee6b))\n- *(impersonate)* Add Chrome117 impersonate - 
([0d0ee83](https://github.com/0x676e67/wreq/commit/0d0ee83421269bb8d5948984e9a02cc9d5f7cb44))\n- *(impersonate)* Improve safari fingerprint impersonate - ([0b62959](https://github.com/0x676e67/wreq/commit/0b62959fbf6ffd35d91d710e6ce8f3846bc6026d))\n- *(impersonate)* Add Chrome101 impersonate - ([02a0a17](https://github.com/0x676e67/wreq/commit/02a0a1704e3e015c8884d70b8c0404c19858c42f))\n- *(impersonate)* Add Chrome100 impersonate - ([2c1549b](https://github.com/0x676e67/wreq/commit/2c1549b1a5e6647fd9732c01ff4325616a6be941))\n- *(impersonate)* Add Chrome120 impersonate - ([fe63a86](https://github.com/0x676e67/wreq/commit/fe63a86290e0d7b397e1789de805eb89dc91e2d0))\n- *(impersonate)* Add Safari16 impersonate - ([4e4701f](https://github.com/0x676e67/wreq/commit/4e4701f3309fc34da40b1fbd65e9b4f944ee2a9f))\n- *(impersonate)* Add Safari15_6_1 impersonate - ([86e17a0](https://github.com/0x676e67/wreq/commit/86e17a05097cdd82dbaa90c1c53d7c82a7042a5a))\n- *(impersonate)* Add Safari 15_3/15_5 Impersonate - ([0af1670](https://github.com/0x676e67/wreq/commit/0af1670952a94b7fbd63222b89656e8ec1889e97))\n- *(impersonate)* Add Chrome v116 Impersonate - ([13971bd](https://github.com/0x676e67/wreq/commit/13971bdaf3d9c0c5c6c6e7455c0bd51a82cbcffd))\n- *(impersonate)* Add Chrome v119 Impersonate - ([1ce01d7](https://github.com/0x676e67/wreq/commit/1ce01d77263b478992a67aeb05245949386029fd))\n- *(impersonate)* Use the default locations of trusted certificates for verification. 
- ([6b20712](https://github.com/0x676e67/wreq/commit/6b207127ead62ab81aba9984e9e62e8042504233))\n- *(impersonate)* Remove max_concurrent_streams for v118 - ([fbcf65f](https://github.com/0x676e67/wreq/commit/fbcf65faa6277e6f9946f65145ce2c29581e3220))\n- *(impersonate)* Add Chrome v118 Impersonate - ([f9a097d](https://github.com/0x676e67/wreq/commit/f9a097dd5d5c8a9b070fa6e2d7629a40d1dd791b))\n- *(impersonate)* Add Safari 12 Impersonate - ([b5454f7](https://github.com/0x676e67/wreq/commit/b5454f7263849309544698eceefb2833419f669e))\n- *(impersonate)* Support more OkHttp fingerprints - ([43e00ed](https://github.com/0x676e67/wreq/commit/43e00ed237c4aafb9a6abfe3f22d74c000343647))\n- *(impersonate)* Add OkHttp5-alpha Impersonate - ([a172d90](https://github.com/0x676e67/wreq/commit/a172d90a1ac6952314403441b9d20f0e2eae748a))\n- *(impersonate)* Add OkHttp3 Impersonate - ([754f58d](https://github.com/0x676e67/wreq/commit/754f58dedaf67502b7b0364b8554ab629b8e0c09))\n- *(impersonate)* Support disable certs verification - ([cffe303](https://github.com/0x676e67/wreq/commit/cffe303cd1acfc99eab0ca43752c3c343d37a540))\n- *(multipart)* Adds support for manually setting size - ([2ca0e26](https://github.com/0x676e67/wreq/commit/2ca0e26cfa0f7ffd6061a453fe71b06d490c3ea9))\n- *(proxy)* Optional disable internal proxy cache ([#92](https://github.com/0x676e67/wreq/issues/92)) - ([45da58f](https://github.com/0x676e67/wreq/commit/45da58fcb047efebe583d736c8d5fed18742ec0f))\n- *(proxy)* Add support for SOCKS4 ([#27](https://github.com/0x676e67/wreq/issues/27)) - ([533059a](https://github.com/0x676e67/wreq/commit/533059a2023fef19bb7276bcc6bf58323353b09d))\n- *(proxy)* Use  instead of  for reading proxy settings on Windows ([#116](https://github.com/0x676e67/wreq/issues/116)) - ([4918e4d](https://github.com/0x676e67/wreq/commit/4918e4d6b813e4f9a7f2b9188a9a28d9a458e1f0))\n- *(proxy)* Adds NO_PROXY environment variable support ([#877](https://github.com/0x676e67/wreq/issues/877)) - 
([6914091](https://github.com/0x676e67/wreq/commit/691409158273505eb43353c3936759df0ddd7b28))\n- *(redirect)* Expose method for accessing the previous and next request ([#148](https://github.com/0x676e67/wreq/issues/148)) - ([bdbc7f1](https://github.com/0x676e67/wreq/commit/bdbc7f1c40d0e3c64b946a3137f5e91530c2acf1))\n- *(request)* Add `with_host_header` method for populating Host header ([#142](https://github.com/0x676e67/wreq/issues/142)) - ([33b7e21](https://github.com/0x676e67/wreq/commit/33b7e21e7f2683a6a65e3d92321e07516b52e5af))\n- *(tls)* Dynamically configure WebSocket TLS connection alpn protos ([#104](https://github.com/0x676e67/wreq/issues/104)) - ([1918892](https://github.com/0x676e67/wreq/commit/1918892a1f9956274983a023c1572a80f1b514e6))\n- *(tls)* No additional WebSocket connector is needed for HTTP/1 client ([#81](https://github.com/0x676e67/wreq/issues/81)) - ([a4ffa85](https://github.com/0x676e67/wreq/commit/a4ffa85e1f350126c7b5a7f8b954588e9c6b6f63))\n- *(tls)* Update session ticket setting - ([0942894](https://github.com/0x676e67/wreq/commit/0942894ac9a9507d76150ec7c4a9800f2981be65))\n- *(tls)* Implement Debug for TlsSettings ([#80](https://github.com/0x676e67/wreq/issues/80)) - ([a88712a](https://github.com/0x676e67/wreq/commit/a88712a4448d8dd72d5f678cce731b7b4d3dc67c))\n- *(tls)* Add option `session_ticket` extension ([#79](https://github.com/0x676e67/wreq/issues/79)) - ([ea5c8f1](https://github.com/0x676e67/wreq/commit/ea5c8f1273abf6ff93f1aa5e3dc7869de29378b0))\n- *(tls)* Expose more custom TLS settings ([#76](https://github.com/0x676e67/wreq/issues/76)) - ([ef880a7](https://github.com/0x676e67/wreq/commit/ef880a7feb30c124ee5833b22ae2ee0e6cd4503a))\n- *(tls)* Simplify TLS version settings ([#66](https://github.com/0x676e67/wreq/issues/66)) - ([c584368](https://github.com/0x676e67/wreq/commit/c58436853b2aeea690404ff95c389f7f37f8fc24))\n- *(tls)* Optional webpki root certificates feature ([#40](https://github.com/0x676e67/wreq/issues/40)) - 
([d0de915](https://github.com/0x676e67/wreq/commit/d0de91513332e7ff64c4ef4347d701ee5bda0576))\n- *(tls)* Avoid repeated loading of native root CA ([#37](https://github.com/0x676e67/wreq/issues/37)) - ([2ad61c7](https://github.com/0x676e67/wreq/commit/2ad61c7619064b863e184f3bf18eb207ade1c1e7))\n- *(tls)* Optional built-in root certificates feature ([#36](https://github.com/0x676e67/wreq/issues/36)) - ([016bb5d](https://github.com/0x676e67/wreq/commit/016bb5d20e95d27e25022cfc5396ebf4484f0d2f))\n- *(tls)* Some `Chrome`/`Edge` versions have `ECH` enabled by default ([#9](https://github.com/0x676e67/wreq/issues/9)) - ([fecd878](https://github.com/0x676e67/wreq/commit/fecd87820d8014af9abad29befcb405a3ac8593f))\n- *(tls)* Some `Chrome`/`Edge` versions have `ECH` enabled by default ([#8](https://github.com/0x676e67/wreq/issues/8)) - ([a68fa56](https://github.com/0x676e67/wreq/commit/a68fa56c75a2c28efcfca324488c1340889b6674))\n- *(tls)* Enable permute extensions for `Chrome`/`Edge` 106 and above ([#6](https://github.com/0x676e67/wreq/issues/6)) - ([20e61f0](https://github.com/0x676e67/wreq/commit/20e61f081bbd8b6da9113714c7cec8aaf11aec22))\n- *(tls)* Add preconfigured TLS settings ([#118](https://github.com/0x676e67/wreq/issues/118)) - ([440bbdf](https://github.com/0x676e67/wreq/commit/440bbdf2eed0f47ad781715d4c41d11c8d782e6d))\n- *(tls)* Add option to configure TLS server name indication (SNI) ([#117](https://github.com/0x676e67/wreq/issues/117)) - ([9847c41](https://github.com/0x676e67/wreq/commit/9847c41e91a4d8cc229eba65df9fe83d98800d94))\n- *(tls)* Optimize tls configuration process ([#113](https://github.com/0x676e67/wreq/issues/113)) - ([87219ca](https://github.com/0x676e67/wreq/commit/87219ca951cb620e10cf1a61bdb41d573dd3b285))\n- *(tls)* Add `CA Certificate` settings ([#112](https://github.com/0x676e67/wreq/issues/112)) - ([0b39bb0](https://github.com/0x676e67/wreq/commit/0b39bb0c91ab403ab60ee32bd47c8b263c00cd17))\n- *(tls)* Reuse https connector layer 
([#107](https://github.com/0x676e67/wreq/issues/107)) - ([5c32b6d](https://github.com/0x676e67/wreq/commit/5c32b6d24bdecace26e07e1e6e45ed17ea3dcd1b))\n- *(tls)* Add zstd support for chrome models and derivatives ([#93](https://github.com/0x676e67/wreq/issues/93)) - ([0204bb4](https://github.com/0x676e67/wreq/commit/0204bb4a25b3b56b6ef4f4b56a06e837873b4339))\n- *(websocket)* Add websocket handshake with a specified websocket key ([#50](https://github.com/0x676e67/wreq/issues/50)) - ([cf46944](https://github.com/0x676e67/wreq/commit/cf469447eebab3ab112c965f722e9b20314b8d0e))\n- *(websocket)* Improve websocket API usage ([#49](https://github.com/0x676e67/wreq/issues/49)) - ([72070aa](https://github.com/0x676e67/wreq/commit/72070aa29529d718ea19625fc8e43909dee1c5b7))\n- *(websocket)* Improve websocket upgrade ([#73](https://github.com/0x676e67/wreq/issues/73)) - ([348f04c](https://github.com/0x676e67/wreq/commit/348f04cd634b1b17267c2f0ff75851768590b6a4))\n- *(websocket)* Add upgrade with custom handshake key - ([b02396b](https://github.com/0x676e67/wreq/commit/b02396b64187cd770166c68a7556e56a2513ba06))\n- *(websocket)* Export header method - ([4ab0b0a](https://github.com/0x676e67/wreq/commit/4ab0b0a1664fb7969e9089a7f658ef36b01cad0c))\n- *(websocket)* Export header method - ([290d163](https://github.com/0x676e67/wreq/commit/290d16395fd3c9b1f9509bbec0e978655cb20b9f))\n- *(websocket)* Export `UpgradedRequestBuilder` - ([fac7251](https://github.com/0x676e67/wreq/commit/fac7251e922e802042bc6984928fa7d3c798e685))\n- *(websocket)* Support configuration websocket - ([319dd6a](https://github.com/0x676e67/wreq/commit/319dd6a9fc6f6f18295e276bcd21d6ed63c0c9ee))\n- Add loading of dynamic root certificate store ([#170](https://github.com/0x676e67/wreq/issues/170)) - ([44a5784](https://github.com/0x676e67/wreq/commit/44a578440a23f2c4bebabe137564c009f62b9049))\n- Add `Edge 131` impersonate ([#158](https://github.com/0x676e67/wreq/issues/158)) - 
([9dd73ab](https://github.com/0x676e67/wreq/commit/9dd73ab6c9d9839f9ad1a6381f5f78d7ef400108))\n- Add `Safari 18.1.1` impersonate ([#157](https://github.com/0x676e67/wreq/issues/157)) - ([2c23ab0](https://github.com/0x676e67/wreq/commit/2c23ab002466f93c4dfcebaa2c4c7658ff18a7e1))\n- Add `Safari 18.2` impersonate ([#151](https://github.com/0x676e67/wreq/issues/151)) - ([638864c](https://github.com/0x676e67/wreq/commit/638864c78cdeff1c5d107ca12933a255f35cbedb))\n- Impl `IntoUrl` for `&Url` ([#146](https://github.com/0x676e67/wreq/issues/146)) - ([a1c2343](https://github.com/0x676e67/wreq/commit/a1c2343c76c811c55f6e54a81e7bbea8884c0e0e))\n- Implement IntoUrl for Cow<'a, str> ([#145](https://github.com/0x676e67/wreq/issues/145)) - ([6c0b14c](https://github.com/0x676e67/wreq/commit/6c0b14ca224c42ed3d57bfe1acf21017dfbb3acf))\n- Support changing cookie provider after initialization ([#114](https://github.com/0x676e67/wreq/issues/114)) - ([f1c5a07](https://github.com/0x676e67/wreq/commit/f1c5a07f2943ef0c4fc418d2e73ff558eafb7df1))\n- Support changing interface after initialization - ([61ed45a](https://github.com/0x676e67/wreq/commit/61ed45a8acfaf1a2a47b09937b79b45364c1d0b1))\n- Support changing interface after initialization ([#103](https://github.com/0x676e67/wreq/issues/103)) - ([81d79da](https://github.com/0x676e67/wreq/commit/81d79da1ef340386c5c10811a07b42b68af79d52))\n- Support changing redirect policy after initialization ([#102](https://github.com/0x676e67/wreq/issues/102)) - ([1c4bc66](https://github.com/0x676e67/wreq/commit/1c4bc6634e5a9ff12a6e6dc4a240c5e056882f29))\n- Support changing header order after initialization ([#101](https://github.com/0x676e67/wreq/issues/101)) - ([d5dd02b](https://github.com/0x676e67/wreq/commit/d5dd02bf96707cc83874cd25271ac94df9adfbf1))\n- Support changing impersonate fingerprint after initialization ([#100](https://github.com/0x676e67/wreq/issues/100)) - 
([50393ee](https://github.com/0x676e67/wreq/commit/50393ee3051af81f971a0215ce841498bef6ff29))\n- Changing request headers after client initialization ([#97](https://github.com/0x676e67/wreq/issues/97)) - ([9954095](https://github.com/0x676e67/wreq/commit/99540955a55e9c89a2eb5bfc2cdd1cd64b5fc466))\n- Add `Chrome 131` impersonate ([#94](https://github.com/0x676e67/wreq/issues/94)) - ([a425faf](https://github.com/0x676e67/wreq/commit/a425faf4c7fc6251b0bd4720621d50bd4321e7b3))\n- Expose `hickory-resolver` as public API ([#93](https://github.com/0x676e67/wreq/issues/93)) - ([4bd5636](https://github.com/0x676e67/wreq/commit/4bd5636ab961023ee7d1d0acb3e359e3c665c733))\n- Expose `tokio-boring` as public API ([#88](https://github.com/0x676e67/wreq/issues/88)) - ([5b28f91](https://github.com/0x676e67/wreq/commit/5b28f91857480ed1536891003b14998c404f5b82))\n- Optional BoringSSL PQ experimental feature ([#84](https://github.com/0x676e67/wreq/issues/84)) - ([3be7f0f](https://github.com/0x676e67/wreq/commit/3be7f0f10d3ca392734f201f24a3b0c901930a44))\n- Improve unnecessary header sorting storage overhead ([#44](https://github.com/0x676e67/wreq/issues/44)) - ([8e8f88e](https://github.com/0x676e67/wreq/commit/8e8f88e2426a190a92ad438ec6a1240126eb38ef))\n- Improve header sort ([#43](https://github.com/0x676e67/wreq/issues/43)) - ([d547d73](https://github.com/0x676e67/wreq/commit/d547d73f70784ccfd330f20f9f6c7486cb1752db))\n- Add file function to async::multipart ([#32](https://github.com/0x676e67/wreq/issues/32)) - ([432e44e](https://github.com/0x676e67/wreq/commit/432e44eb78adc2e38c33bd55c072bd88f8bdd0fd))\n- Add zstd support - ([d087d5c](https://github.com/0x676e67/wreq/commit/d087d5c02e1fdf8ce3022d2734880ec319e880d5))\n- Update safari impersonate - ([ee38133](https://github.com/0x676e67/wreq/commit/ee38133de5b91e9f82e6e860f4bf0ccc6095a908))\n- Enable client to be a service without ownership ([#1556](https://github.com/0x676e67/wreq/issues/1556)) - 
([7a11d39](https://github.com/0x676e67/wreq/commit/7a11d397eb5990dc2346cf95ae0f186231d38388))\n- Add Response::text() - ([2fbc201](https://github.com/0x676e67/wreq/commit/2fbc20167d6656850069c6496c73969c78b0a8d2))\n- Set default headers - ([f4437ea](https://github.com/0x676e67/wreq/commit/f4437ea7b1c2a208fe07d17184d473b32b176ce4))\n\n### Bug Fixes\n\n- *(client)* Return an error instead of panic when parsing invalid URL ([#164](https://github.com/0x676e67/wreq/issues/164)) - ([0daacd1](https://github.com/0x676e67/wreq/commit/0daacd1d7c6fcd1e44aee84dfbdbf4d384acc948))\n- *(client)* Fix retry request via connection pool extension ([#138](https://github.com/0x676e67/wreq/issues/138)) - ([2971538](https://github.com/0x676e67/wreq/commit/2971538ebaaf0005ebc4b9d336d8243e7a613b23))\n- *(client)* Fix redirect via connection pool extension ([#137](https://github.com/0x676e67/wreq/issues/137)) - ([6c3a0cb](https://github.com/0x676e67/wreq/commit/6c3a0cbd45a539ebc17b38c0841d25be3ef00307))\n- *(client)* Fix redirect header sorting ([#135](https://github.com/0x676e67/wreq/issues/135)) - ([275baf6](https://github.com/0x676e67/wreq/commit/275baf63cecf609701bebd1d08c51cb1a27510cb))\n- *(client)* Fix http redirect via proxy ([#134](https://github.com/0x676e67/wreq/issues/134)) - ([c71dd91](https://github.com/0x676e67/wreq/commit/c71dd915511b2b354d3f795f2c29779aec8e237d))\n- *(client)* Fix `ClientBuilder` not `Send` + `Sync` ([#51](https://github.com/0x676e67/wreq/issues/51)) - ([c6312fc](https://github.com/0x676e67/wreq/commit/c6312fc6c8cbe6a11a67399e73d203b4f7091f8b))\n- *(client)* Optional setting of default accept ([#133](https://github.com/0x676e67/wreq/issues/133)) - ([fc4df7c](https://github.com/0x676e67/wreq/commit/fc4df7ced3d564d1f4b1475cfc9a68e808be342a))\n- *(client)* Fix the header sending order, set accept before request ([#131](https://github.com/0x676e67/wreq/issues/131)) - 
([2beae56](https://github.com/0x676e67/wreq/commit/2beae56c0a0e9119e270864ca4efbbc0d557a917))\n- *(client)* Fix http version setting order ([#120](https://github.com/0x676e67/wreq/issues/120)) - ([60f3521](https://github.com/0x676e67/wreq/commit/60f352157a3483104170d10bc0f1367110b24d34))\n- *(client)* `headers_order` error - ([1801359](https://github.com/0x676e67/wreq/commit/1801359894ac277c9cb6fd4c48f1c459b3adab2f))\n- *(connect)* Unnecessarily panic when parsing invalid URI ([#166](https://github.com/0x676e67/wreq/issues/166)) - ([b42559b](https://github.com/0x676e67/wreq/commit/b42559beed13ab5fcfe881dc2cae36f932b54f14))\n- *(connector)* Initialize pool key extension when creating a client ([#126](https://github.com/0x676e67/wreq/issues/126)) - ([d6e3878](https://github.com/0x676e67/wreq/commit/d6e38788498a56e0f89162bb15210d3bd82e7ab1))\n- *(connector)* Fix TLS session failure when changing address ([#55](https://github.com/0x676e67/wreq/issues/55)) - ([ed39758](https://github.com/0x676e67/wreq/commit/ed39758a9155652b4f7fd63900c4eaf60590c92c))\n- *(extension)* Fix configure chrome new curves ([#67](https://github.com/0x676e67/wreq/issues/67)) - ([bd872e4](https://github.com/0x676e67/wreq/commit/bd872e4d221938f88d1a42b5816d62c8834f8427))\n- *(hickory-dns)* Fix initialization when `/etc/resolv.conf` is missing ([#163](https://github.com/0x676e67/wreq/issues/163)) - ([97ed7d6](https://github.com/0x676e67/wreq/commit/97ed7d63773f411e4bdea66aa6dfea6f536ac2c1))\n- *(http)* Compatible with some CDN servers, Http1 retains case by default when sending headers([#56](https://github.com/0x676e67/wreq/issues/56)) - ([f653f9c](https://github.com/0x676e67/wreq/commit/f653f9c6563d28abf4ebf96ce3882daaa03c84ed))\n- *(impersonate)* Fix safari header order ([#72](https://github.com/0x676e67/wreq/issues/72)) - ([f9be4a4](https://github.com/0x676e67/wreq/commit/f9be4a482c5fa63664f6b23a8f8139a48fa80c5d))\n- *(impersonate)* Fix `safari15_3`/`safari15_5` http2 fingerprint 
([#70](https://github.com/0x676e67/wreq/issues/70)) - ([63ef44e](https://github.com/0x676e67/wreq/commit/63ef44e86ddad718547000e1352898fdaa7697c6))\n- *(impersonate)* Add Safari17_5 from string - ([1ce9a61](https://github.com/0x676e67/wreq/commit/1ce9a610df3afd35a235fde333aed0ded34dabb9))\n- *(impersonate)* Fix v116 impersonate - ([427f6a2](https://github.com/0x676e67/wreq/commit/427f6a22025934ae0e759840b5d7c16b4015d2fe))\n- *(proxy)* Make HTTP(S)_PROXY variables take precedence over ALL_PROXY ([#87](https://github.com/0x676e67/wreq/issues/87)) - ([e28b30a](https://github.com/0x676e67/wreq/commit/e28b30a3da8e4fcb075c07da6e677ffbb80ed681))\n- *(response)* `copy_to()` and `text()` return `reqwest::Result` - ([2c60511](https://github.com/0x676e67/wreq/commit/2c60511bcee3c633467b6be46f3d1e27af5f0905))\n- *(tls)* Fix SNI verification ([#87](https://github.com/0x676e67/wreq/issues/87)) - ([0cfb181](https://github.com/0x676e67/wreq/commit/0cfb181a895bbd32f8ad48b1eeb376172a077232))\n- *(tls)* Fix unsafe code block warnings ([#52](https://github.com/0x676e67/wreq/issues/52)) - ([127a1a9](https://github.com/0x676e67/wreq/commit/127a1a923b2203e31de41d171acd37e14aa5fb9f))\n- *(tls)* Fix CA certificate conditional compilation ([#41](https://github.com/0x676e67/wreq/issues/41)) - ([27b4119](https://github.com/0x676e67/wreq/commit/27b411915be3314338427186fac5760a615c4f11))\n- *(tls)* Fix default tls configuration to use websocket ([#30](https://github.com/0x676e67/wreq/issues/30)) - ([889867c](https://github.com/0x676e67/wreq/commit/889867c6194a7fb812d1a3ec957e30f0757bfcc1))\n- *(tls)* Fix default TLS SNI context configuration conflict ([#13](https://github.com/0x676e67/wreq/issues/13)) - ([94db0fc](https://github.com/0x676e67/wreq/commit/94db0fca006ca65d0d13f04eb23237512113937b))\n- *(tls)* Fix setting config TLS version - ([6544c11](https://github.com/0x676e67/wreq/commit/6544c111048bcf0513cd7a6ba8ba148f65502ac9))\n- *(tls)* Fix optional config TLS size version - 
([bb16145](https://github.com/0x676e67/wreq/commit/bb16145fa799f3b078ed50a695cbd27a02f0457e))\n- *(websocket)* Fix websocket upgrade builder ([#134](https://github.com/0x676e67/wreq/issues/134)) - ([111d928](https://github.com/0x676e67/wreq/commit/111d92877982dded4dd2b5c63318dff43631c967))\n- Improve TLS connector creation, fix client creation taking too long ([#107](https://github.com/0x676e67/wreq/issues/107)) - ([26f254c](https://github.com/0x676e67/wreq/commit/26f254c5b805ddaf6cf423b55aad5e74760796da))\n- Fix decompressing deflate with zlib specific wrapper fails ([#99](https://github.com/0x676e67/wreq/issues/99)) - ([c865b9c](https://github.com/0x676e67/wreq/commit/c865b9cf5dad766c9da35e85757a0a26e2f3efbf))\n- Update Chrome version from 129 to 130 ([#68](https://github.com/0x676e67/wreq/issues/68)) - ([f27704a](https://github.com/0x676e67/wreq/commit/f27704a876dd28b929a534e212b53218141a789e))\n- Fix incorrect Accept-Encoding header combinations in Accepts::as_str ([#89](https://github.com/0x676e67/wreq/issues/89)) - ([1373a01](https://github.com/0x676e67/wreq/commit/1373a018b3c374a28e37aed8a3da9fd563a8f665))\n- Set nodelay correctly to handle when a tls feature is enabled but connection is to an http server ([#2062](https://github.com/0x676e67/wreq/issues/2062)) - ([1485ce6](https://github.com/0x676e67/wreq/commit/1485ce6f754413a81a9673252349f953c1d86e82))\n- Split connect timeout for multiple IPs ([#1940](https://github.com/0x676e67/wreq/issues/1940)) - ([2a881fb](https://github.com/0x676e67/wreq/commit/2a881fb50489b21aa6c879eea0cb339755240fb5))\n- Strip BOM in `Response::text_with_charset` ([#1898](https://github.com/0x676e67/wreq/issues/1898)) - ([3abcc7c](https://github.com/0x676e67/wreq/commit/3abcc7c4f537c16ad9937f8cc60fb23cb506ac85))\n- Strip BOM in Response::text_with_charset - ([d820ad2](https://github.com/0x676e67/wreq/commit/d820ad237feade4527743067c8f6fc3e19972c7b))\n- Wasm client: pass response header to builder by reference 
([#1350](https://github.com/0x676e67/wreq/issues/1350)) - ([c9217d8](https://github.com/0x676e67/wreq/commit/c9217d8d1bc6c65605ad4909cb45a1cb72b778a0))\n- Respect https_only option when redirecting ([#1313](https://github.com/0x676e67/wreq/issues/1313)) - ([bdc57be](https://github.com/0x676e67/wreq/commit/bdc57beabbf3fe77c2196d17ef3f7640d37b81cf))\n- Upgrade to http2 if the server reports that it supports it ([#1166](https://github.com/0x676e67/wreq/issues/1166)) - ([2940740](https://github.com/0x676e67/wreq/commit/2940740493ce55e8baee44a47fd759d9e3aa3187))\n- Tests::support::server - ([07d6bca](https://github.com/0x676e67/wreq/commit/07d6bca08f0ef8deb752eb17e87ecca1e2c441ae))\n\n### Refactor\n\n- *(client)* Removed confusing way to enable `hickory-dns` ([#34](https://github.com/0x676e67/wreq/issues/34)) - ([769d797](https://github.com/0x676e67/wreq/commit/769d7979f583ac435d808a8831c806638e009c7a))\n- *(client)* Turn off default redirect ([#4](https://github.com/0x676e67/wreq/issues/4)) - ([2b80121](https://github.com/0x676e67/wreq/commit/2b80121e69cb15f74885516429406df457eb1c56))\n- *(client)* Simplify Headers Frame priority settings ([#126](https://github.com/0x676e67/wreq/issues/126)) - ([3449c2f](https://github.com/0x676e67/wreq/commit/3449c2f54ed4fcc9d94bfc484b2b739dd892e474))\n- *(client)* Set_proxies accepts an slice of references ([#119](https://github.com/0x676e67/wreq/issues/119)) - ([a25ada0](https://github.com/0x676e67/wreq/commit/a25ada0a0cf297ab43b48fd7915d3c24f740028d))\n- *(hickory-dns)* Async `new_resolver` ([#84](https://github.com/0x676e67/wreq/issues/84)) - ([73ff128](https://github.com/0x676e67/wreq/commit/73ff1286ac383372f84f5a37e653c237032c2192))\n- *(impersonate)* Simplify Impersonate enum parsing with macro ([#71](https://github.com/0x676e67/wreq/issues/71)) - ([b3efecf](https://github.com/0x676e67/wreq/commit/b3efecf6221510b6ac9d55a0b651f321d0557635))\n- *(impersonate)* Reuse code - 
([dbc6d66](https://github.com/0x676e67/wreq/commit/dbc6d662b2feb33231c1e37b780c6645761d23bb))\n- *(impersonate)* Refactor unnecessary settings - ([716a190](https://github.com/0x676e67/wreq/commit/716a190617dbe73b6fd771e05748179221cdaac6))\n- *(impersonate)* Revert to SslVerifyMode::NONE - ([f921d58](https://github.com/0x676e67/wreq/commit/f921d5814ac12027fdf5c05af0ebe5518348ff60))\n- *(impersonate)* Update SSL verify mode - ([3ca497c](https://github.com/0x676e67/wreq/commit/3ca497cc74f7f846e7ca25068dbcf049e523c31e))\n- *(proxy)* Remove internal proxy sys cache ([#26](https://github.com/0x676e67/wreq/issues/26)) - ([714b48f](https://github.com/0x676e67/wreq/commit/714b48fbe3d070126054ef96b58f8b85b208db7f))\n- *(tls)* Simplified TLS version mapper ([#70](https://github.com/0x676e67/wreq/issues/70)) - ([2e2ebf9](https://github.com/0x676e67/wreq/commit/2e2ebf9a7bec8492de1d01d2b19bc5526e4164ac))\n- *(tls)* Refactor internal `TLS`/`HTTP2` module ([#69](https://github.com/0x676e67/wreq/issues/69)) - ([7f10e51](https://github.com/0x676e67/wreq/commit/7f10e519f1ae74cca2a59bb88b6bba312fea029f))\n- *(tls)* Simplify TLS custom settings ([#46](https://github.com/0x676e67/wreq/issues/46)) - ([499fe4a](https://github.com/0x676e67/wreq/commit/499fe4aa3486d9dcc4292b5cf9153b1c987dd2f4))\n- *(tls)* Public and reuse tls/http2 templates ([#42](https://github.com/0x676e67/wreq/issues/42)) - ([e082581](https://github.com/0x676e67/wreq/commit/e08258124fba80dc9d6f2a1f4d1804c9685a9fb6))\n- *(tls)* Simplify TLS/HTTP2 configuration ([#7](https://github.com/0x676e67/wreq/issues/7)) - ([c44d01f](https://github.com/0x676e67/wreq/commit/c44d01f42350e0bb736a7e360147fb4763559551))\n- *(tls)* Simplify TLS configuration ([#5](https://github.com/0x676e67/wreq/issues/5)) - ([56840ab](https://github.com/0x676e67/wreq/commit/56840ab4652f809a429325919aeedec9d5010634))\n- *(tls)* Refactored changes and refactored TLS build - 
([c1b1e09](https://github.com/0x676e67/wreq/commit/c1b1e097f6e690000a35df16eb537029f1253c57))\n- *(tls)* Refactor TLS connection layer configuration ([#111](https://github.com/0x676e67/wreq/issues/111)) - ([db4e566](https://github.com/0x676e67/wreq/commit/db4e566f9c494c7905b8e9022b68426d0b96e4ae))\n- *(tls)* Simplify TLS connector configuration ([#103](https://github.com/0x676e67/wreq/issues/103)) - ([322d030](https://github.com/0x676e67/wreq/commit/322d030968a0106220be5c0e6c4641680ddba3cd))\n- *(tls)* Major module changes ([#91](https://github.com/0x676e67/wreq/issues/91)) - ([76114b0](https://github.com/0x676e67/wreq/commit/76114b0a6674b0afd2d8cb5927fe2d6f58705458))\n- *(websocket)* Major changes, abstract WebSocket message structure ([#94](https://github.com/0x676e67/wreq/issues/94)) - ([266f0cb](https://github.com/0x676e67/wreq/commit/266f0cbf72c40262912be32c0a144a185fcac50e))\n- Unified naming API ([#150](https://github.com/0x676e67/wreq/issues/150)) - ([da5e052](https://github.com/0x676e67/wreq/commit/da5e052c9f31fb908c30c21953ee01c6344b68fe))\n- Do not create default request headers unless necessary ([#120](https://github.com/0x676e67/wreq/issues/120)) - ([1d40d7e](https://github.com/0x676e67/wreq/commit/1d40d7e576eb796ce9d74815ab9937ca1cb17640))\n- Reduce `unsafe` scope for improved safety and readability ([#115](https://github.com/0x676e67/wreq/issues/115)) - ([79e6cb8](https://github.com/0x676e67/wreq/commit/79e6cb8b055d71b35d630ef11908b3fb8707e2e7))\n- Delete unnecessary clone ([#98](https://github.com/0x676e67/wreq/issues/98)) - ([c5c6004](https://github.com/0x676e67/wreq/commit/c5c6004785c1c14721c6643af67fcdc728757f68))\n- Integrate tls/http2 unified configuration module ([#77](https://github.com/0x676e67/wreq/issues/77)) - ([cef5650](https://github.com/0x676e67/wreq/commit/cef5650fa3fe208a97fddf8fd27715893770a020))\n- Normalize DNS module exports ([#64](https://github.com/0x676e67/wreq/issues/64)) - 
([b0a1ba6](https://github.com/0x676e67/wreq/commit/b0a1ba6f6de1964c31145a3a23ec8175cf195925))\n- Refactor custom root CA certificate loading source ([#38](https://github.com/0x676e67/wreq/issues/38)) - ([cfd3603](https://github.com/0x676e67/wreq/commit/cfd36030927c617c38d0bfd0fd6e09c4112d4a45))\n- Rename the `client` module to `http` - ([5568b31](https://github.com/0x676e67/wreq/commit/5568b31cb3df741bb1f8f507f2b7858b00395263))\n- Enabled `accept-encoding` will be determined by the `feature` ([#95](https://github.com/0x676e67/wreq/issues/95)) - ([85de77b](https://github.com/0x676e67/wreq/commit/85de77b1eca6272dfba13d61f8392563b561c835))\n- Enabling `accept-encoding` will be determined by the feature - ([4bf9465](https://github.com/0x676e67/wreq/commit/4bf94652db2b776a0df366d9f2e3c8d44daf7c52))\n- Blocking feature doesn't need multi-threaded tokio runtime ([#90](https://github.com/0x676e67/wreq/issues/90)) - ([7ab0c67](https://github.com/0x676e67/wreq/commit/7ab0c678d7ffc6f23b4b039db702e380492f4df8))\n- Change Debug of Error to output url as str ([#88](https://github.com/0x676e67/wreq/issues/88)) - ([b9b684b](https://github.com/0x676e67/wreq/commit/b9b684b2212878ef84a5c18da3f5122bcd74ecab))\n- Remove unused crates - ([9fb269e](https://github.com/0x676e67/wreq/commit/9fb269e5f38a0e200000db2ac0a3786d859575f2))\n- Remove unused crates ([#54](https://github.com/0x676e67/wreq/issues/54)) - ([c0c273d](https://github.com/0x676e67/wreq/commit/c0c273d4e648a0441ab9efee63927ff263e9f27a))\n- Migrate trust-dns to hickory-dns - ([ae7d775](https://github.com/0x676e67/wreq/commit/ae7d7753f005120182e9a00486beb7f196b8c5fd))\n- Migrate trust-dns to hickory-dns - ([712600a](https://github.com/0x676e67/wreq/commit/712600a2e11cf21e850183391d1e77caedc297bd))\n- Disable ssl verify - ([5680bb0](https://github.com/0x676e67/wreq/commit/5680bb0a290d6556ba2f358293dca31824c68af8))\n\n### Documentation\n\n- Improve `TLS`/`HTTP2` custom configuration documentation 
([#67](https://github.com/0x676e67/wreq/issues/67)) - ([8a72439](https://github.com/0x676e67/wreq/commit/8a72439a3c9aa2c8c06492d8928330bac518d6e3))\n- Update docs ([#54](https://github.com/0x676e67/wreq/issues/54)) - ([a010145](https://github.com/0x676e67/wreq/commit/a01014519b499621fec2fb03a7e9d3c333c1855d))\n- Update docs ([#82](https://github.com/0x676e67/wreq/issues/82)) - ([41816f8](https://github.com/0x676e67/wreq/commit/41816f8b26e42be0166c8df9cb6492c71be77056))\n- Fix docs build ([#81](https://github.com/0x676e67/wreq/issues/81)) - ([2045cea](https://github.com/0x676e67/wreq/commit/2045cea5e05abcfeb7c91d94a1e0497eb22bfe19))\n- Add cfg notes about http3 builder methods ([#2070](https://github.com/0x676e67/wreq/issues/2070)) - ([c65dd7f](https://github.com/0x676e67/wreq/commit/c65dd7f783d8aae8ee47e751353d1befeb9dea20))\n- Remove redundant link targets ([#2019](https://github.com/0x676e67/wreq/issues/2019)) - ([50dbaf3](https://github.com/0x676e67/wreq/commit/50dbaf391087cfa951accc765126b4f5d017d8a3))\n- Fix building on docs.rs ([#1789](https://github.com/0x676e67/wreq/issues/1789)) - ([7fdd014](https://github.com/0x676e67/wreq/commit/7fdd014d46d9bf07555a2321166f3029e9a25ac8))\n- Fix wording on main docs page ([#1765](https://github.com/0x676e67/wreq/issues/1765)) - ([673449a](https://github.com/0x676e67/wreq/commit/673449aa823394d224815b8cc168e059e4c4ebe1))\n- Fix some typos ([#1562](https://github.com/0x676e67/wreq/issues/1562)) - ([81fc85a](https://github.com/0x676e67/wreq/commit/81fc85a68949bd0ff73cfd9f292393b5c5ed42ed))\n- Fix broken doc comment example. 
([#1584](https://github.com/0x676e67/wreq/issues/1584)) - ([e9ba0a9](https://github.com/0x676e67/wreq/commit/e9ba0a9dc79f63c3655f334df23b50b9a841e326))\n- Fix some typos ([#1531](https://github.com/0x676e67/wreq/issues/1531)) - ([6ca5f3e](https://github.com/0x676e67/wreq/commit/6ca5f3e50c979909b786a4f1e2c73611164254c7))\n- Provide basic auth example ([#1362](https://github.com/0x676e67/wreq/issues/1362)) - ([be8ab7b](https://github.com/0x676e67/wreq/commit/be8ab7b951610cbc85764198943ab053e8608454))\n- Fix some typos ([#1346](https://github.com/0x676e67/wreq/issues/1346)) - ([597833d](https://github.com/0x676e67/wreq/commit/597833d906f2453a6976e6ed6ed71af91c534382))\n- Adds amplifying note about private key formats ([#1335](https://github.com/0x676e67/wreq/issues/1335)) - ([eb9e343](https://github.com/0x676e67/wreq/commit/eb9e343142b7fe7392408141dab7145cb4a30ba2))\n- Build wasm32-unknown-unknown docs ([#998](https://github.com/0x676e67/wreq/issues/998)) - ([cff487f](https://github.com/0x676e67/wreq/commit/cff487ff58630cf0ac59f3e46cbf20cf50a28b3f))\n- Make encoding_rs link clickable ([#674](https://github.com/0x676e67/wreq/issues/674)) - ([a9dd94a](https://github.com/0x676e67/wreq/commit/a9dd94a99fdb30a77992ea0afa552f266efbd8a3))\n\n### Styling\n\n- *(connect)* Replace all non-refutable if let patterns with let statements ([#44](https://github.com/0x676e67/wreq/issues/44)) - ([ec598d8](https://github.com/0x676e67/wreq/commit/ec598d8b9262680b570ac15fff1623a0e050edb8))\n- *(impersonate)* Remove dead code ([#51](https://github.com/0x676e67/wreq/issues/51)) - ([61c6055](https://github.com/0x676e67/wreq/commit/61c605531881215c8ab95f8eda557969c7d6d6fb))\n- *(tls)* Remove unused closure - ([a39ba21](https://github.com/0x676e67/wreq/commit/a39ba2198e5a7144b60567f9cb815c1fc7d85d2e))\n\n### Testing\n\n- Fix test_badssl_no_built_in_roots - ([427ff74](https://github.com/0x676e67/wreq/commit/427ff74adf2266413413b2ab4da6c5669efadf33))\n- Add more badssl tests for rustls - 
([8027a28](https://github.com/0x676e67/wreq/commit/8027a2894af496ce25c7f2a035e265cc8bf9bf59))\n- Response::text() - ([33c7ce4](https://github.com/0x676e67/wreq/commit/33c7ce4ce2f65587ea60c011151a5605887e97f3))\n- Add tests for setting default headers - ([2bd558d](https://github.com/0x676e67/wreq/commit/2bd558d8c74a03622dbb02d194440aa13c0a9048))\n- Use verbose output - ([f5b4dd4](https://github.com/0x676e67/wreq/commit/f5b4dd4123f4f2098895be3833e81cdf9b5a8460))\n- Fixed up issue with reading a Body and finished RequestBuilder tests - ([59ba7cf](https://github.com/0x676e67/wreq/commit/59ba7cf23b48c94c7223cf0f2047e9e7b1e0a275))\n- Added some trivial tests for the RequestBuilder - ([980488f](https://github.com/0x676e67/wreq/commit/980488f918a70f24a859f3776f4b4dd947c3758e))\n\n### Miscellaneous Tasks\n\n- *(client)* Client `set_redirect_policy` rename to `set_redirect` ([#149](https://github.com/0x676e67/wreq/issues/149)) - ([0ed4a76](https://github.com/0x676e67/wreq/commit/0ed4a76067b87568a33a110be6d742b946875ede))\n- *(client)* Accept request header is appended by default ([#125](https://github.com/0x676e67/wreq/issues/125)) - ([06ccdc7](https://github.com/0x676e67/wreq/commit/06ccdc70c685ef5a8817fcbef177566ec7be50b4))\n- *(client)* Impersonate does not clone request headers unless necessary - ([2043388](https://github.com/0x676e67/wreq/commit/204338837c20ac0bffd585b4f7238b5b58650254))\n- *(docs)* Fix missing link for 'blocking' - ([4574019](https://github.com/0x676e67/wreq/commit/457401904596260c712c0b9f4f27e6d47b4a2141))\n- *(request)* Avoid panic when adding host header - ([80e4871](https://github.com/0x676e67/wreq/commit/80e48718e634dd6696688d415e858c46acffbc81))\n- *(request)* Delete WASM legacy API ([#141](https://github.com/0x676e67/wreq/issues/141)) - ([ddcd980](https://github.com/0x676e67/wreq/commit/ddcd9806d49dbcf47e55389bf5dc97871d566377))\n- *(tls)* Rename `http_version_pref` to `alpn_protos` ([#131](https://github.com/0x676e67/wreq/issues/131)) - 
([4b7edba](https://github.com/0x676e67/wreq/commit/4b7edba4a792504382567d18451074a249b0a2bc))\n- *(tls)* Export extension as public API - ([05a6a6f](https://github.com/0x676e67/wreq/commit/05a6a6fec7390736d71d818c1b8aa20f96d3e95f))\n- *(tls)* Remove redundant settings ([#109](https://github.com/0x676e67/wreq/issues/109)) - ([ecda80c](https://github.com/0x676e67/wreq/commit/ecda80cf576de73e854ed7e5efca3843fdb6d062))\n- Move `ImpersonateSettings` to implement location - ([99ea68b](https://github.com/0x676e67/wreq/commit/99ea68b161ed7ac8e3b384464cb270034b831bce))\n- Simplify root certificate load ([#169](https://github.com/0x676e67/wreq/issues/169)) - ([68e9f26](https://github.com/0x676e67/wreq/commit/68e9f26a946c781bd1c06fd67dbfb3c13894350d))\n- Simplify root certificate load - ([566f2fb](https://github.com/0x676e67/wreq/commit/566f2fb7a4a0e5cb7d1899db5257e509d5d9f142))\n- To avoid ambiguity, `ca_cert_store` is renamed to `root_certs_store` ([#162](https://github.com/0x676e67/wreq/issues/162)) - ([b76ef15](https://github.com/0x676e67/wreq/commit/b76ef15e2fdc206cd949fd44e7a147ee52e91ac3))\n- Update macro export scope - ([3115132](https://github.com/0x676e67/wreq/commit/3115132eee19a7e303adaadce87c8740a222f167))\n- Update impersonate template - ([82d7b93](https://github.com/0x676e67/wreq/commit/82d7b9331ddc24d546115a54ac594f84dc49f137))\n- Macro static creation of impersonate template ([#156](https://github.com/0x676e67/wreq/issues/156)) - ([7383d66](https://github.com/0x676e67/wreq/commit/7383d6630a20dd104825bdb6a9fed80482ee3450))\n- Do not pre-append `content-length` in non-header sorting state ([#152](https://github.com/0x676e67/wreq/issues/152)) - ([075f973](https://github.com/0x676e67/wreq/commit/075f97304ffb8f3889dee5a22c4220818afecbb4))\n- Simplify the impersonate template - ([92f52d1](https://github.com/0x676e67/wreq/commit/92f52d1e596d69f6b8690704ab74ac2def7740b3))\n- Fix typo - 
([650256c](https://github.com/0x676e67/wreq/commit/650256c42aa6cf9582e83e8d750bb1b50ca5d134))\n- Introduce macro for conditional header initialization ([#127](https://github.com/0x676e67/wreq/issues/127)) - ([b8a2e48](https://github.com/0x676e67/wreq/commit/b8a2e488796c509901f90f32c4549c78c3bcdc49))\n- Refactor struct fields to use Cow<'static, T> for better efficiency ([#124](https://github.com/0x676e67/wreq/issues/124)) - ([8b79c5b](https://github.com/0x676e67/wreq/commit/8b79c5b4182e6e4e861b37b6db76f3a9c4a4a81b))\n- Cache template request headers ([#121](https://github.com/0x676e67/wreq/issues/121)) - ([3b65d8f](https://github.com/0x676e67/wreq/commit/3b65d8faca44fc6d241e59140db92238c6eef49b))\n- Update - ([7d1bbbc](https://github.com/0x676e67/wreq/commit/7d1bbbc8c97247be5d43957ed68438465f311388))\n- Simplify impersonate template - ([871a7af](https://github.com/0x676e67/wreq/commit/871a7af7074b7dbe1bfffa93445d98da3a3fc08e))\n- Simplify pre-configured TLS settings - ([2ca512e](https://github.com/0x676e67/wreq/commit/2ca512ee0b793ffcce22927c2e3fbb91e36ec05a))\n- Remove tunnel proxy user agent setting ([#116](https://github.com/0x676e67/wreq/issues/116)) - ([04fa9fa](https://github.com/0x676e67/wreq/commit/04fa9fafb5b6bc6401fe738109e58f7e0473fc11))\n- Reuse redirect policies whenever possible - ([49bb717](https://github.com/0x676e67/wreq/commit/49bb7174a2b84d88855805d1dcea5966e6133cdb))\n- Inline some hot code - ([a07cf10](https://github.com/0x676e67/wreq/commit/a07cf105fb84a97264d4af71fd7f5962790b6f48))\n- Use custom connector builder - ([6c51bd1](https://github.com/0x676e67/wreq/commit/6c51bd1d4b8592181a2fa59164d054b96fbe41d6))\n- Disable dynamic distribution loading of connector builder ([#113](https://github.com/0x676e67/wreq/issues/113)) - ([6814489](https://github.com/0x676e67/wreq/commit/6814489773f67c84cc83f316e98ab6da38913b5b))\n- Disable dynamic distribution loading of certificates ([#112](https://github.com/0x676e67/wreq/issues/112)) - 
([75095ba](https://github.com/0x676e67/wreq/commit/75095ba8d3085bfd52bb92e581ec76ec7b923bb2))\n- Undo the dynamic distribution configuration headers ([#111](https://github.com/0x676e67/wreq/issues/111)) - ([a7c9376](https://github.com/0x676e67/wreq/commit/a7c937603966bae1b811d3cb9b67f3958279e579))\n- Cargo clippy --fix ([#106](https://github.com/0x676e67/wreq/issues/106)) - ([065f294](https://github.com/0x676e67/wreq/commit/065f294a1b67ac9bb979966f955500e4f93a4098))\n- Remove unnecessary tls feature - ([7f70c48](https://github.com/0x676e67/wreq/commit/7f70c48f63d27409b509dc620b4451e061548ef2))\n- 1.80 as MSRV ([#74](https://github.com/0x676e67/wreq/issues/74)) - ([9814951](https://github.com/0x676e67/wreq/commit/98149512c90cc51d51d14cf3e0cfe8d26899b49d))\n- 1.70 as MSRV - ([34bc71d](https://github.com/0x676e67/wreq/commit/34bc71d13ccab181869ae377ff0d3c8ae0779f64))\n- 1.70 as MSRV ([#53](https://github.com/0x676e67/wreq/issues/53)) - ([29adc92](https://github.com/0x676e67/wreq/commit/29adc923bd197f8d92cf03d964d689c7b01e27de))\n- A few simple cleanups/lints ([#1849](https://github.com/0x676e67/wreq/issues/1849)) - ([280af15](https://github.com/0x676e67/wreq/commit/280af156459845a6b4535aa9045979861b67c310))\n- Update changelog for 0.11.15 - ([bf7ff55](https://github.com/0x676e67/wreq/commit/bf7ff556494bc5e35164c325faad49e1cdd3c8e9))\n- Fix appveyor build for backtrace-sys dependency ([#526](https://github.com/0x676e67/wreq/issues/526)) - ([2a64140](https://github.com/0x676e67/wreq/commit/2a64140de82d93ca2b3a804c07f16e7a5bf66fa1))\n- Update gitignore - ([3bc907f](https://github.com/0x676e67/wreq/commit/3bc907f7deaeff0a9f9e02c7c3f9e4c4495aeafe))\n\n### Revert\n\n- *(client)* Remove use of unused TLS Server Name Indication - ([a935f99](https://github.com/0x676e67/wreq/commit/a935f992194542b3dd4b6204963eeb3b53d5f8d0))\n- *(impersonate)* Revert Edge122 configure new curves ([#66](https://github.com/0x676e67/wreq/issues/66)) - 
([ba5cd48](https://github.com/0x676e67/wreq/commit/ba5cd48a3982b370924c06c82bf26e93191a146b))\n- *(impersonate)* Remove chrome99 impersonate ([#38](https://github.com/0x676e67/wreq/issues/38)) - ([8f9ebdd](https://github.com/0x676e67/wreq/commit/8f9ebdd608ac4f8a21bcc59fce6c8710dd03d757))\n- *(tls)* Revert tls_built_in_root_certs option ([#105](https://github.com/0x676e67/wreq/issues/105)) - ([d0cda0b](https://github.com/0x676e67/wreq/commit/d0cda0be402797c265e209a7b9fee55db89a2faa))\n- Remove `proxies_maybe_http_auth` state - ([52791a6](https://github.com/0x676e67/wreq/commit/52791a69dba7d61620257c0736c809683e1b3626))\n\n### Body\n\n- Don't call poll_ready on tx when 0 bytes remaining. ([#479](https://github.com/0x676e67/wreq/issues/479)) - ([d62f8c2](https://github.com/0x676e67/wreq/commit/d62f8c2bbd39d6cf5562c2f3c0aad32bad81d331))\n\n### CI\n\n- Enable dependabot for GitHub Action Workflow ([#1831](https://github.com/0x676e67/wreq/issues/1831)) - ([eca2a2f](https://github.com/0x676e67/wreq/commit/eca2a2f23f97409e6828e171b13d0eb3bc34465c))\n- Make a single final job that depends on all others ([#1291](https://github.com/0x676e67/wreq/issues/1291)) - ([b9cf2db](https://github.com/0x676e67/wreq/commit/b9cf2db69756cde5e3091cc6a06cff1deb2e3764))\n- Check documentation ([#1246](https://github.com/0x676e67/wreq/issues/1246)) - ([9293cd2](https://github.com/0x676e67/wreq/commit/9293cd206143d48bb68033b7de835ca2c6cdeea3))\n\n### Doc\n\n- `stream` feature is needed for `wrap_stream` and `From<File>` for `Body` ([#1456](https://github.com/0x676e67/wreq/issues/1456)) - ([9339c54](https://github.com/0x676e67/wreq/commit/9339c543235ca09664e388284811746020350b4b))\n\n### Error\n\n- Add functions to check more error types. 
([#945](https://github.com/0x676e67/wreq/issues/945)) - ([668e89b](https://github.com/0x676e67/wreq/commit/668e89b78ae1e7a0e88fb7f99649b7c907d2f0da))\n\n### Examples\n\n- Allow passing URL via CLI - ([7388b67](https://github.com/0x676e67/wreq/commit/7388b676df8431b63edc337ce8dc3032953fe07e))\n\n### Feature\n\n- Auto detect MacOS proxy settings ([#1955](https://github.com/0x676e67/wreq/issues/1955)) - ([70d100c](https://github.com/0x676e67/wreq/commit/70d100c1b81dc8856e7cfb7b31b682c2028ca877))\n\n### From<http\n\n- :Response> for Response ([#360](https://github.com/0x676e67/wreq/issues/360)) - ([4857a59](https://github.com/0x676e67/wreq/commit/4857a5917dd5445a3f5ed04edcff01b95eda7823))\n\n### Impersonate\n\n- Bugfix `chrome_123`, `chrome_124` headers - ([429bb1d](https://github.com/0x676e67/wreq/commit/429bb1d763d5a4c37a0104efe7c03ecdc6434071))\n\n### Lint\n\n- Fix unused `Identity` if only using `default-tls` ([#1164](https://github.com/0x676e67/wreq/issues/1164)) - ([287a6d1](https://github.com/0x676e67/wreq/commit/287a6d18528418381dbb28e7bd6728b1ac24b5d3))\n\n### Response.copy_to\n\n- Fix docs markup - ([4aa34bb](https://github.com/0x676e67/wreq/commit/4aa34bb5916a70e8216e5198cea278d42967d74b))\n\n### WASM\n\n- Add `try_clone` implementations to `Request` and `RequestBuilder` ([#1286](https://github.com/0x676e67/wreq/issues/1286)) - ([c4388fc](https://github.com/0x676e67/wreq/commit/c4388fcff9401d23169c6731901457e89039bf53))\n- Set RequestCredentials to None by default ([#1249](https://github.com/0x676e67/wreq/issues/1249)) - ([42b3160](https://github.com/0x676e67/wreq/commit/42b31600c30609cb8df90c799fbfbd0c305e422d))\n\n### [#1095]\n\n- Implement `basic_auth` for WASM - ([28840af](https://github.com/0x676e67/wreq/commit/28840afd46fe3b81b7c77dde4537ad702826c7f7))\n\n### Actions\n\n- Remove --all flag from rustfmt ([#795](https://github.com/0x676e67/wreq/issues/795)) - 
([b3d5f78](https://github.com/0x676e67/wreq/commit/b3d5f78b8f3ddd36a4fc6568e8a091f947dd0ff5))\n\n### Async\n\n- Add conversions from static slices to Body - ([87f03e1](https://github.com/0x676e67/wreq/commit/87f03e167c0deba25f1ca40376a5b69d598cb88f))\n\n### Async/client\n\n- Return a impl Future on execute() - ([4fba983](https://github.com/0x676e67/wreq/commit/4fba983e5e6722a457a10988e20e5277faf01e4c))\n\n### Async/reponse\n\n- Return a impl Future on json() - ([5e38b41](https://github.com/0x676e67/wreq/commit/5e38b419f00d6526e67078b8dd52054859a5ede5))\n\n### Async/request\n\n- Add methods to split and reassemble a RequestBuilder ([#1770](https://github.com/0x676e67/wreq/issues/1770)) - ([119366e](https://github.com/0x676e67/wreq/commit/119366e95720aa1b35e5bf79cd91255d6050e360))\n- Add a basic example for send() - ([0c84e6b](https://github.com/0x676e67/wreq/commit/0c84e6b9e9a7f48edc3b591bf7e28caa4f246ecd))\n- Return a impl Future on send() - ([8b62f47](https://github.com/0x676e67/wreq/commit/8b62f47ac3f5de43fbbe0445d0958eb8710f9174))\n\n### Blocking\n\n- Add tcp_keepalive option ([#1100](https://github.com/0x676e67/wreq/issues/1100)) - ([a2133ae](https://github.com/0x676e67/wreq/commit/a2133aec3b313bb370c0cf88173de33ce7cba465))\n- Opt-out CPUs auto-detection in debug mode ([#807](https://github.com/0x676e67/wreq/issues/807)) - ([7622c75](https://github.com/0x676e67/wreq/commit/7622c750648fe5453e83f7fa57e73732eb699638))\n\n### Boring\n\n- Upgrade latest version - ([ec7f212](https://github.com/0x676e67/wreq/commit/ec7f212a554044c0a407e779f1db7343e6be392a))\n\n### Boringssl\n\n- Add SSL_set_permute_extensions - ([29538bc](https://github.com/0x676e67/wreq/commit/29538bc02e88866e5b8016539bbce1e41b4c6883))\n\n### Bug\n\n- Fix custom content-type overidden by json method ([#1833](https://github.com/0x676e67/wreq/issues/1833)) - ([b13ca4b](https://github.com/0x676e67/wreq/commit/b13ca4b3399b42e7bbdafc374a129ea09bf33b17))\n- Fix custom content-type overidden by json method 
- ([2364364](https://github.com/0x676e67/wreq/commit/23643640ac72e26061314b15c1f6372df4117413))\n\n### Build\n\n- *(deps)* Bump actions/checkout from 3 to 4 ([#35](https://github.com/0x676e67/wreq/issues/35)) - ([07e700d](https://github.com/0x676e67/wreq/commit/07e700d41482eeb7b3e571608439241b43f96bec))\n- *(deps)* Bump softprops/action-gh-release from 1 to 2 ([#36](https://github.com/0x676e67/wreq/issues/36)) - ([ff76de9](https://github.com/0x676e67/wreq/commit/ff76de993a07df45b4b8be690ce725fc2e344e89))\n- Fix `android`/`fuchsia`/`linux` --no-default-features build ([#110](https://github.com/0x676e67/wreq/issues/110)) - ([40e2b8a](https://github.com/0x676e67/wreq/commit/40e2b8a10748b3b32ea9076c4ca69d14d9596324))\n- Fix `--no-default-features` build - ([0d0fef0](https://github.com/0x676e67/wreq/commit/0d0fef05250bdfc915671e9cf86cd229621964be))\n\n### Cargo\n\n- Update to rustls 0.16 - ([3033f11](https://github.com/0x676e67/wreq/commit/3033f11639c2ef0eab86286083b40586079d2662))\n\n### Client\n\n- Add convenience method for DELETE - ([a3983f3](https://github.com/0x676e67/wreq/commit/a3983f3122b2d1495ea36bb5a8fd019a7605ae56))\n\n### Dep\n\n- Upgrade trust-dns-resolver from v0.22 to v0.23 ([#1965](https://github.com/0x676e67/wreq/issues/1965)) - ([0292486](https://github.com/0x676e67/wreq/commit/0292486abab25914c046b71ab6d6da24206614d3))\n\n### Dependencies\n\n- Upgrade base64 to latest version ([#692](https://github.com/0x676e67/wreq/issues/692)) - ([3090a68](https://github.com/0x676e67/wreq/commit/3090a68d5383c572deba077d37d44e1c0424ac11))\n\n### Deps\n\n- *(async-tungstenite)* Downgrade `async-tungstenite` to `0.27.0` ([#161](https://github.com/0x676e67/wreq/issues/161)) - ([f26f8c4](https://github.com/0x676e67/wreq/commit/f26f8c4eccde38c91cb0ee9e55825b26429680a4))\n- *(async-tungstenite)* 0.28.0 ([#24](https://github.com/0x676e67/wreq/issues/24)) - ([a924df3](https://github.com/0x676e67/wreq/commit/a924df32110b68ec020e04d20a21f3c032bd087a))\n- *(base64)* Bump 
version to v0.22.x ([#46](https://github.com/0x676e67/wreq/issues/46)) - ([65e5b6d](https://github.com/0x676e67/wreq/commit/65e5b6d775c6cf252a96b06febd82317067057e1))\n- *(boring)* V4.x ([#76](https://github.com/0x676e67/wreq/issues/76)) - ([8eb0bf4](https://github.com/0x676e67/wreq/commit/8eb0bf45f9a7333f79d882dca935cbbc3c52e8dc))\n- *(boring-sys)* Bump version to v2.0.6 - ([1f4fcc6](https://github.com/0x676e67/wreq/commit/1f4fcc6dd7fe4a35616f7c7f6a9480c1a9411a9f))\n- *(boring-sys)* Bump version to v2.0.5 - ([e62c99d](https://github.com/0x676e67/wreq/commit/e62c99df8b33174d7b2616406786b341cc7e8add))\n- *(boring-sys)* Bump version to v2.0.4 - ([fa9b28c](https://github.com/0x676e67/wreq/commit/fa9b28c1679c02f0cfcffadb7ace9bdb753a623f))\n- *(boring-sys)* Bump version to v2.0.3 - ([1a79070](https://github.com/0x676e67/wreq/commit/1a7907054e33cda15bd89cccf49aa06938525f98))\n- *(boring/hyper/h2)* Migration patch crate name ([#109](https://github.com/0x676e67/wreq/issues/109)) - ([676d7b3](https://github.com/0x676e67/wreq/commit/676d7b3038cc12499b5dac4befaf5c1448ca6684))\n- *(brotli)* 7.0.0 ([#22](https://github.com/0x676e67/wreq/issues/22)) - ([94e2fdd](https://github.com/0x676e67/wreq/commit/94e2fdd605f969a185bc104d62e8e3e7b6f44b78))\n- *(chore)* Update to the latest rustls ([#969](https://github.com/0x676e67/wreq/issues/969)) - ([1a2c102](https://github.com/0x676e67/wreq/commit/1a2c10256a924ff8753f683c4200b8b4d05a2cdd))\n- *(cookie_store)* Bump version to v0.21.x ([#47](https://github.com/0x676e67/wreq/issues/47)) - ([fbf0bdc](https://github.com/0x676e67/wreq/commit/fbf0bdcee4b9a58d565b1083fb7c61fc29ef64c7))\n- *(h2)* Use h2 dependencies export by hyper ([#63](https://github.com/0x676e67/wreq/issues/63)) - ([6effc9d](https://github.com/0x676e67/wreq/commit/6effc9d2445fdeefb63d271441b65b163a6f4ee1))\n- *(hyper)* Bump version to v0.14.60 ([#74](https://github.com/0x676e67/wreq/issues/74)) - 
([6842220](https://github.com/0x676e67/wreq/commit/6842220dc1bf28eeee2834b3952c48a8a2bbc1d8))\n- *(hyper)* Bump version to v0.14.50 ([#45](https://github.com/0x676e67/wreq/issues/45)) - ([c0cbf29](https://github.com/0x676e67/wreq/commit/c0cbf294ec1c86d63b13b8592b3ef32e121dc1e6))\n- *(hyper)* Bump version to v0.14.33 - ([b7fa5f3](https://github.com/0x676e67/wreq/commit/b7fa5f344b0b8b9957b197df7ad79309e3acc593))\n- *(hyper)* Bump version to v0.14.28 - ([bdcbe40](https://github.com/0x676e67/wreq/commit/bdcbe40a74357630cf96398af1994d950acb2bc6))\n- *(hyper_imp)* Bump version to v0.14.30 - ([4ba5b00](https://github.com/0x676e67/wreq/commit/4ba5b0059956761b6774f55e181a05b806425b26))\n- *(ipnet)* 2.10.0 ([#15](https://github.com/0x676e67/wreq/issues/15)) - ([f708a86](https://github.com/0x676e67/wreq/commit/f708a86a4ece4598a1788750a5c6a3a3fa6ab1e5))\n- *(ipnet)* V2.9.0 ([#56](https://github.com/0x676e67/wreq/issues/56)) - ([b14d428](https://github.com/0x676e67/wreq/commit/b14d4284028b0ee551716d2000a6a305c3d59a95))\n- *(mime)* V0.3.17 ([#57](https://github.com/0x676e67/wreq/issues/57)) - ([1f76f27](https://github.com/0x676e67/wreq/commit/1f76f2788d8779a7e29baca4acf4b3a124b1b25d))\n- *(percent-encoding)* V2.3 ([#75](https://github.com/0x676e67/wreq/issues/75)) - ([31ce45c](https://github.com/0x676e67/wreq/commit/31ce45cfb7691ff0e0684a92eef78dad6feda652))\n- *(system-configuration)* V0.6.0 - ([8f68af5](https://github.com/0x676e67/wreq/commit/8f68af567683dc449df4b014bc6d7771f3065727))\n- *(tokio-socks)* 0.5.2 ([#23](https://github.com/0x676e67/wreq/issues/23)) - ([d05a3f5](https://github.com/0x676e67/wreq/commit/d05a3f552b2ded4eeaa7f65d8b96f8ec96e570c7))\n- *(tungstenite)* Backport dependencies - ([1c9da5b](https://github.com/0x676e67/wreq/commit/1c9da5be63e837284ba49870c160a9e8dcccad59))\n- *(url)* V2.5 ([#58](https://github.com/0x676e67/wreq/issues/58)) - ([5d71c95](https://github.com/0x676e67/wreq/commit/5d71c95816ef018fd113280e6626dbd408d0d2d2))\n- *(windows-registry)* 
0.3.0 ([#25](https://github.com/0x676e67/wreq/issues/25)) - ([cb9cf99](https://github.com/0x676e67/wreq/commit/cb9cf99ed1cc2d7904be6455e178cb6ef8f618ef))\n- *(winreg)* V0.52.0 - ([74144c2](https://github.com/0x676e67/wreq/commit/74144c25e220b85b51e4f635a4a25fd7c086fc2f))\n- Remove unnecessary libc dependencies ([#53](https://github.com/0x676e67/wreq/issues/53)) - ([6a24c13](https://github.com/0x676e67/wreq/commit/6a24c13ab7ee0d1e448654993daa9ddb36e4c87a))\n- Update winrege 0.10 -> 0.50 ([#1869](https://github.com/0x676e67/wreq/issues/1869)) - ([e02df1f](https://github.com/0x676e67/wreq/commit/e02df1f448d845fe01e6ea82c76ec89a59e5d568))\n- Update rustls v0.20.1 -> v0.21.0 ([#1791](https://github.com/0x676e67/wreq/issues/1791)) - ([a0b5ea5](https://github.com/0x676e67/wreq/commit/a0b5ea5d7179778ce3e02117863b23b452b84d48))\n- Update async-compression v0.3.13 => v0.4.0 ([#1828](https://github.com/0x676e67/wreq/issues/1828)) - ([7e7b116](https://github.com/0x676e67/wreq/commit/7e7b116a134cc0d6d646ab316dd83976369d5298))\n\n### Dpes\n\n- *(typed-builder)* V0.20.0 ([#16](https://github.com/0x676e67/wreq/issues/16)) - ([ea70d90](https://github.com/0x676e67/wreq/commit/ea70d902c68bf785c45c255c61ed48276f005e14))\n\n### Example\n\n- Update usage doc for blocking example ([#1112](https://github.com/0x676e67/wreq/issues/1112)) - ([1f425a0](https://github.com/0x676e67/wreq/commit/1f425a0244bcd7b4565dceb9076450d951f2ec03))\n\n### Fmt\n\n- Wasm body ([#1359](https://github.com/0x676e67/wreq/issues/1359)) - ([bd4e0c6](https://github.com/0x676e67/wreq/commit/bd4e0c663c243b584dca114c1d376f67b1967f64))\n\n### Http3\n\n- Upgrade dependencies ([#2028](https://github.com/0x676e67/wreq/issues/2028)) - ([52190df](https://github.com/0x676e67/wreq/commit/52190df64fb56edbfb9cb7c054662b1cfedad476))\n- Enable `runtime-tokio` for `quinn` ([#1846](https://github.com/0x676e67/wreq/issues/1846)) - ([06c8e5b](https://github.com/0x676e67/wreq/commit/06c8e5b0b008afee8114fb979b85cd8b73415391))\n- Don't 
force `webpki` when experiemental `http3` is enabled ([#1845](https://github.com/0x676e67/wreq/issues/1845)) - ([c9f0c28](https://github.com/0x676e67/wreq/commit/c9f0c28e4c6e2b9d09544df832c41deef3847505))\n\n### Impersonate\n\n- Add `chrome_126` - ([808e23a](https://github.com/0x676e67/wreq/commit/808e23a935439ac8a0d41c9aa6ab1661070761d7))\n- Chrome_123, chrome_125 - add `zstd` to Accept-Encoding header - ([f17d07e](https://github.com/0x676e67/wreq/commit/f17d07e1d0c3aa8036dcbd785508a43f25bf21cd))\n\n### Msrv\n\n- Bump to 1.63 ([#1947](https://github.com/0x676e67/wreq/issues/1947)) - ([4aa8516](https://github.com/0x676e67/wreq/commit/4aa8516770eb96c66e753621660275e65e269213))\n\n### Multipart\n\n- Force a CRLF at the end of request - ([a525209](https://github.com/0x676e67/wreq/commit/a52520941f518ade756a73797e875722d1ba344b))\n\n### Native-tls\n\n- Add Identiy::from_pkcs8_pem ([#1655](https://github.com/0x676e67/wreq/issues/1655)) - ([231b18f](https://github.com/0x676e67/wreq/commit/231b18f83572836c674404b33cb1ca8b35ca3e36))\n\n### Proxy\n\n- Add support for proxy authentication with user-specified header values ([#2053](https://github.com/0x676e67/wreq/issues/2053)) - ([c09c5e6](https://github.com/0x676e67/wreq/commit/c09c5e6bbcf79b3984cd4c2cf2f2f5d9e2a4a6af))\n- Refactor a collapsible_match ([#1214](https://github.com/0x676e67/wreq/issues/1214)) - ([544282a](https://github.com/0x676e67/wreq/commit/544282a0b49d6ba2ac78b844c23415c0bf62a304))\n\n### Refractor\n\n- *(tls/settings)* Generate configuration using builder mode ([#121](https://github.com/0x676e67/wreq/issues/121)) - ([a370f18](https://github.com/0x676e67/wreq/commit/a370f18774eced8c2c62ed2d4d9f9db72639eaba))\n\n### Remove\n\n- *(client)* Remove blocking client support ([#123](https://github.com/0x676e67/wreq/issues/123)) ([#124](https://github.com/0x676e67/wreq/issues/124)) ([#125](https://github.com/0x676e67/wreq/issues/125)) - 
([5091f9a](https://github.com/0x676e67/wreq/commit/5091f9ae4f8394ec5e5a6dbf138c598c8d5b2295))\n\n### Request\n\n- Test adding duplicate headers to the request ([#519](https://github.com/0x676e67/wreq/issues/519)) - ([1bdc3fa](https://github.com/0x676e67/wreq/commit/1bdc3fa3c8dd3c4038efc566b7ccdbc86e38cfa3))\n\n### Tmp\n\n- Use upstream git repo for hyper-native-tls - ([d12d604](https://github.com/0x676e67/wreq/commit/d12d604e380b8f1ee8cc9e22fd218ce3d283aa4e))\n\n### Wasm\n\n- Add method `user_agent` to `ClientBuilder`. ([#2018](https://github.com/0x676e67/wreq/issues/2018)) - ([a9b960f](https://github.com/0x676e67/wreq/commit/a9b960fc24455c3c5c7e35b54dbcc6512cc86d2b))\n- Blob url support ([#1797](https://github.com/0x676e67/wreq/issues/1797)) - ([2fa69ad](https://github.com/0x676e67/wreq/commit/2fa69ad384ceb9a0f718ceb45b092341a5285dd4))\n- Fix premature abort for streaming bodies ([#1782](https://github.com/0x676e67/wreq/issues/1782)) - ([df2b3ba](https://github.com/0x676e67/wreq/commit/df2b3baadc1eade54b1c22415792b778442673a4))\n- Fix standalone/multipart body conversion to JsValue ([#1364](https://github.com/0x676e67/wreq/issues/1364)) - ([0ef1a2e](https://github.com/0x676e67/wreq/commit/0ef1a2ea78eaa5aeb280fd1dbbbabb83abc45c30))\n- Don't send request body as plain uint8 array ([#1358](https://github.com/0x676e67/wreq/issues/1358)) - ([bb3d102](https://github.com/0x676e67/wreq/commit/bb3d102108493da9adf9081b4d0badbff4a2bd91))\n- Add missing `as_bytes` method to `Body` implementation ([#1270](https://github.com/0x676e67/wreq/issues/1270)) - ([d40276c](https://github.com/0x676e67/wreq/commit/d40276c0f081c2cc1ebc8b63ad6075daf0f6dff0))\n- Avoid dependency on serde-serialize feature ([#1337](https://github.com/0x676e67/wreq/issues/1337)) - ([cfa301c](https://github.com/0x676e67/wreq/commit/cfa301c7fa0f83330f57b312f4e762a3e47ff2cb))\n- Omit request body if it's empty ([#1012](https://github.com/0x676e67/wreq/issues/1012)) - 
([d42385e](https://github.com/0x676e67/wreq/commit/d42385e7f2cc364efa5e16a7154e7e0cebdd1b57))\n- Impl TryFrom<HttpRequest<T>> for Request ([#997](https://github.com/0x676e67/wreq/issues/997)) - ([dd8441f](https://github.com/0x676e67/wreq/commit/dd8441fd23dae6ffb79b4cea2862e5bca0c59743))\n- Add error_for_status to wasm response ([#779](https://github.com/0x676e67/wreq/issues/779)) - ([1478313](https://github.com/0x676e67/wreq/commit/147831375613a5e508487b2d85a99104ae1505af))\n- Add url function to wasm response ([#777](https://github.com/0x676e67/wreq/issues/777)) - ([fd88e0c](https://github.com/0x676e67/wreq/commit/fd88e0c648e6632f3f92ed119b1a93aefd66ed64))\n- Add request body in the form of Bytes ([#696](https://github.com/0x676e67/wreq/issues/696)) - ([f6f81f9](https://github.com/0x676e67/wreq/commit/f6f81f9cc1ab84a007fe4203822de08d72c07f57))\n- Add bytes method to wasm response ([#694](https://github.com/0x676e67/wreq/issues/694)) - ([b24b0be](https://github.com/0x676e67/wreq/commit/b24b0be461ed39a96335e40561d07a35f2c3eb36))\n- Translate over response headers ([#689](https://github.com/0x676e67/wreq/issues/689)) - ([dd65fc7](https://github.com/0x676e67/wreq/commit/dd65fc7c3ad037e6674e8bac8c46f4bdeca6c4ca))\n\n## New Contributors ❤️\n\n* @0x676e67 made their first contribution\n* @dairoot made their first contribution in [#68](https://github.com/0x676e67/wreq/pull/68)\n* @AliaSabur made their first contribution in [#31](https://github.com/0x676e67/wreq/pull/31)\n* @deedy5 made their first contribution\n* @dependabot[bot] made their first contribution\n* @seanmonstar made their first contribution\n* @jan-auer made their first contribution\n* @lorepozo made their first contribution\n* @abls made their first contribution\n* @Noah-Kennedy made their first contribution\n* @tshepang made their first contribution\n* @bitfl0wer made their first contribution\n* @FirelightFlagboy made their first contribution\n* @tnull made their first contribution\n* @conradludgate made 
their first contribution\n* @droe made their first contribution\n* @NobodyXu made their first contribution\n* @jefflloyd made their first contribution\n* @brian030128 made their first contribution\n* @eric-seppanen made their first contribution\n* @T-Sujeeban made their first contribution\n* @cipherbrain made their first contribution\n* @bouzuya made their first contribution\n* @VivekPanyam made their first contribution\n* @paolobarbolini made their first contribution\n* @ollyswanson made their first contribution\n* @daxpedda made their first contribution\n* @attila-lin made their first contribution\n* @smndtrl made their first contribution\n* @nyurik made their first contribution\n* @complexspaces made their first contribution\n* @cpu made their first contribution\n* @hulin32 made their first contribution\n* @skyf0l made their first contribution\n* @nickelc made their first contribution\n* @jneem made their first contribution\n* @kckeiks made their first contribution\n* @lucab made their first contribution\n* @j7nw4r made their first contribution\n* @TurnOfACard made their first contribution\n* @anhcuky made their first contribution\n* @lstrojny made their first contribution\n* @dmeijboom made their first contribution\n* @4JX made their first contribution\n* @link2xt made their first contribution\n* @beeb made their first contribution\n* @Khoulaiz made their first contribution\n* @BlackDex made their first contribution\n* @Austaras made their first contribution\n* @kianmeng made their first contribution\n* @Alvenix made their first contribution\n* @irrelevelephant made their first contribution\n* @mirecl made their first contribution\n* @lpraneis made their first contribution\n* @luqmana made their first contribution\n* @vidhanio made their first contribution\n* @futursolo made their first contribution\n* @neoeinstein made their first contribution\n* @ctron made their first contribution\n* @ made their first contribution\n* @cuishuang made their first 
contribution\n* @Mathspy made their first contribution\n* @eyalsatori made their first contribution\n* @flavio made their first contribution\n* @MisileLab made their first contribution\n* @jqnatividad made their first contribution\n* @ducaale made their first contribution\n* @biluohc made their first contribution\n* @nihaals made their first contribution\n* @ViddeM made their first contribution\n* @edmorley made their first contribution\n* @sugar700 made their first contribution\n* @kraktus made their first contribution\n* @TjeuKayim made their first contribution\n* @ecclarke42 made their first contribution\n* @nikstur made their first contribution\n* @vsaase made their first contribution\n* @BiagioFesta made their first contribution\n* @niuhuan made their first contribution\n* @nwolber made their first contribution\n* @fredr made their first contribution\n* @jeschkies made their first contribution\n* @pfernie made their first contribution\n* @crapStone made their first contribution\n* @6543 made their first contribution\n* @striezel made their first contribution\n* @victoryaskevich made their first contribution\n* @abatkin made their first contribution\n* @skystar-p made their first contribution\n* @silvioprog made their first contribution\n* @jmgilman made their first contribution\n* @Dr-Emann made their first contribution\n* @jplatte made their first contribution\n* @blyxxyz made their first contribution\n* @dlesl made their first contribution\n* @Saruniks made their first contribution\n* @campbellC made their first contribution\n* @kjvalencik made their first contribution\n* @mlodato517 made their first contribution\n* @bensadiku made their first contribution\n* @marcoieni made their first contribution\n* @ctjhoa made their first contribution\n* @jonhoo made their first contribution\n* @Septias made their first contribution\n* @kotborealis made their first contribution\n* @bishtpawan made their first contribution\n* @Gottox made their first contribution\n* 
@CfirTsabari made their first contribution\n* @ibraheemdev made their first contribution\n* @svenstaro made their first contribution\n* @kornelski made their first contribution\n* @meldron made their first contribution\n* @webern made their first contribution\n* @rakshith-ravi made their first contribution\n* @Marwes made their first contribution\n* @glyphpoch made their first contribution\n* @markhildreth made their first contribution\n* @wchargin made their first contribution\n* @amousset made their first contribution\n* @baoyachi made their first contribution\n* @messense made their first contribution\n* @ranile made their first contribution\n* @varoonp123 made their first contribution\n* @Martichou made their first contribution\n* @frewsxcv made their first contribution\n* @zicklag made their first contribution\n* @thomastaylor312 made their first contribution\n* @fiag made their first contribution\n* @est31 made their first contribution\n* @stevelr made their first contribution\n* @taiki-e made their first contribution\n* @federico-terzi made their first contribution\n* @XyLyXyRR made their first contribution\n* @pluehne made their first contribution\n* @sdroege made their first contribution\n* @Snarpix made their first contribution\n* @fabricedesre made their first contribution\n* @shuoli84 made their first contribution\n* @JOE1994 made their first contribution\n* @Jasonoro made their first contribution\n* @zacps made their first contribution\n* @fuyumatsuri made their first contribution\n* @707090 made their first contribution\n* @snejugal made their first contribution\n* @TaKO8Ki made their first contribution\n* @vorner made their first contribution\n* @alex made their first contribution\n* @LionsAd made their first contribution\n* @davidpdrsn made their first contribution\n* @alianse777 made their first contribution\n* @tasn made their first contribution\n* @jsha made their first contribution\n* @bryanburgers made their first contribution\n* @dcuenot made 
their first contribution\n* @slonopotamus made their first contribution\n* @hecrj made their first contribution\n* @x1957 made their first contribution\n* @cuviper made their first contribution\n* @x448 made their first contribution\n* @Luro02 made their first contribution\n* @eugene-babichenko made their first contribution\n* @kentfredric made their first contribution\n* @Diggsey made their first contribution\n* @nicklan made their first contribution\n* @tesuji made their first contribution\n* @metajack made their first contribution\n* @manyuanrong made their first contribution\n* @WindSoilder made their first contribution\n* @r-arias made their first contribution\n* @rhysd made their first contribution\n* @kodieg made their first contribution\n* @rodoufu made their first contribution\n* @Lucretiel made their first contribution\n* @mbrobbel made their first contribution\n* @tobdub made their first contribution\n* @jgall made their first contribution\n* @cbourjau made their first contribution\n* @gathuku made their first contribution\n* @vorot93 made their first contribution\n* @khuey made their first contribution\n* @SOF3 made their first contribution\n* @benesch made their first contribution\n* @danieleades made their first contribution\n* @basdebue made their first contribution\n* @vigneshsarma made their first contribution\n* @travier-anssi made their first contribution\n* @ancwrd1 made their first contribution\n* @nirasan made their first contribution\n* @prfss made their first contribution\n* @repi made their first contribution\n* @mathstuf made their first contribution\n* @GuillaumeGomez made their first contribution\n* @bluejekyll made their first contribution\n* @Liby99 made their first contribution\n* @quininer made their first contribution\n* @aaneto made their first contribution\n* @chenl made their first contribution\n* @jeromegn made their first contribution\n* @theduke made their first contribution\n* @arnodb made their first contribution\n* @CJP10 
made their first contribution\n* @fbenkstein made their first contribution\n* @ismith made their first contribution\n* @antoinecarton made their first contribution\n* @mavax made their first contribution\n* @gbonnema made their first contribution\n* @emschwartz made their first contribution\n* @puffybsd made their first contribution\n* @sudo-ben made their first contribution\n* @shouya made their first contribution\n* @martin-t made their first contribution\n* @kevinwilson541 made their first contribution\n* @polyfloyd made their first contribution\n* @Eijebong made their first contribution\n* @illicitonion made their first contribution\n* @dbrgn made their first contribution\n* @davidwilemski made their first contribution\n* @KNnut made their first contribution\n* @MarkDDR made their first contribution\n* @yageek made their first contribution\n* @JoshMcguigan made their first contribution\n* @frol made their first contribution\n* @spk made their first contribution\n* @rukai made their first contribution\n* @jcaesar made their first contribution\n* @andy128k made their first contribution\n* @bhansconnect made their first contribution\n* @scottschroeder made their first contribution\n* @DoumanAsh made their first contribution\n* @kennytm made their first contribution\n* @cakey made their first contribution\n* @mattias-p made their first contribution\n* @Siilwyn made their first contribution\n* @Sh4pe made their first contribution\n* @Dylan-DPC made their first contribution\n* @csirkeee made their first contribution\n* @is made their first contribution\n* @oli-obk made their first contribution\n* @sbstp made their first contribution\n* @shepmaster made their first contribution\n* @tafia made their first contribution\n* @knight42 made their first contribution\n* @Henning-K made their first contribution\n* @osa1 made their first contribution\n* @marmistrz made their first contribution\n* @kamalmarhubi made their first contribution\n* @chrisvittal made their first 
contribution\n* @e00E made their first contribution\n* @KodrAus made their first contribution\n* @Roguelazer made their first contribution\n* @bhendo made their first contribution\n* @tomprince made their first contribution\n* @AndyGauge made their first contribution\n* @jaemk made their first contribution\n* @budziq made their first contribution\n* @steverob made their first contribution\n* @rap2hpoutre made their first contribution\n* @TedDriggs made their first contribution\n* @imp made their first contribution\n* @gsquire made their first contribution\n* @rylio made their first contribution\n* @emk made their first contribution\n* @Keruspe made their first contribution\n* @quodlibetor made their first contribution\n* @sfackler made their first contribution\n* @sebasgarcep made their first contribution\n* @saghm made their first contribution\n* @nelsonjchen made their first contribution\n* @badboy made their first contribution\n* @brycefisher made their first contribution\n* @aidanhs made their first contribution\n* @Michael-F-Bryan made their first contribution\n\n<!-- generated by git-cliff -->\n"
  },
  {
    "path": "bench/http1.rs",
    "content": "//! HTTP/1.1 benchmark\n\nmod support;\n\nuse std::time::Duration;\n\nuse criterion::{Criterion, criterion_group, criterion_main};\nuse support::{HttpVersion, Tls, bench};\n\nconst NUM_REQUESTS_TO_SEND: usize = 500;\n\n#[inline]\nfn bench(c: &mut Criterion) {\n    bench::bench(c, Tls::Disabled, HttpVersion::Http1, NUM_REQUESTS_TO_SEND)\n        .expect(\"Failed to run HTTP/1 benchmark server\")\n}\n\ncriterion_group!(\n    name = benches;\n    config = Criterion::default()\n        .sample_size(10)\n        .warm_up_time(Duration::from_secs(3));\n    targets = bench\n);\ncriterion_main!(benches);\n"
  },
  {
    "path": "bench/http1_over_tls.rs",
    "content": "//! HTTP/1.1 over TLS benchmark\n\nmod support;\n\nuse std::time::Duration;\n\nuse criterion::{Criterion, criterion_group, criterion_main};\nuse support::{HttpVersion, Tls, bench};\n\nconst NUM_REQUESTS_TO_SEND: usize = 500;\n\n#[inline]\nfn bench(c: &mut Criterion) {\n    bench::bench(c, Tls::Enabled, HttpVersion::Http1, NUM_REQUESTS_TO_SEND)\n        .expect(\"Failed to run HTTP/1 over TLS benchmark server\")\n}\n\ncriterion_group!(\n    name = benches;\n    config = Criterion::default()\n        .sample_size(10)\n        .warm_up_time(Duration::from_secs(3));\n    targets = bench\n);\ncriterion_main!(benches);\n"
  },
  {
    "path": "bench/http2.rs",
    "content": "//! HTTP/2 benchmark\n\nmod support;\n\nuse std::time::Duration;\n\nuse criterion::{Criterion, criterion_group, criterion_main};\nuse support::{HttpVersion, Tls, bench};\n\nconst NUM_REQUESTS_TO_SEND: usize = 500;\n\n#[inline]\nfn bench(c: &mut Criterion) {\n    bench::bench(c, Tls::Disabled, HttpVersion::Http2, NUM_REQUESTS_TO_SEND)\n        .expect(\"Failed to run HTTP/2 benchmark server\")\n}\n\ncriterion_group!(\n    name = benches;\n    config = Criterion::default()\n        .sample_size(10)\n        .warm_up_time(Duration::from_secs(3));\n    targets = bench\n);\ncriterion_main!(benches);\n"
  },
  {
    "path": "bench/http2_over_tls.rs",
    "content": "//! HTTP/2 over TLS benchmark\n\nmod support;\n\nuse std::time::Duration;\n\nuse criterion::{Criterion, criterion_group, criterion_main};\nuse support::{HttpVersion, Tls, bench};\n\nconst NUM_REQUESTS_TO_SEND: usize = 500;\n\n#[inline]\nfn bench(c: &mut Criterion) {\n    bench::bench(c, Tls::Enabled, HttpVersion::Http2, NUM_REQUESTS_TO_SEND)\n        .expect(\"Failed to run HTTP/2 over TLS benchmark server\")\n}\n\ncriterion_group!(\n    name = benches;\n    config = Criterion::default()\n        .sample_size(10)\n        .warm_up_time(Duration::from_secs(3));\n    targets = bench\n);\ncriterion_main!(benches);\n"
  },
  {
    "path": "bench/support/bench.rs",
    "content": "use criterion::Criterion;\n\nuse crate::support::{\n    BoxError, HttpVersion, Tls, client::bench_clients, current_thread_runtime,\n    multi_thread_runtime, server::with_server,\n};\n\npub const CURRENT_THREAD_LABEL: &str = \"current_thread\";\npub const MULTI_THREAD_LABEL: &str = \"multi_thread\";\npub const CONCURRENT_CASES: &[usize] = &[10, 50, 100, 150];\n\n/// Recommended chunk sizes for real-world network scenarios:\n///   - 16 KB: Matches standard TCP buffers, ideal for HTTP/2 frames.\n///   - 32 KB: For large HTTP payloads, fits modern socket buffers.\n///   - 64 KB: Default Linux buffer size, optimized for large uploads.\n///   - 128 KB: For high-throughput, large-scale transfers.\n///   - 256 KB: Bulk data, maximum throughput on fast networks.\n///\n/// For benchmarking latency-sensitive and high-throughput transfers.\npub const BODY_CASES: [(&[u8], usize); 7] = [\n    (&[b'a'; 1024], 1024),                  // 1 KB, chunk 1 KB\n    (&[b'a'; 10 * 1024], 10 * 1024),        // 10 KB, chunk 10 KB\n    (&[b'a'; 64 * 1024], 16 * 1024),        // 64 KB, chunk 16 KB\n    (&[b'a'; 128 * 1024], 32 * 1024),       // 128 KB, chunk 32 KB\n    (&[b'a'; 1024 * 1024], 64 * 1024),      // 1 MB, chunk 64 KB\n    (&[b'a'; 2 * 1024 * 1024], 128 * 1024), // 2 MB, chunk 128 KB\n    (&[b'a'; 4 * 1024 * 1024], 256 * 1024), // 4 MB, chunk 256 KB\n];\n\npub fn bench(\n    c: &mut Criterion,\n    tls: Tls,\n    http_version: HttpVersion,\n    num_requests: usize,\n) -> Result<(), BoxError> {\n    const OS: &str = std::env::consts::OS;\n    const ARCH: &str = std::env::consts::ARCH;\n\n    let system = sysinfo::System::new_all();\n    let cpu_model = system\n        .cpus()\n        .first()\n        .map_or(\"n/a\", |cpu| cpu.brand().trim_start().trim_end());\n\n    for &concurrent_limit in CONCURRENT_CASES {\n        for body in BODY_CASES {\n            with_server(tls, |addr| {\n                // single-threaded client\n                let mut group = 
c.benchmark_group(format!(\n                    \"{cpu_model}/{OS}_{ARCH}/{CURRENT_THREAD_LABEL}/{tls}/{http_version}/{concurrent_limit}/{}KB\",\n                    body.0.len() / 1024,\n                ));\n\n                bench_clients(\n                    &mut group,\n                    current_thread_runtime,\n                    addr,\n                    tls,\n                    http_version,\n                    num_requests,\n                    concurrent_limit,\n                    body,\n                )?;\n                group.finish();\n                Ok(())\n            })?;\n\n            with_server(tls, |addr| {\n                // multi-threaded client\n                let mut group = c.benchmark_group(format!(\n                    \"{cpu_model}/{OS}_{ARCH}/{MULTI_THREAD_LABEL}/{tls}/{http_version}/{concurrent_limit}/{}KB\",\n                    body.0.len() / 1024,\n                ));\n                bench_clients(\n                    &mut group,\n                    multi_thread_runtime,\n                    addr,\n                    tls,\n                    http_version,\n                    num_requests,\n                    concurrent_limit,\n                    body,\n                )?;\n                group.finish();\n                Ok(())\n            })?;\n        }\n    }\n\n    Ok(())\n}\n"
  },
  {
    "path": "bench/support/client.rs",
    "content": "use std::{convert::Infallible, net::SocketAddr, sync::Arc};\n\nuse bytes::Bytes;\nuse criterion::{BenchmarkGroup, measurement::WallTime};\nuse http_body_util::BodyExt;\nuse tokio::{runtime::Runtime, sync::Semaphore};\n\nuse super::{BoxError, HttpVersion, Tls};\n\nfn create_wreq_client(tls: Tls, http_version: HttpVersion) -> Result<wreq::Client, BoxError> {\n    let builder = wreq::Client::builder()\n        .no_proxy()\n        .redirect(wreq::redirect::Policy::none())\n        .tls_cert_verification(!matches!(tls, Tls::Enabled));\n\n    let builder = match http_version {\n        HttpVersion::Http1 => builder.http1_only(),\n        HttpVersion::Http2 => builder.http2_only(),\n    };\n\n    Ok(builder.build()?)\n}\n\nfn create_reqwest_client(tls: Tls, http_version: HttpVersion) -> Result<reqwest::Client, BoxError> {\n    let builder = reqwest::Client::builder()\n        .no_proxy()\n        .redirect(reqwest::redirect::Policy::none())\n        .danger_accept_invalid_certs(matches!(tls, Tls::Enabled));\n\n    let builder = match http_version {\n        HttpVersion::Http1 => builder.http1_only(),\n        HttpVersion::Http2 => builder.http2_prior_knowledge(),\n    };\n\n    Ok(builder.build()?)\n}\n\nasync fn wreq_body_assert(mut response: wreq::Response, expected_body_size: usize) {\n    let mut body_size = 0;\n    while let Some(Ok(chunk)) = response.frame().await {\n        if let Ok(chunk) = chunk.into_data() {\n            body_size += chunk.len();\n        }\n    }\n    assert!(\n        body_size == expected_body_size,\n        \"Unexpected response body: got {body_size} bytes, expected {expected_body_size} bytes\"\n    );\n}\n\nasync fn reqwest_body_assert(mut response: reqwest::Response, expected_body_size: usize) {\n    let mut body_size = 0;\n    while let Ok(Some(chunk)) = response.chunk().await {\n        body_size += chunk.len();\n    }\n    assert!(\n        body_size == expected_body_size,\n        \"Unexpected response body: got 
{body_size} bytes, expected {expected_body_size} bytes\"\n    );\n}\n\nfn stream_from_bytes(\n    body: &'static [u8],\n    chunk_size: usize,\n) -> impl futures_util::stream::TryStream<Ok = Bytes, Error = Infallible> + Send + 'static {\n    futures_util::stream::unfold((body, 0), move |(body, offset)| async move {\n        if offset >= body.len() {\n            None\n        } else {\n            let end = (offset + chunk_size).min(body.len());\n            let chunk = Bytes::from_static(&body[offset..end]);\n            Some((Ok::<Bytes, Infallible>(chunk), (body, end)))\n        }\n    })\n}\n\n#[inline]\nfn wreq_body(stream: bool, (body, chunk_size): (&'static [u8], usize)) -> wreq::Body {\n    if stream {\n        let stream = stream_from_bytes(body, chunk_size);\n        wreq::Body::wrap_stream(stream)\n    } else {\n        wreq::Body::from(body)\n    }\n}\n\n#[inline]\nfn reqwest_body(stream: bool, (body, chunk_size): (&'static [u8], usize)) -> reqwest::Body {\n    if stream {\n        let stream = stream_from_bytes(body, chunk_size);\n        reqwest::Body::wrap_stream(stream)\n    } else {\n        reqwest::Body::from(body)\n    }\n}\n\nasync fn wreq_requests_concurrent(\n    client: &wreq::Client,\n    url: &str,\n    num_requests: usize,\n    concurrent_limit: usize,\n    body: (&'static [u8], usize),\n    stream: bool,\n) {\n    let semaphore = Arc::new(Semaphore::new(concurrent_limit));\n    let mut handles = Vec::with_capacity(num_requests);\n    for _ in 0..num_requests {\n        let client = client.clone();\n        let url = url.to_string();\n        let semaphore = semaphore.clone();\n        let fut = async move {\n            let _permit = semaphore\n                .acquire()\n                .await\n                .expect(\"Semaphore should be acquirable\");\n            let response = client\n                .post(url)\n                .body(wreq_body(stream, body))\n                .send()\n                .await\n                
.expect(\"Unexpected request failure\");\n            wreq_body_assert(response, body.0.len()).await;\n        };\n        handles.push(tokio::spawn(fut));\n    }\n    futures_util::future::join_all(handles).await;\n}\n\nasync fn reqwest_requests_concurrent(\n    client: &reqwest::Client,\n    url: &str,\n    num_requests: usize,\n    concurrent_limit: usize,\n    body: (&'static [u8], usize),\n    stream: bool,\n) {\n    let semaphore = Arc::new(Semaphore::new(concurrent_limit));\n    let mut handles = Vec::with_capacity(num_requests);\n    for _ in 0..num_requests {\n        let client = client.clone();\n        let url = url.to_string();\n        let semaphore = semaphore.clone();\n        let fut = async move {\n            let _permit = semaphore\n                .acquire()\n                .await\n                .expect(\"Semaphore should be acquirable\");\n            let response = client\n                .post(url)\n                .body(reqwest_body(stream, body))\n                .send()\n                .await\n                .expect(\"Unexpected request failure\");\n            reqwest_body_assert(response, body.0.len()).await;\n        };\n        handles.push(tokio::spawn(fut));\n    }\n    futures_util::future::join_all(handles).await;\n}\n\n#[allow(clippy::too_many_arguments)]\npub fn bench_clients(\n    group: &mut BenchmarkGroup<'_, WallTime>,\n    rt: fn() -> Runtime,\n    addr: SocketAddr,\n    tls: Tls,\n    http_version: HttpVersion,\n    num_requests: usize,\n    concurrent_limit: usize,\n    body: (&'static [u8], usize),\n) -> Result<(), BoxError> {\n    let url = format!(\"{tls}://{addr}\");\n\n    fn make_benchmark_label<T: ?Sized>(stream: bool) -> String {\n        let client = std::any::type_name::<T>()\n            .split(\"::\")\n            .next()\n            .expect(\"Type name should contain at least one segment\");\n\n        let body_type = if stream { \"stream\" } else { \"full\" };\n        
format!(\"{body_type}/{client}\")\n    }\n\n    for stream in [false, true] {\n        let client = create_wreq_client(tls, http_version)?;\n        group.bench_function(make_benchmark_label::<wreq::Client>(stream), |b| {\n            b.to_async(rt()).iter(|| {\n                wreq_requests_concurrent(\n                    &client,\n                    &url,\n                    num_requests,\n                    concurrent_limit,\n                    body,\n                    stream,\n                )\n            })\n        });\n        ::std::mem::drop(client);\n\n        let client = create_reqwest_client(tls, http_version)?;\n        group.bench_function(make_benchmark_label::<reqwest::Client>(stream), |b| {\n            b.to_async(rt()).iter(|| {\n                reqwest_requests_concurrent(\n                    &client,\n                    &url,\n                    num_requests,\n                    concurrent_limit,\n                    body,\n                    stream,\n                )\n            })\n        });\n        ::std::mem::drop(client);\n    }\n\n    Ok(())\n}\n"
  },
  {
    "path": "bench/support/server.rs",
    "content": "use std::{convert::Infallible, io, net::SocketAddr, pin::Pin, sync::Arc, time::Duration};\n\nuse btls::{\n    pkey::PKey,\n    ssl::{Ssl, SslAcceptor, SslMethod},\n    x509::X509,\n};\nuse bytes::Bytes;\nuse http_body_util::{BodyExt, Collected, Full};\nuse hyper::{body::Incoming, service::service_fn};\nuse hyper_util::{\n    rt::{TokioExecutor, TokioIo, TokioTimer},\n    server::conn::auto::Builder,\n};\nuse tokio::{\n    io::{AsyncRead, AsyncWrite},\n    net::{TcpListener, TcpStream},\n    sync::oneshot,\n    task::JoinSet,\n};\nuse tokio_btls::SslStream;\n\nuse super::{BoxError, Tls, multi_thread_runtime};\n\npub struct Server {\n    listener: std::net::TcpListener,\n    tls_acceptor: Option<Arc<SslAcceptor>>,\n    builder: Builder<TokioExecutor>,\n}\n\nimpl Server {\n    pub fn new(tls: Tls) -> Result<Self, BoxError> {\n        let tls_acceptor = match tls {\n            Tls::Enabled => {\n                let mut builder = SslAcceptor::mozilla_intermediate_v5(SslMethod::tls())?;\n\n                let cert = X509::from_der(include_bytes!(\"../../tests/support/server.cert\"))?;\n                let key =\n                    PKey::private_key_from_der(include_bytes!(\"../../tests/support/server.key\"))?;\n\n                builder.set_certificate(&cert)?;\n                builder.set_private_key(&key)?;\n                builder.check_private_key()?;\n\n                Some(Arc::new(builder.build()))\n            }\n            Tls::Disabled => None,\n        };\n\n        let mut builder = Builder::new(TokioExecutor::new());\n        builder.http1().timer(TokioTimer::new()).keep_alive(true);\n        builder\n            .http2()\n            .timer(TokioTimer::new())\n            .keep_alive_interval(Duration::from_secs(30));\n\n        let listener = std::net::TcpListener::bind(\"127.0.0.1:0\")?;\n        listener.set_nonblocking(true)?;\n\n        Ok(Server {\n            listener,\n            tls_acceptor,\n            builder,\n        })\n  
  }\n\n    fn local_addr(&self) -> io::Result<SocketAddr> {\n        self.listener.local_addr()\n    }\n\n    async fn run(self, mut shutdown: oneshot::Receiver<()>) -> Result<(), BoxError> {\n        let mut join_set = JoinSet::new();\n        let listener = TcpListener::from_std(self.listener)?;\n\n        loop {\n            tokio::select! {\n                _ = &mut shutdown => {\n                    break;\n                }\n                accept = listener.accept() => {\n                    if let Ok((socket, _peer_addr)) = accept {\n                        let tls_acceptor = self.tls_acceptor.clone();\n                        let builder = self.builder.clone();\n                        join_set.spawn(async move {\n                            handle_connection(socket, tls_acceptor, builder).await;\n                        });\n                    }\n                }\n            }\n        }\n\n        while let Some(result) = join_set.join_next().await {\n            if let Err(e) = result {\n                eprintln!(\"connection task failed: {e}\");\n            }\n        }\n\n        // Tokio internally accepts TCP connections while the TCPListener is active;\n        // drop the listener to immediately refuse connections rather than letting\n        // them hang.\n        ::std::mem::drop(listener);\n        Ok(())\n    }\n}\n\npub struct Handle {\n    shutdown: oneshot::Sender<()>,\n    join: std::thread::JoinHandle<()>,\n}\n\nimpl Handle {\n    pub fn shutdown(self) {\n        let _ = self.shutdown.send(());\n        let _ = self.join.join();\n    }\n}\n\npub fn with_server<F>(tls: Tls, f: F) -> Result<(), BoxError>\nwhere\n    F: FnOnce(SocketAddr) -> Result<(), BoxError>,\n{\n    let server = Server::new(tls)?;\n    let addr = server.local_addr()?;\n\n    let (shutdown_tx, shutdown_rx) = oneshot::channel();\n\n    let join = std::thread::spawn(move || {\n        let rt = multi_thread_runtime();\n        rt.block_on(server.run(shutdown_rx))\n      
      .expect(\"Failed to run server with shutdown\");\n    });\n\n    std::thread::sleep(Duration::from_millis(100));\n\n    let handle = Handle {\n        shutdown: shutdown_tx,\n        join,\n    };\n\n    f(addr)?;\n    handle.shutdown();\n\n    std::thread::sleep(Duration::from_millis(100));\n    Ok(())\n}\n\nasync fn serve<S>(builder: Builder<TokioExecutor>, stream: S)\nwhere\n    S: AsyncRead + AsyncWrite + Unpin + Send + 'static,\n{\n    let _ = builder\n        .serve_connection(\n            TokioIo::new(stream),\n            service_fn(|req: http::Request<Incoming>| async {\n                let bytes = req\n                    .into_body()\n                    .collect()\n                    .await\n                    .map(Collected::<Bytes>::to_bytes);\n                let bytes = bytes.unwrap_or_else(|_| Bytes::new());\n                Ok::<_, Infallible>(http::Response::new(Full::new(bytes)))\n            }),\n        )\n        .await;\n}\n\nasync fn handle_connection(\n    socket: TcpStream,\n    tls_acceptor: Option<Arc<SslAcceptor>>,\n    builder: Builder<TokioExecutor>,\n) {\n    if let Some(acceptor) = tls_acceptor {\n        let ssl = Ssl::new(acceptor.context()).expect(\"failed to create Ssl\");\n        let mut stream = SslStream::new(ssl, socket).expect(\"failed to create SslStream\");\n\n        // The client (or its connection pool) may proactively close the connection,\n        // especially during benchmarks or when cleaning up idle connections.\n        // This can cause TLS handshake failures (e.g., ConnectionReset, ConnectionAborted).\n        // Such errors are expected and should be handled gracefully to avoid panicking\n        // and to ensure the server remains robust under load.\n        if Pin::new(&mut stream).accept().await.is_err() {\n            return;\n        }\n        serve(builder, stream).await;\n    } else {\n        serve(builder, socket).await;\n    }\n}\n"
  },
  {
    "path": "bench/support.rs",
    "content": "pub mod bench;\npub mod client;\npub mod server;\n\nuse std::fmt;\n\npub type BoxError = Box<dyn std::error::Error + Send + Sync>;\n\n#[allow(unused)]\n#[derive(Clone, Copy, Debug)]\npub enum HttpVersion {\n    Http1,\n    Http2,\n}\n\nimpl fmt::Display for HttpVersion {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        let value = match self {\n            HttpVersion::Http1 => \"h1\",\n            HttpVersion::Http2 => \"h2\",\n        };\n        f.write_str(value)\n    }\n}\n\n#[allow(unused)]\n#[derive(Clone, Copy, Debug)]\npub enum Tls {\n    Enabled,\n    Disabled,\n}\n\nimpl fmt::Display for Tls {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        let value = match self {\n            Tls::Enabled => \"https\",\n            Tls::Disabled => \"http\",\n        };\n        f.write_str(value)\n    }\n}\n\npub fn current_thread_runtime() -> tokio::runtime::Runtime {\n    tokio::runtime::Builder::new_current_thread()\n        .enable_all()\n        .build()\n        .expect(\"Failed to build current-thread runtime\")\n}\n\npub fn multi_thread_runtime() -> tokio::runtime::Runtime {\n    tokio::runtime::Builder::new_multi_thread()\n        .worker_threads(4)\n        .enable_all()\n        .build()\n        .expect(\"Failed to build multi-thread runtime\")\n}\n"
  },
  {
    "path": "cliff.toml",
    "content": "# git-cliff ~ configuration file\n# https://git-cliff.org/docs/configuration\n\n[remote.github]\nowner = \"0x676e67\"\nrepo = \"wreq\"\n\n[changelog]\n# A Tera template to be rendered for each release in the changelog.\n# See https://keats.github.io/tera/docs/#introduction\nbody = \"\"\"\n{%- macro remote_url() -%}\n  https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }}\n{%- endmacro -%}\n\n{% macro print_commit(commit) -%}\n    - {% if commit.scope %}*({{ commit.scope }})* {% endif %}\\\n        {% if commit.breaking %}[**breaking**] {% endif %}\\\n        {{ commit.message | upper_first }} - \\\n        ([{{ commit.id | truncate(length=7, end=\"\") }}]({{ self::remote_url() }}/commit/{{ commit.id }}))\\\n{% endmacro -%}\n\n{% if version %}\\\n    {% if previous.version %}\\\n        ## [{{ version | trim_start_matches(pat=\"v\") }}]\\\n          ({{ self::remote_url() }}/compare/{{ previous.version }}..{{ version }}) - {{ timestamp | date(format=\"%Y-%m-%d\") }}\n    {% else %}\\\n        ## [{{ version | trim_start_matches(pat=\"v\") }}] - {{ timestamp | date(format=\"%Y-%m-%d\") }}\n    {% endif %}\\\n{% else %}\\\n    ## [unreleased]\n{% endif %}\\\n\n{% for group, commits in commits | group_by(attribute=\"group\") %}\n    ### {{ group | striptags | trim | upper_first }}\n    {% for commit in commits\n    | filter(attribute=\"scope\")\n    | sort(attribute=\"scope\") %}\n        {{ self::print_commit(commit=commit) }}\n    {%- endfor %}\n    {% for commit in commits %}\n        {%- if not commit.scope -%}\n            {{ self::print_commit(commit=commit) }}\n        {% endif -%}\n    {% endfor -%}\n{% endfor -%}\n{%- if github -%}\n{% if github.contributors | filter(attribute=\"is_first_time\", value=true) | length != 0 %}\n  ## New Contributors ❤️\n{% endif %}\\\n{% for contributor in github.contributors | filter(attribute=\"is_first_time\", value=true) %}\n  * @{{ contributor.username }} made their first contribution\n    {%- 
if contributor.pr_number %} in \\\n      [#{{ contributor.pr_number }}]({{ self::remote_url() }}/pull/{{ contributor.pr_number }}) \\\n    {%- endif %}\n{%- endfor -%}\n{%- endif %}\n\n\n\"\"\"\n# A Tera template to be rendered as the changelog's footer.\n# See https://keats.github.io/tera/docs/#introduction\nfooter = \"\"\"\n<!-- generated by git-cliff -->\n\"\"\"\n# Remove leading and trailing whitespaces from the changelog's body.\ntrim = true\n# An array of regex based postprocessors to modify the changelog.\npostprocessors = [\n  # Replace the placeholder `<REPO>` with a URL.\n  { pattern = '<REPO>', replace = \"https://github.com/0x676e67/wreq\" }, # replace repository URL\n]\n\n[git]\n# Parse commits according to the conventional commits specification.\n# See https://www.conventionalcommits.org\nconventional_commits = true\n# Exclude commits that do not match the conventional commits specification.\nfilter_unconventional = true\n# Split commits on newlines, treating each line as an individual commit.\nsplit_commits = false\n# An array of regex based parsers to modify commit messages prior to further processing.\ncommit_preprocessors = [\n  # Replace issue numbers with link templates to be updated in `changelog.postprocessors`.\n  { pattern = '\\((\\w+\\s)?#([0-9]+)\\)', replace = \"([#${2}](<REPO>/issues/${2}))\" },\n]\n# An array of regex based parsers for extracting data from the commit message.\n# Assigns commits to groups.\n# Optionally sets the commit's scope and can decide to exclude commits from further processing.\ncommit_parsers = [\n  { message = \"^feat\", group = \"<!-- 0 -->Features\" },\n  { message = \"^fix\", group = \"<!-- 1 -->Bug Fixes\" },\n  { message = \"^doc\", group = \"<!-- 3 -->Documentation\" },\n  { message = \"^perf\", group = \"<!-- 4 -->Performance\" },\n  { message = \"^refactor\\\\(clippy\\\\)\", skip = true },\n  { message = \"^refactor\", group = \"<!-- 2 -->Refactor\" },\n  { message = \"^style\", group = \"<!-- 5 
-->Styling\" },\n  { message = \"^test\", group = \"<!-- 6 -->Testing\" },\n  { message = \"^chore\\\\(release\\\\): prepare for\", skip = true },\n  { message = \"^chore\\\\(deps.*\\\\)\", skip = true },\n  { message = \"^chore\\\\(pr\\\\)\", skip = true },\n  { message = \"^chore\\\\(pull\\\\)\", skip = true },\n  { message = \"^chore\\\\(npm\\\\).*yarn\\\\.lock\", skip = true },\n  { message = \"^chore|^ci\", group = \"<!-- 7 -->Miscellaneous Tasks\" },\n  { body = \".*security\", group = \"<!-- 8 -->Security\" },\n  { message = \"^revert\", group = \"<!-- 9 -->Revert\" },\n]\n# Prevent commits that are breaking from being excluded by commit parsers.\nprotect_breaking_commits = false\n# Exclude commits that are not matched by any commit parser.\nfilter_commits = false\n# Regex to select git tags that represent releases.\ntag_pattern = \"v[0-9].*\"\n# Regex to select git tags that do not represent proper releases.\n# Takes precedence over `tag_pattern`.\n# Changes belonging to these releases will be included in the next release.\nskip_tags = \"\"\n# Regex to exclude git tags after applying the tag_pattern.\nignore_tags = \"v2.1.0|v2.1.1\"\n# Order releases topologically instead of chronologically.\ntopo_order = false\n# Order of commits in each group/release within the changelog.\n# Allowed values: newest, oldest\nsort_commits = \"newest\"\n"
  },
  {
    "path": "examples/cert_store.rs",
    "content": "use std::time::Duration;\n\nuse wreq::{\n    Client,\n    tls::{TlsInfo, trust::CertStore},\n};\n\n/// Certificate Store Example\n///\n/// In most cases, you don't need to manually configure certificate stores. wreq automatically\n/// uses appropriate default certificates:\n/// - With `webpki-roots` feature enabled: Uses Mozilla's maintained root certificate collection\n/// - Without this feature: Uses system default certificate store paths\n///\n/// Manual certificate store configuration is only needed in the following special cases:\n///\n/// ## Scenarios requiring custom certificate store:\n///\n/// ### 1. Self-signed Certificates\n/// - Connect to internal services using self-signed certificates\n/// - Test servers in development environments\n///\n/// ### 2. Enterprise Internal CA\n/// - Add root certificates from enterprise internal certificate authorities\n/// - Access HTTPS services on corporate intranets\n///\n/// ### 3. Certificate Updates and Management\n/// - Dynamically update certificates in the certificate store\n/// - Remove revoked or expired certificates\n///\n/// ### 4. Compliance Requirements\n/// - Special compliance requirements for certain industries or regions\n/// - Need to use specific certificate collections\n///\n/// ### 5. 
Performance Optimization\n/// - Reduce certificate store size to improve TLS handshake performance\n/// - Include only necessary root certificates\n#[tokio::main]\nasync fn main() -> wreq::Result<()> {\n    // Create a client with a custom certificate store using webpki-roots\n    let client = Client::builder()\n        .tls_cert_store(CertStore::from_der_certs(\n            webpki_root_certs::TLS_SERVER_ROOT_CERTS,\n        )?)\n        .build()?;\n\n    // Use the API you're already familiar with\n    client.get(\"https://www.google.com\").send().await?;\n\n    // Self-signed certificate Client\n    // Skip certificate verification for self-signed certificates\n    let client = Client::builder()\n        .tls_info(true)\n        .tls_cert_verification(false)\n        .build()?;\n\n    // Use the API you're already familiar with\n    let resp = client.get(\"https://self-signed.badssl.com/\").send().await?;\n    if let Some(tls_info) = resp.extensions().get::<TlsInfo>() {\n        if let Some(peer_cert_der) = tls_info.peer_certificate() {\n            // Create self-signed certificate Store\n            let self_signed_store = CertStore::from_der_certs(&[peer_cert_der])?;\n\n            // Create a client with self-signed certificate store\n            let client = Client::builder()\n                .tls_cert_store(self_signed_store)\n                .connect_timeout(Duration::from_secs(10))\n                .build()?;\n\n            // Use the API you're already familiar with\n            let resp = client.get(\"https://self-signed.badssl.com/\").send().await?;\n            println!(\"{}\", resp.text().await?);\n        }\n    }\n\n    Ok(())\n}\n"
  },
  {
    "path": "examples/connect_via_lower_priority_tokio_runtime.rs",
    "content": "// This example demonstrates how to delegate the connect calls, which contain TLS handshakes,\n// to a secondary tokio runtime of lower OS thread priority using a custom tower layer.\n// This helps to ensure that long-running futures during handshake crypto operations don't block\n// other I/O futures.\n//\n// This does introduce overhead of additional threads, channels, extra vtables, etc,\n// so it is best suited to services with large numbers of incoming connections or that\n// are otherwise very sensitive to any blocking futures.  Or, you might want fewer threads\n// and/or to use the current_thread runtime.\n//\n// This is using the `tokio` runtime and certain other dependencies:\n//\n// `tokio = { version = \"1\", features = [\"full\"] }`\n// `libc = \"0\"`\n// `pin-project-lite = \"0.2\"`\n// `tower = { version = \"0.5\", default-features = false}`\n\n#[tokio::main]\nasync fn main() -> wreq::Result<()> {\n    tracing_subscriber::fmt()\n        .with_max_level(tracing::Level::TRACE)\n        .init();\n    background_threadpool::init_background_runtime();\n    tokio::time::sleep(std::time::Duration::from_millis(10)).await;\n\n    let client = wreq::Client::builder()\n        .connector_layer(background_threadpool::BackgroundProcessorLayer::new())\n        .build()\n        .expect(\"should be able to build wreq client\");\n\n    let url = if let Some(url) = std::env::args().nth(1) {\n        url\n    } else {\n        println!(\"No CLI URL provided, using default.\");\n        \"https://hyper.rs\".into()\n    };\n\n    eprintln!(\"Fetching {url:?}...\");\n\n    let res = client.get(url).send().await?;\n\n    eprintln!(\"Response: {:?} {}\", res.version(), res.status());\n    eprintln!(\"Headers: {:#?}\\n\", res.headers());\n\n    let body = res.text().await?;\n\n    println!(\"{body}\");\n\n    Ok(())\n}\n\n// separating out for convenience to avoid a million\nmod background_threadpool {\n    use std::{\n        future::Future,\n        
pin::Pin,\n        sync::OnceLock,\n        task::{Context, Poll},\n    };\n\n    use futures_util::TryFutureExt;\n    use pin_project_lite::pin_project;\n    use tokio::{runtime::Handle, select, sync::mpsc::error::TrySendError};\n    use tower::{BoxError, Layer, Service};\n\n    static CPU_HEAVY_THREAD_POOL: OnceLock<\n        tokio::sync::mpsc::Sender<Pin<Box<dyn Future<Output = ()> + Send + 'static>>>,\n    > = OnceLock::new();\n\n    pub(crate) fn init_background_runtime() {\n        std::thread::Builder::new()\n            .name(\"cpu-heavy-background-threadpool\".to_string())\n            .spawn(move || {\n                let rt = tokio::runtime::Builder::new_multi_thread()\n                    .thread_name(\"cpu-heavy-background-pool-thread\")\n                    .worker_threads(std::thread::available_parallelism().unwrap().get())\n                    // ref: https://github.com/tokio-rs/tokio/issues/4941\n                    // consider uncommenting if seeing heavy task contention\n                    // .disable_lifo_slot()\n                    .on_thread_start(move || {\n                        #[cfg(target_os = \"linux\")]\n                        unsafe {\n                            // Increase thread pool thread niceness, so they are lower priority\n                            // than the foreground executor and don't interfere with I/O tasks\n                            {\n                                *libc::__errno_location() = 0;\n                                if libc::nice(10) == -1 && *libc::__errno_location() != 0 {\n                                    let error = std::io::Error::last_os_error();\n                                    tracing::error!(\"failed to set threadpool niceness: {error}\");\n                                }\n                            }\n                        }\n                    })\n                    .enable_all()\n                    .build()\n                    .unwrap_or_else(|e| panic!(\"cpu heavy 
runtime failed_to_initialize: {e}\"));\n                rt.block_on(async {\n                    tracing::debug!(\"starting background cpu-heavy work\");\n                    process_cpu_work().await;\n                });\n            })\n            .unwrap_or_else(|e| panic!(\"cpu heavy thread failed_to_initialize: {e}\"));\n    }\n\n    async fn process_cpu_work() {\n        // we only use this channel for routing work, it should move pretty quick, it can be small\n        let (tx, mut rx) = tokio::sync::mpsc::channel(10);\n        // share the handle to the background channel globally\n        CPU_HEAVY_THREAD_POOL.set(tx).unwrap();\n\n        while let Some(work) = rx.recv().await {\n            tokio::task::spawn(work);\n        }\n    }\n\n    // retrieve the sender to the background channel, and send the future over to it for execution\n    fn send_to_background_runtime(future: impl Future<Output = ()> + Send + 'static) {\n        let tx = CPU_HEAVY_THREAD_POOL.get().expect(\n            \"start up the secondary tokio runtime before sending to `CPU_HEAVY_THREAD_POOL`\",\n        );\n\n        match tx.try_send(Box::pin(future)) {\n            Ok(_) => (),\n            Err(TrySendError::Closed(_)) => {\n                panic!(\"background cpu heavy runtime channel is closed\")\n            }\n            Err(TrySendError::Full(msg)) => {\n                tracing::warn!(\n                    \"background cpu heavy runtime channel is full, task spawning loop delayed\"\n                );\n                let tx = tx.clone();\n                Handle::current().spawn(async move {\n                    tx.send(msg)\n                        .await\n                        .expect(\"background cpu heavy runtime channel is closed\")\n                });\n            }\n        }\n    }\n\n    // This tower layer injects futures with a oneshot channel, and then sends them to the\n    // background runtime for processing. 
We don't use the Buffer service because that is\n    // intended to process sequentially on a single task, whereas we want to spawn a new task\n    // per call.\n    #[derive(Copy, Clone)]\n    pub struct BackgroundProcessorLayer {}\n    impl BackgroundProcessorLayer {\n        pub fn new() -> Self {\n            Self {}\n        }\n    }\n    impl<S> Layer<S> for BackgroundProcessorLayer {\n        type Service = BackgroundProcessor<S>;\n        fn layer(&self, service: S) -> Self::Service {\n            BackgroundProcessor::new(service)\n        }\n    }\n\n    impl std::fmt::Debug for BackgroundProcessorLayer {\n        fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {\n            f.debug_struct(\"BackgroundProcessorLayer\").finish()\n        }\n    }\n\n    // This tower service injects futures with a oneshot channel, and then sends them to the\n    // background runtime for processing.\n    #[derive(Debug, Clone)]\n    pub struct BackgroundProcessor<S> {\n        inner: S,\n    }\n\n    impl<S> BackgroundProcessor<S> {\n        pub fn new(inner: S) -> Self {\n            BackgroundProcessor { inner }\n        }\n    }\n\n    impl<S, Request> Service<Request> for BackgroundProcessor<S>\n    where\n        S: Service<Request>,\n        S::Response: Send + 'static,\n        S::Error: Into<BoxError> + Send,\n        S::Future: Send + 'static,\n    {\n        type Response = S::Response;\n\n        type Error = BoxError;\n\n        type Future = BackgroundResponseFuture<S::Response>;\n\n        fn poll_ready(\n            &mut self,\n            cx: &mut std::task::Context<'_>,\n        ) -> std::task::Poll<Result<(), Self::Error>> {\n            match self.inner.poll_ready(cx) {\n                Poll::Pending => Poll::Pending,\n                Poll::Ready(r) => Poll::Ready(r.map_err(Into::into)),\n            }\n        }\n\n        fn call(&mut self, req: Request) -> Self::Future {\n            let response = self.inner.call(req);\n\n            
// wrap our inner service's future with a future that writes to this oneshot channel\n            let (mut tx, rx) = tokio::sync::oneshot::channel();\n            let future = async move {\n                select!(\n                    _ = tx.closed() => {\n                        // receiver already dropped, don't need to do anything\n                    }\n                    result = response.map_err(Into::<BoxError>::into) => {\n                        // if this fails, the receiver already dropped, so we don't need to do anything\n                        let _ = tx.send(result);\n                    }\n                )\n            };\n            // send the wrapped future to the background\n            send_to_background_runtime(future);\n\n            BackgroundResponseFuture::new(rx)\n        }\n    }\n\n    // `BackgroundProcessor` response future\n    pin_project! {\n        #[derive(Debug)]\n        pub struct BackgroundResponseFuture<S> {\n            #[pin]\n            rx: tokio::sync::oneshot::Receiver<Result<S, BoxError>>,\n        }\n    }\n\n    impl<S> BackgroundResponseFuture<S> {\n        pub(crate) fn new(rx: tokio::sync::oneshot::Receiver<Result<S, BoxError>>) -> Self {\n            BackgroundResponseFuture { rx }\n        }\n    }\n\n    impl<S> Future for BackgroundResponseFuture<S>\n    where\n        S: Send + 'static,\n    {\n        type Output = Result<S, BoxError>;\n\n        fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {\n            let this = self.project();\n\n            // now poll on the receiver end of the oneshot to get the result\n            match this.rx.poll(cx) {\n                Poll::Ready(v) => match v {\n                    Ok(v) => Poll::Ready(v),\n                    Err(err) => Poll::Ready(Err(Box::new(err) as BoxError)),\n                },\n                Poll::Pending => Poll::Pending,\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "examples/emulate.rs",
    "content": "use wreq::{\n    Client, Emulation,\n    header::{self, HeaderMap, HeaderValue, OrigHeaderMap},\n    http2::{Http2Options, PseudoId, PseudoOrder},\n    tls::{AlpnProtocol, TlsOptions, TlsVersion},\n};\n\nmacro_rules! join {\n    ($sep:expr, $first:expr $(, $rest:expr)*) => {\n        concat!($first $(, $sep, $rest)*)\n    };\n}\n\n#[tokio::main]\nasync fn main() -> wreq::Result<()> {\n    tracing_subscriber::fmt()\n        .with_max_level(tracing::Level::TRACE)\n        .init();\n\n    //  TLS options config\n    let tls = TlsOptions::builder()\n        .enable_ocsp_stapling(true)\n        .curves_list(join!(\":\", \"X25519\", \"P-256\", \"P-384\"))\n        .cipher_list(join!(\n            \":\",\n            \"TLS_AES_128_GCM_SHA256\",\n            \"TLS_AES_256_GCM_SHA384\",\n            \"TLS_CHACHA20_POLY1305_SHA256\",\n            \"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\",\n            \"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\",\n            \"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\",\n            \"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\",\n            \"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\",\n            \"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\"\n        ))\n        .sigalgs_list(join!(\n            \":\",\n            \"ecdsa_secp256r1_sha256\",\n            \"rsa_pss_rsae_sha256\",\n            \"rsa_pkcs1_sha256\",\n            \"ecdsa_secp384r1_sha384\",\n            \"rsa_pss_rsae_sha384\",\n            \"rsa_pkcs1_sha384\",\n            \"rsa_pss_rsae_sha512\",\n            \"rsa_pkcs1_sha512\",\n            \"rsa_pkcs1_sha1\"\n        ))\n        .alpn_protocols([AlpnProtocol::HTTP2, AlpnProtocol::HTTP1])\n        .min_tls_version(TlsVersion::TLS_1_2)\n        .max_tls_version(TlsVersion::TLS_1_3)\n        .build();\n\n    // HTTP/2 options config\n    let http2 = Http2Options::builder()\n        .initial_stream_id(3)\n        .initial_window_size(16777216)\n        .initial_connection_window_size(16711681 + 65535)\n  
      .headers_pseudo_order(\n            PseudoOrder::builder()\n                .extend([\n                    PseudoId::Method,\n                    PseudoId::Path,\n                    PseudoId::Authority,\n                    PseudoId::Scheme,\n                ])\n                .build(),\n        )\n        .build();\n\n    // Default headers\n    let headers = {\n        let mut headers = HeaderMap::new();\n        headers.insert(header::USER_AGENT, HeaderValue::from_static(\"TwitterAndroid/10.89.0-release.0 (310890000-r-0) G011A/9 (google;G011A;google;G011A;0;;1;2016)\"));\n        headers.insert(header::ACCEPT_LANGUAGE, HeaderValue::from_static(\"en-US\"));\n        headers.insert(\n            header::ACCEPT_ENCODING,\n            HeaderValue::from_static(\"br, gzip, deflate\"),\n        );\n        headers.insert(header::ACCEPT, HeaderValue::from_static(\"application/json\"));\n        headers.insert(header::CACHE_CONTROL, HeaderValue::from_static(\"no-store\"));\n        headers.insert(\n            header::COOKIE,\n            HeaderValue::from_static(\"ct0=YOUR_CT0_VALUE;\"),\n        );\n        headers\n    };\n\n    // The headers keep the original case and order\n    let orig_headers = {\n        let mut orig_headers = OrigHeaderMap::new();\n        orig_headers.insert(\"cookie\");\n        orig_headers.insert(\"content-length\");\n        orig_headers.insert(\"User-Agent\");\n        orig_headers.insert(\"Accept-Language\");\n        orig_headers.insert(\"Accept-Encoding\");\n        orig_headers\n    };\n\n    // This provider encapsulates TLS, HTTP/1, HTTP/2, default headers, and original headers\n    let emulation = Emulation::builder()\n        .tls_options(tls)\n        .http2_options(http2)\n        .headers(headers)\n        .orig_headers(orig_headers)\n        .build(Default::default());\n\n    // Build a client with emulation config\n    let client = Client::builder()\n        .emulation(emulation)\n        
.tls_cert_verification(false)\n        .build()?;\n\n    // Use the API you're already familiar with\n    let resp = client.get(\"https://tls.browserleaks.com/\").send().await?;\n    println!(\"{}\", resp.text().await?);\n\n    Ok(())\n}\n"
  },
  {
    "path": "examples/form.rs",
    "content": "// Short example of a POST request with form data.\n//\n// This is using the `tokio` runtime. You'll need the following dependency:\n//\n// `tokio = { version = \"1\", features = [\"full\"] }`\n#[tokio::main]\nasync fn main() {\n    let response = wreq::post(\"http://www.baidu.com\")\n        .form(&[(\"one\", \"1\")])\n        .send()\n        .await\n        .expect(\"send\");\n    println!(\"Response status {}\", response.status());\n}\n"
  },
  {
    "path": "examples/http1_websocket.rs",
    "content": "use futures_util::{SinkExt, StreamExt, TryStreamExt};\nuse wreq::{header, ws::message::Message};\n\n#[tokio::main]\nasync fn main() -> wreq::Result<()> {\n    // Use the API you're already familiar with\n    let resp = wreq::websocket(\"wss://echo.websocket.org\")\n        .header(header::USER_AGENT, env!(\"CARGO_PKG_NAME\"))\n        .read_buffer_size(1024 * 1024)\n        .send()\n        .await?;\n\n    assert_eq!(resp.version(), http::Version::HTTP_11);\n\n    let websocket = resp.into_websocket().await?;\n    if let Some(protocol) = websocket.protocol() {\n        println!(\"WebSocket subprotocol: {:?}\", protocol);\n    }\n\n    let (mut tx, mut rx) = websocket.split();\n\n    tokio::spawn(async move {\n        for i in 1..11 {\n            if let Err(err) = tx.send(Message::text(format!(\"Hello, World! {i}\"))).await {\n                eprintln!(\"failed to send message: {err}\");\n            }\n        }\n    });\n\n    while let Some(message) = rx.try_next().await? {\n        if let Message::Text(text) = message {\n            println!(\"received: {text}\");\n        }\n    }\n\n    Ok(())\n}\n"
  },
  {
    "path": "examples/http2_websocket.rs",
    "content": "//! Run websocket server\n//!\n//! ```not_rust\n//! git clone https://github.com/tokio-rs/axum && cd axum\n//! cargo run -p example-websockets-http2\n//! ```\n\nuse futures_util::{SinkExt, StreamExt, TryStreamExt};\nuse wreq::{Version, header, ws::message::Message};\n\n#[tokio::main]\nasync fn main() -> wreq::Result<()> {\n    // Use the API you're already familiar with\n    let resp = wreq::websocket(\"wss://127.0.0.1:3000/ws\")\n        .version(Version::HTTP_2)\n        .header(header::USER_AGENT, env!(\"CARGO_PKG_NAME\"))\n        .read_buffer_size(1024 * 1024)\n        .send()\n        .await?;\n\n    assert_eq!(resp.version(), http::Version::HTTP_2);\n\n    let websocket = resp.into_websocket().await?;\n    if let Some(protocol) = websocket.protocol() {\n        println!(\"WebSocket subprotocol: {:?}\", protocol);\n    }\n\n    let (mut tx, mut rx) = websocket.split();\n\n    tokio::spawn(async move {\n        for i in 1..11 {\n            if let Err(err) = tx.send(Message::text(format!(\"Hello, World! #{i}\"))).await {\n                eprintln!(\"failed to send message: {err}\");\n            }\n        }\n    });\n\n    while let Some(message) = rx.try_next().await? {\n        if let Message::Text(text) = message {\n            println!(\"received: {text}\");\n        }\n    }\n\n    Ok(())\n}\n"
  },
  {
    "path": "examples/json_dynamic.rs",
    "content": "//! This example illustrates the way to send and receive arbitrary JSON.\n//!\n//! This is useful for some ad-hoc experiments and situations when you don't\n//! really care about the structure of the JSON and just need to display it or\n//! process it at runtime.\n\n// This is using the `tokio` runtime. You'll need the following dependency:\n//\n// `tokio = { version = \"1\", features = [\"full\"] }`\n#[tokio::main]\nasync fn main() -> wreq::Result<()> {\n    let echo_json: serde_json::Value = wreq::post(\"https://jsonplaceholder.typicode.com/posts\")\n        .json(&serde_json::json!({\n            \"title\": \"wreq.rs\",\n            \"body\": \"https://docs.rs/wreq\",\n            \"userId\": 1\n        }))\n        .send()\n        .await?\n        .json()\n        .await?;\n\n    println!(\"{echo_json:#?}\");\n    // Object(\n    //     {\n    //         \"body\": String(\n    //             \"https://docs.rs/wreq\"\n    //         ),\n    //         \"id\": Number(\n    //             101\n    //         ),\n    //         \"title\": String(\n    //             \"wreq.rs\"\n    //         ),\n    //         \"userId\": Number(\n    //             1\n    //         )\n    //     }\n    // )\n    Ok(())\n}\n"
  },
  {
    "path": "examples/json_typed.rs",
    "content": "//! This example illustrates the way to send and receive statically typed JSON.\n//!\n//! In contrast to the arbitrary JSON example, this brings up the full power of\n//! Rust compile-time type system guaranties though it requires a little bit\n//! more code.\n\n// These require the `serde` dependency.\nuse serde::{Deserialize, Serialize};\n\n#[derive(Debug, Serialize, Deserialize)]\nstruct Post {\n    id: Option<i32>,\n    title: String,\n    body: String,\n    #[serde(rename = \"userId\")]\n    user_id: i32,\n}\n\n// This is using the `tokio` runtime. You'll need the following dependency:\n//\n// `tokio = { version = \"1\", features = [\"full\"] }`\n#[tokio::main]\nasync fn main() -> wreq::Result<()> {\n    let new_post = Post {\n        id: None,\n        title: \"wreq.rs\".into(),\n        body: \"https://docs.rs/wreq\".into(),\n        user_id: 1,\n    };\n    let new_post: Post = wreq::post(\"https://jsonplaceholder.typicode.com/posts\")\n        .json(&new_post)\n        .send()\n        .await?\n        .json()\n        .await?;\n\n    println!(\"{new_post:#?}\");\n    // Post {\n    //     id: Some(\n    //         101\n    //     ),\n    //     title: \"wreq.rs\",\n    //     body: \"https://docs.rs/wreq\",\n    //     user_id: 1\n    // }\n    Ok(())\n}\n"
  },
  {
    "path": "examples/keylog.rs",
    "content": "use wreq::tls::keylog::KeyLog;\n\n#[tokio::main]\nasync fn main() -> wreq::Result<()> {\n    // Build a client\n    let client = wreq::Client::builder()\n        .tls_keylog(KeyLog::from_file(\"keylog.txt\"))\n        .tls_cert_verification(false)\n        .build()?;\n\n    // Use the API you're already familiar with\n    let resp = client.get(\"https://yande.re/post.json\").send().await?;\n    println!(\"{}\", resp.text().await?);\n\n    Ok(())\n}\n"
  },
  {
    "path": "examples/request_with_emulate.rs",
    "content": "use wreq::{\n    Emulation,\n    header::{self, HeaderMap, HeaderValue, OrigHeaderMap},\n    http2::{Http2Options, PseudoId, PseudoOrder},\n    tls::{AlpnProtocol, TlsOptions, TlsVersion},\n};\n\nmacro_rules! join {\n    ($sep:expr, $first:expr $(, $rest:expr)*) => {\n        concat!($first $(, $sep, $rest)*)\n    };\n}\n\n#[tokio::main]\nasync fn main() -> wreq::Result<()> {\n    tracing_subscriber::fmt()\n        .with_max_level(tracing::Level::TRACE)\n        .init();\n\n    //  TLS options config\n    let tls = TlsOptions::builder()\n        .enable_ocsp_stapling(true)\n        .curves_list(join!(\":\", \"X25519\", \"P-256\", \"P-384\"))\n        .cipher_list(join!(\n            \":\",\n            \"TLS_AES_128_GCM_SHA256\",\n            \"TLS_AES_256_GCM_SHA384\",\n            \"TLS_CHACHA20_POLY1305_SHA256\",\n            \"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\",\n            \"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\",\n            \"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\",\n            \"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\",\n            \"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\",\n            \"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\"\n        ))\n        .sigalgs_list(join!(\n            \":\",\n            \"ecdsa_secp256r1_sha256\",\n            \"rsa_pss_rsae_sha256\",\n            \"rsa_pkcs1_sha256\",\n            \"ecdsa_secp384r1_sha384\",\n            \"rsa_pss_rsae_sha384\",\n            \"rsa_pkcs1_sha384\",\n            \"rsa_pss_rsae_sha512\",\n            \"rsa_pkcs1_sha512\",\n            \"rsa_pkcs1_sha1\"\n        ))\n        .alpn_protocols([AlpnProtocol::HTTP2, AlpnProtocol::HTTP1])\n        .min_tls_version(TlsVersion::TLS_1_2)\n        .max_tls_version(TlsVersion::TLS_1_3)\n        .build();\n\n    // HTTP/2 options config\n    let http2 = Http2Options::builder()\n        .initial_stream_id(3)\n        .initial_window_size(16777216)\n        .initial_connection_window_size(16711681 + 65535)\n        
.headers_pseudo_order(\n            PseudoOrder::builder()\n                .extend([\n                    PseudoId::Method,\n                    PseudoId::Path,\n                    PseudoId::Authority,\n                    PseudoId::Scheme,\n                ])\n                .build(),\n        )\n        .build();\n\n    // Default headers\n    let headers = {\n        let mut headers = HeaderMap::new();\n        headers.insert(header::USER_AGENT, HeaderValue::from_static(\"TwitterAndroid/10.89.0-release.0 (310890000-r-0) G011A/9 (google;G011A;google;G011A;0;;1;2016)\"));\n        headers.insert(header::ACCEPT_LANGUAGE, HeaderValue::from_static(\"en-US\"));\n        headers.insert(\n            header::ACCEPT_ENCODING,\n            HeaderValue::from_static(\"br, gzip, deflate\"),\n        );\n        headers.insert(header::ACCEPT, HeaderValue::from_static(\"application/json\"));\n        headers.insert(header::CACHE_CONTROL, HeaderValue::from_static(\"no-store\"));\n        headers.insert(\n            header::COOKIE,\n            HeaderValue::from_static(\"ct0=YOUR_CT0_VALUE;\"),\n        );\n        headers\n    };\n\n    // The headers keep the original case and order\n    let orig_headers = {\n        let mut orig_headers = OrigHeaderMap::new();\n        orig_headers.insert(\"cookie\");\n        orig_headers.insert(\"content-length\");\n        orig_headers.insert(\"User-Agent\");\n        orig_headers.insert(\"Accept-Language\");\n        orig_headers.insert(\"Accept-Encoding\");\n        orig_headers\n    };\n\n    // This provider encapsulates TLS, HTTP/1, HTTP/2, default headers, and original headers\n    let emulation = Emulation::builder()\n        .tls_options(tls)\n        .http2_options(http2)\n        .orig_headers(orig_headers)\n        .headers(headers)\n        .build(Default::default());\n\n    // Use the API you're already familiar with\n    let resp = wreq::get(\"https://tls.peet.ws/api/all\")\n        .emulation(emulation)\n        
.send()\n        .await?;\n    println!(\"{}\", resp.text().await?);\n\n    Ok(())\n}\n"
  },
  {
    "path": "examples/request_with_interface.rs",
    "content": "#[cfg(any(\n    target_os = \"android\",\n    target_os = \"fuchsia\",\n    target_os = \"illumos\",\n    target_os = \"ios\",\n    target_os = \"linux\",\n    target_os = \"macos\",\n    target_os = \"solaris\",\n    target_os = \"tvos\",\n    target_os = \"visionos\",\n    target_os = \"watchos\",\n))]\n#[tokio::main]\nasync fn main() -> wreq::Result<()> {\n    // Use the API you're already familiar with\n    let resp = wreq::get(\"https://api.ip.sb/ip\")\n        .interface(\"utun4\")\n        .send()\n        .await?;\n    println!(\"{}\", resp.text().await?);\n\n    Ok(())\n}\n\n#[cfg(not(any(\n    target_os = \"android\",\n    target_os = \"fuchsia\",\n    target_os = \"illumos\",\n    target_os = \"ios\",\n    target_os = \"linux\",\n    target_os = \"macos\",\n    target_os = \"solaris\",\n    target_os = \"tvos\",\n    target_os = \"visionos\",\n    target_os = \"watchos\",\n)))]\nfn main() {}\n"
  },
  {
    "path": "examples/request_with_local_address.rs",
    "content": "use std::net::IpAddr;\n\nuse wreq::redirect::Policy;\n\n#[tokio::main]\nasync fn main() -> wreq::Result<()> {\n    // Use the API you're already familiar with\n    let resp = wreq::get(\"http://www.baidu.com\")\n        .redirect(Policy::default())\n        .local_address(IpAddr::from([192, 168, 1, 226]))\n        .send()\n        .await?;\n    println!(\"{}\", resp.text().await?);\n\n    Ok(())\n}\n"
  },
  {
    "path": "examples/request_with_proxy.rs",
    "content": "use wreq::Proxy;\n\n#[tokio::main]\nasync fn main() -> wreq::Result<()> {\n    // Use the API you're already familiar with\n    let resp = wreq::get(\"https://api.ip.sb/ip\")\n        .proxy(Proxy::all(\"socks5h://localhost:6153\")?)\n        .send()\n        .await?;\n    println!(\"{}\", resp.text().await?);\n\n    Ok(())\n}\n"
  },
  {
    "path": "examples/request_with_redirect.rs",
    "content": "use wreq::redirect::Policy;\n\n#[tokio::main]\nasync fn main() -> wreq::Result<()> {\n    // Use the API you're already familiar with\n    let resp = wreq::get(\"https://google.com/\")\n        .redirect(Policy::custom(|attempt| {\n            // we can inspect the redirect attempt\n            println!(\n                \"Redirecting (status: {}) to {:?} and headers: {:#?}\",\n                attempt.status, attempt.uri, attempt.headers\n            );\n\n            // we can follow redirects as normal\n            attempt.follow()\n        }))\n        .send()\n        .await?;\n    println!(\"{}\", resp.text().await?);\n\n    Ok(())\n}\n"
  },
  {
    "path": "examples/request_with_version.rs",
    "content": "use http::Version;\n\n#[tokio::main]\nasync fn main() -> wreq::Result<()> {\n    // Use the API you're already familiar with\n    let resp = wreq::get(\"https://www.google.com\")\n        .version(Version::HTTP_11)\n        .send()\n        .await?;\n\n    assert_eq!(resp.version(), Version::HTTP_11);\n    println!(\"{}\", resp.text().await?);\n\n    Ok(())\n}\n"
  },
  {
    "path": "examples/tor_socks.rs",
    "content": "#![deny(warnings)]\n\n// This is using the `tokio` runtime. You'll need the following dependency:\n//\n// `tokio = { version = \"1\", features = [\"full\"] }`\n#[tokio::main]\nasync fn main() -> wreq::Result<()> {\n    // Make sure you are running tor and this is your socks port\n    let proxy = wreq::Proxy::all(\"socks5h://127.0.0.1:9050\").expect(\"tor proxy should be there\");\n    let client = wreq::Client::builder()\n        .proxy(proxy)\n        .build()\n        .expect(\"should be able to build wreq client\");\n\n    let res = client.get(\"https://check.torproject.org\").send().await?;\n    println!(\"Status: {}\", res.status());\n\n    let text = res.text().await?;\n    let is_tor = text.contains(\"Congratulations. This emulation is configured to use Tor.\");\n    println!(\"Is Tor: {is_tor}\");\n    assert!(is_tor);\n\n    Ok(())\n}\n"
  },
  {
    "path": "examples/unix_socket.rs",
    "content": "#[cfg(unix)]\n#[tokio::main]\nasync fn main() -> wreq::Result<()> {\n    // Create a Unix socket proxy\n    let proxy = wreq::Proxy::unix(\"/var/run/docker.sock\")?;\n\n    // Build a client\n    let client = wreq::Client::builder()\n        // Specify the Unix socket path\n        .proxy(proxy.clone())\n        .timeout(std::time::Duration::from_secs(10))\n        .build()?;\n\n    // Use the API you're already familiar with\n    let resp = client\n        .get(\"http://localhost/v1.41/containers/json\")\n        .send()\n        .await?;\n    println!(\"{}\", resp.text().await?);\n\n    // Or specify the Unix socket directly in the request\n    let resp = client\n        .get(\"http://localhost/v1.41/containers/json\")\n        .proxy(proxy)\n        .send()\n        .await?;\n    println!(\"{}\", resp.text().await?);\n\n    Ok(())\n}\n\n#[cfg(not(unix))]\nfn main() {}\n"
  },
  {
    "path": "rustfmt.toml",
    "content": "group_imports = \"StdExternalCrate\"\nimports_granularity = \"Crate\"\nreorder_imports = true\nwrap_comments = true\ncomment_width = 100"
  },
  {
    "path": "src/client/body.rs",
    "content": "use std::{\n    pin::Pin,\n    task::{Context, Poll, ready},\n};\n\nuse bytes::Bytes;\nuse http_body::{Body as HttpBody, SizeHint};\nuse http_body_util::{BodyExt, Either, Full, combinators::BoxBody};\nuse pin_project_lite::pin_project;\n#[cfg(feature = \"stream\")]\nuse {tokio::fs::File, tokio_util::io::ReaderStream};\n\nuse crate::error::{BoxError, Error};\n\n/// An request body.\n#[derive(Debug)]\npub struct Body(Either<Full<Bytes>, BoxBody<Bytes, BoxError>>);\n\npin_project! {\n    /// We can't use `map_frame()` because that loses the hint data (for good reason).\n    /// But we aren't transforming the data.\n    struct IntoBytesBody<B> {\n        #[pin]\n        inner: B,\n    }\n}\n\n// ===== impl Body =====\n\nimpl Body {\n    /// Wrap a [`HttpBody`] in a box inside `Body`.\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// # use wreq::Body;\n    /// # use futures_util;\n    /// # fn main() {\n    /// let content = \"hello,world!\".to_string();\n    ///\n    /// let body = Body::wrap(content);\n    /// # }\n    /// ```\n    pub fn wrap<B>(inner: B) -> Body\n    where\n        B: HttpBody + Send + Sync + 'static,\n        B::Data: Into<Bytes>,\n        B::Error: Into<BoxError>,\n    {\n        Body(Either::Right(\n            IntoBytesBody { inner }.map_err(Into::into).boxed(),\n        ))\n    }\n\n    /// Wrap a futures `Stream` in a box inside `Body`.\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// # use wreq::Body;\n    /// # use futures_util;\n    /// # fn main() {\n    /// let chunks: Vec<Result<_, ::std::io::Error>> = vec![Ok(\"hello\"), Ok(\" \"), Ok(\"world\")];\n    ///\n    /// let stream = futures_util::stream::iter(chunks);\n    ///\n    /// let body = Body::wrap_stream(stream);\n    /// # }\n    /// ```\n    ///\n    /// # Optional\n    ///\n    /// This requires the `stream` feature to be enabled.\n    #[cfg(feature = \"stream\")]\n    #[cfg_attr(docsrs, doc(cfg(feature = \"stream\")))]\n    pub fn 
wrap_stream<S>(stream: S) -> Body\n    where\n        S: futures_util::stream::TryStream + Send + 'static,\n        S::Error: Into<BoxError>,\n        Bytes: From<S::Ok>,\n    {\n        Body::stream(stream)\n    }\n\n    #[cfg(any(feature = \"stream\", feature = \"multipart\"))]\n    pub(crate) fn stream<S>(stream: S) -> Body\n    where\n        S: futures_util::stream::TryStream + Send + 'static,\n        S::Error: Into<BoxError>,\n        Bytes: From<S::Ok>,\n    {\n        use futures_util::TryStreamExt;\n        use http_body::Frame;\n        use http_body_util::StreamBody;\n        use sync_wrapper::SyncStream;\n\n        let body = StreamBody::new(SyncStream::new(\n            stream\n                .map_ok(Bytes::from)\n                .map_ok(Frame::data)\n                .map_err(Into::into),\n        ));\n        Body(Either::Right(body.boxed()))\n    }\n\n    #[inline]\n    pub(crate) fn empty() -> Body {\n        Body::reusable(Bytes::new())\n    }\n\n    #[inline]\n    pub(crate) fn reusable(chunk: Bytes) -> Body {\n        Body(Either::Left(Full::new(chunk)))\n    }\n\n    #[inline]\n    #[cfg(feature = \"multipart\")]\n    pub(crate) fn content_length(&self) -> Option<u64> {\n        self.0.size_hint().exact()\n    }\n\n    #[inline]\n    pub(crate) fn try_clone(&self) -> Option<Body> {\n        match self.0 {\n            Either::Left(ref chunk) => Some(Body(Either::Left(chunk.clone()))),\n            Either::Right { .. 
} => None,\n        }\n    }\n}\n\nimpl Default for Body {\n    #[inline]\n    fn default() -> Body {\n        Body::empty()\n    }\n}\n\nimpl From<BoxBody<Bytes, BoxError>> for Body {\n    #[inline]\n    fn from(body: BoxBody<Bytes, BoxError>) -> Self {\n        Self(Either::Right(body))\n    }\n}\n\nimpl From<Bytes> for Body {\n    #[inline]\n    fn from(bytes: Bytes) -> Body {\n        Body::reusable(bytes)\n    }\n}\n\nimpl From<Vec<u8>> for Body {\n    #[inline]\n    fn from(vec: Vec<u8>) -> Body {\n        Body::reusable(vec.into())\n    }\n}\n\nimpl From<&'static [u8]> for Body {\n    #[inline]\n    fn from(s: &'static [u8]) -> Body {\n        Body::reusable(Bytes::from_static(s))\n    }\n}\n\nimpl From<String> for Body {\n    #[inline]\n    fn from(s: String) -> Body {\n        Body::reusable(s.into())\n    }\n}\n\nimpl From<&'static str> for Body {\n    #[inline]\n    fn from(s: &'static str) -> Body {\n        s.as_bytes().into()\n    }\n}\n\n#[cfg(feature = \"stream\")]\nimpl From<File> for Body {\n    #[inline]\n    fn from(file: File) -> Body {\n        Body::wrap_stream(ReaderStream::new(file))\n    }\n}\n\nimpl HttpBody for Body {\n    type Data = Bytes;\n    type Error = Error;\n\n    #[inline]\n    fn poll_frame(\n        mut self: Pin<&mut Self>,\n        cx: &mut Context,\n    ) -> Poll<Option<Result<http_body::Frame<Self::Data>, Self::Error>>> {\n        Pin::new(&mut self.0).poll_frame(cx).map_err(|err| {\n            err.downcast::<Error>()\n                .map_or_else(Error::request, |err| *err)\n        })\n    }\n\n    #[inline]\n    fn size_hint(&self) -> SizeHint {\n        self.0.size_hint()\n    }\n\n    #[inline]\n    fn is_end_stream(&self) -> bool {\n        self.0.is_end_stream()\n    }\n}\n\n// ===== impl IntoBytesBody =====\n\nimpl<B> HttpBody for IntoBytesBody<B>\nwhere\n    B: HttpBody,\n    B::Data: Into<Bytes>,\n{\n    type Data = Bytes;\n    type Error = B::Error;\n\n    fn poll_frame(\n        self: Pin<&mut Self>,\n        
cx: &mut Context,\n    ) -> Poll<Option<Result<http_body::Frame<Self::Data>, Self::Error>>> {\n        match ready!(self.project().inner.poll_frame(cx)) {\n            Some(Ok(f)) => Poll::Ready(Some(Ok(f.map_data(Into::into)))),\n            Some(Err(e)) => Poll::Ready(Some(Err(e))),\n            None => Poll::Ready(None),\n        }\n    }\n\n    #[inline]\n    fn size_hint(&self) -> SizeHint {\n        self.inner.size_hint()\n    }\n\n    #[inline]\n    fn is_end_stream(&self) -> bool {\n        self.inner.is_end_stream()\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use http_body::Body as _;\n\n    use super::Body;\n\n    #[test]\n    fn body_exact_length() {\n        let empty_body = Body::empty();\n        assert!(empty_body.is_end_stream());\n        assert_eq!(empty_body.size_hint().exact(), Some(0));\n\n        let bytes_body = Body::reusable(\"abc\".into());\n        assert!(!bytes_body.is_end_stream());\n        assert_eq!(bytes_body.size_hint().exact(), Some(3));\n\n        // can delegate even when wrapped\n        let stream_body = Body::wrap(empty_body);\n        assert!(stream_body.is_end_stream());\n        assert_eq!(stream_body.size_hint().exact(), Some(0));\n    }\n}\n"
  },
  {
    "path": "src/client/conn/connector.rs",
    "content": "use std::{\n    borrow::Cow,\n    future::Future,\n    pin::Pin,\n    sync::Arc,\n    task::{Context, Poll},\n    time::Duration,\n};\n\nuse tokio_btls::SslStream;\nuse tower::{\n    BoxError, Service, ServiceBuilder, ServiceExt,\n    timeout::TimeoutLayer,\n    util::{BoxCloneSyncService, MapRequestLayer},\n};\n\n#[cfg(unix)]\nuse super::uds::UnixConnector;\nuse super::{\n    AsyncConnWithInfo, BoxedConnectorLayer, BoxedConnectorService, Conn, Connection, HttpConnector,\n    TlsConn, TlsInfoFactory, Unnameable, http::HttpTransport, proxy, verbose::Verbose,\n};\nuse crate::{\n    client::conn::{TokioTcpConnector, descriptor::ConnectionDescriptor},\n    dns::DynResolver,\n    error::{ProxyConnect, TimedOut, map_timeout_to_connector_error},\n    ext::UriExt,\n    proxy::{Intercepted, Matcher as ProxyMatcher, matcher::Intercept},\n    tls::{\n        TlsOptions,\n        conn::{\n            EstablishedConn, HttpsConnector, MaybeHttpsStream, TlsConnector, TlsConnectorBuilder,\n        },\n    },\n};\n\ntype Connecting = Pin<Box<dyn Future<Output = Result<Conn, BoxError>> + Send>>;\n\n/// Configuration for the connector service.\n#[derive(Clone)]\nstruct Config {\n    proxies: Arc<Vec<ProxyMatcher>>,\n    verbose: Verbose,\n    nodelay: bool,\n    tls_info: bool,\n    /// When there is a single timeout layer and no other layers,\n    /// we embed it directly inside our base Service::call().\n    /// This lets us avoid an extra `Box::pin` indirection layer\n    /// since `tokio::time::Timeout` is `Unpin`\n    timeout: Option<Duration>,\n}\n\n/// Builder for `Connector`.\npub struct ConnectorBuilder {\n    config: Config,\n    #[cfg(feature = \"socks\")]\n    resolver: DynResolver,\n    http: HttpConnector,\n    builder: TlsConnectorBuilder,\n}\n\n/// Connector service that establishes connections.\n#[derive(Clone)]\npub enum Connector {\n    Simple(ConnectorService),\n    WithLayers(BoxedConnectorService),\n}\n\n/// Service that establishes connections 
to HTTP servers.\n#[derive(Clone)]\npub struct ConnectorService {\n    config: Config,\n    #[cfg(feature = \"socks\")]\n    resolver: DynResolver,\n    tls: TlsConnector,\n    http: HttpConnector,\n    builder: Arc<TlsConnectorBuilder>,\n}\n\n// ===== impl ConnectorBuilder =====\n\nimpl ConnectorBuilder {\n    /// Set the HTTP connector to use.\n    #[inline]\n    pub fn with_http<F>(mut self, call: F) -> ConnectorBuilder\n    where\n        F: FnOnce(&mut HttpConnector),\n    {\n        call(&mut self.http);\n        self\n    }\n\n    /// Set the TLS connector builder to use.\n    #[inline]\n    pub fn with_tls<F>(mut self, call: F) -> ConnectorBuilder\n    where\n        F: FnOnce(TlsConnectorBuilder) -> TlsConnectorBuilder,\n    {\n        self.builder = call(self.builder);\n        self\n    }\n\n    /// Set the connect timeout.\n    ///\n    /// If a domain resolves to multiple IP addresses, the timeout will be\n    /// evenly divided across them.\n    #[inline]\n    pub fn timeout(mut self, timeout: Option<Duration>) -> ConnectorBuilder {\n        self.config.timeout = timeout;\n        self\n    }\n\n    /// Set connecting verbose mode.\n    #[inline]\n    pub fn verbose(mut self, enabled: bool) -> ConnectorBuilder {\n        self.config.verbose.0 = enabled;\n        self\n    }\n\n    /// Sets the TLS info flag.\n    #[inline]\n    pub fn tls_info(mut self, enabled: bool) -> ConnectorBuilder {\n        self.config.tls_info = enabled;\n        self\n    }\n\n    /// Sets the TCP_NODELAY option for connections.\n    #[inline]\n    pub fn tcp_nodelay(mut self, enabled: bool) -> ConnectorBuilder {\n        self.config.nodelay = enabled;\n        self\n    }\n\n    /// Build a [`Connector`] with the provided layers.\n    pub fn build(\n        self,\n        tls_options: Option<TlsOptions>,\n        layers: Vec<BoxedConnectorLayer>,\n    ) -> crate::Result<Connector> {\n        let mut service = ConnectorService {\n            config: self.config,\n            
#[cfg(feature = \"socks\")]\n            resolver: self.resolver.clone(),\n            http: self.http,\n            tls: self\n                .builder\n                .build(tls_options.map(Cow::Owned).unwrap_or_default())?,\n            builder: Arc::new(self.builder),\n        };\n\n        // we have no user-provided layers, only use concrete types\n        if layers.is_empty() {\n            return Ok(Connector::Simple(service));\n        }\n\n        // user-provided layers exist, the timeout will be applied as an additional layer.\n        let timeout = service.config.timeout.take();\n\n        // otherwise we have user provided layers\n        // so we need type erasure all the way through\n        // as well as mapping the unnameable type of the layers back to ConnectionDescriptor for the\n        // inner service\n        let service = layers.into_iter().fold(\n            BoxCloneSyncService::new(\n                ServiceBuilder::new()\n                    .layer(MapRequestLayer::new(|request: Unnameable| request.0))\n                    .service(service),\n            ),\n            |service, layer| ServiceBuilder::new().layer(layer).service(service),\n        );\n\n        // now we handle the concrete stuff - any `connect_timeout`,\n        // plus a final map_err layer we can use to cast default tower layer\n        // errors to internal errors\n        match timeout {\n            Some(timeout) => {\n                let service = ServiceBuilder::new()\n                    .layer(TimeoutLayer::new(timeout))\n                    .service(service)\n                    .map_err(map_timeout_to_connector_error);\n\n                Ok(Connector::WithLayers(BoxCloneSyncService::new(service)))\n            }\n            None => {\n                // no timeout, but still map err\n                // no named timeout layer but we still map errors since\n                // we might have user-provided timeout layer\n                let service = 
ServiceBuilder::new()\n                    .service(service)\n                    .map_err(map_timeout_to_connector_error);\n\n                Ok(Connector::WithLayers(BoxCloneSyncService::new(service)))\n            }\n        }\n    }\n}\n\n// ===== impl Connector =====\n\nimpl Connector {\n    /// Creates a new [`Connector`] with the provided configuration and optional layers.\n    pub(crate) fn builder(proxies: Vec<ProxyMatcher>, resolver: DynResolver) -> ConnectorBuilder {\n        ConnectorBuilder {\n            config: Config {\n                proxies: Arc::new(proxies),\n                verbose: Verbose::OFF,\n                nodelay: true,\n                tls_info: false,\n                timeout: None,\n            },\n            #[cfg(feature = \"socks\")]\n            resolver: resolver.clone(),\n            http: HttpConnector::new(resolver, TokioTcpConnector::new()),\n            builder: TlsConnector::builder(),\n        }\n    }\n}\n\nimpl Service<ConnectionDescriptor> for Connector {\n    type Response = Conn;\n    type Error = BoxError;\n    type Future = Connecting;\n\n    #[inline]\n    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {\n        match self {\n            Connector::Simple(service) => service.poll_ready(cx),\n            Connector::WithLayers(service) => service.poll_ready(cx),\n        }\n    }\n\n    #[inline]\n    fn call(&mut self, descriptor: ConnectionDescriptor) -> Self::Future {\n        match self {\n            Connector::Simple(service) => service.call(descriptor),\n            Connector::WithLayers(service) => service.call(Unnameable(descriptor)),\n        }\n    }\n}\n\n// ===== impl ConnectorService =====\n\nimpl ConnectorService {\n    fn build_https_connector(\n        &self,\n        https: bool,\n        descriptor: &ConnectionDescriptor,\n    ) -> Result<HttpsConnector<HttpConnector>, BoxError> {\n        let mut http = self.http.clone();\n\n        // Disable Nagle's algorithm 
for TLS handshake\n        //\n        // https://www.openssl.org/docs/man1.1.1/man3/SSL_connect.html#NOTES\n        if https && !self.config.nodelay {\n            http.set_nodelay(true);\n        }\n\n        // Apply TCP options if provided in metadata\n        if let Some(socket_opts) = descriptor.socket_bind_options() {\n            http.set_local_addresses(socket_opts.ipv4_address, socket_opts.ipv6_address);\n            #[cfg(any(\n                target_os = \"android\",\n                target_os = \"fuchsia\",\n                target_os = \"illumos\",\n                target_os = \"ios\",\n                target_os = \"linux\",\n                target_os = \"macos\",\n                target_os = \"solaris\",\n                target_os = \"tvos\",\n                target_os = \"visionos\",\n                target_os = \"watchos\",\n            ))]\n            if let Some(interface) = &socket_opts.interface {\n                http.set_interface(interface.clone());\n            }\n        }\n\n        // Prefer TLS options from metadata, fallback to default\n        let tls = descriptor\n            .tls_options()\n            .map(|opts| self.builder.build(Cow::Borrowed(opts)))\n            .transpose()?\n            .unwrap_or_else(|| self.tls.clone());\n\n        Ok(HttpsConnector::new(http, tls))\n    }\n\n    fn tunnel_conn_from_stream<IO>(&self, io: MaybeHttpsStream<IO>) -> Result<Conn, BoxError>\n    where\n        IO: AsyncConnWithInfo,\n        TlsConn<IO>: Connection,\n        SslStream<IO>: TlsInfoFactory,\n    {\n        let conn = match io {\n            MaybeHttpsStream::Http(stream) => Conn {\n                stream: self.config.verbose.wrap(stream),\n                tls_info: false,\n                proxy: None,\n            },\n            MaybeHttpsStream::Https(stream) => Conn {\n                stream: self.config.verbose.wrap(TlsConn { stream }),\n                tls_info: self.config.tls_info,\n                proxy: None,\n            
},\n        };\n\n        Ok(conn)\n    }\n\n    fn conn_from_stream<IO, P>(&self, io: MaybeHttpsStream<IO>, proxy: P) -> Result<Conn, BoxError>\n    where\n        IO: AsyncConnWithInfo,\n        TlsConn<IO>: Connection,\n        SslStream<IO>: TlsInfoFactory,\n        P: Into<Option<Intercept>>,\n    {\n        let conn = match io {\n            MaybeHttpsStream::Http(stream) => self.config.verbose.wrap(stream),\n            MaybeHttpsStream::Https(stream) => self.config.verbose.wrap(TlsConn { stream }),\n        };\n\n        Ok(Conn {\n            stream: conn,\n            tls_info: self.config.tls_info,\n            proxy: proxy.into(),\n        })\n    }\n\n    async fn connect_auto_proxy<P: Into<Option<Intercept>>>(\n        self,\n        descriptor: ConnectionDescriptor,\n        proxy: P,\n    ) -> Result<Conn, BoxError> {\n        let is_https = descriptor.uri().is_https();\n        let proxy = proxy.into();\n\n        trace!(\"connect with maybe proxy: {:?}\", proxy);\n\n        let mut connector = self.build_https_connector(is_https, &descriptor)?;\n\n        // When using a proxy for HTTPS targets, disable ALPN to avoid protocol negotiation issues\n        if proxy.is_some() && is_https {\n            connector.no_alpn();\n        }\n\n        let io = connector.call(descriptor).await?;\n\n        // Re-enable Nagle's algorithm if it was disabled earlier\n        if is_https && !self.config.nodelay {\n            io.as_ref().set_nodelay(false)?;\n        }\n\n        self.conn_from_stream(io, proxy)\n    }\n\n    async fn connect_via_proxy(\n        self,\n        mut descriptor: ConnectionDescriptor,\n        proxy: Intercepted,\n    ) -> Result<Conn, BoxError> {\n        let uri = descriptor.uri().clone();\n\n        match proxy {\n            Intercepted::Proxy(proxy) => {\n                let is_https = uri.is_https();\n                let proxy_uri = proxy.uri().clone();\n\n                #[cfg(feature = \"socks\")]\n                {\n         
           use proxy::socks::{DnsResolve, SocksConnector, Version};\n\n                    if let Some((version, dns_resolve)) = match proxy_uri.scheme_str() {\n                        Some(\"socks4\") => Some((Version::V4, DnsResolve::Local)),\n                        Some(\"socks4a\") => Some((Version::V4, DnsResolve::Remote)),\n                        Some(\"socks5\") => Some((Version::V5, DnsResolve::Local)),\n                        Some(\"socks5h\") => Some((Version::V5, DnsResolve::Remote)),\n                        _ => None,\n                    } {\n                        trace!(\"connecting via SOCKS proxy: {:?}\", proxy_uri);\n\n                        // Connect to the proxy and establish the SOCKS connection.\n                        let conn = {\n                            // Build a SOCKS connector.\n                            let mut socks = SocksConnector::new(\n                                proxy_uri,\n                                self.http.clone(),\n                                self.resolver.clone(),\n                            );\n                            socks.set_auth(proxy.raw_auth());\n                            socks.set_version(version);\n                            socks.set_dns_mode(dns_resolve);\n                            socks.call(uri).await?\n                        };\n\n                        // Build an HTTPS connector.\n                        let mut connector = self.build_https_connector(is_https, &descriptor)?;\n\n                        // Wrap the established SOCKS connection with TLS if needed.\n                        let io = connector\n                            .call(EstablishedConn::new(conn, descriptor))\n                            .await?;\n\n                        // Re-enable Nagle's algorithm if it was disabled earlier\n                        if is_https && !self.config.nodelay {\n                            io.as_ref().set_nodelay(false)?;\n                        }\n\n                     
   return self.tunnel_conn_from_stream(io);\n                    }\n                }\n\n                if is_https {\n                    trace!(\"tunneling over HTTP(s) proxy: {:?}\", proxy_uri);\n\n                    // Build an HTTPS connector.\n                    let mut connector = self.build_https_connector(is_https, &descriptor)?;\n\n                    // Build a tunnel connector to establish the CONNECT tunnel.\n                    let tunneled = {\n                        let mut tunnel =\n                            proxy::tunnel::TunnelConnector::new(proxy_uri, connector.clone());\n\n                        // If the proxy requires basic authentication, add it to the tunnel.\n                        if let Some(auth) = proxy.basic_auth() {\n                            tunnel = tunnel.with_auth(auth.clone());\n                        }\n\n                        // If the proxy has custom headers, add them to the tunnel.\n                        if let Some(headers) = proxy.custom_headers() {\n                            tunnel = tunnel.with_headers(headers.clone());\n                        }\n\n                        // Connect to the proxy and establish the tunnel.\n                        tunnel.call(uri).await?\n                    };\n\n                    // Wrap the established tunneled stream with TLS.\n                    let io = connector\n                        .call(EstablishedConn::new(tunneled, descriptor))\n                        .await?;\n\n                    // Re-enable Nagle's algorithm if it was disabled earlier\n                    if !self.config.nodelay {\n                        io.as_ref().as_ref().set_nodelay(false)?;\n                    }\n\n                    return self.tunnel_conn_from_stream(io);\n                }\n\n                *descriptor.uri_mut() = proxy_uri;\n                self.connect_auto_proxy(descriptor, proxy)\n                    .await\n                    .map_err(ProxyConnect)\n              
      .map_err(Into::into)\n            }\n            #[cfg(unix)]\n            Intercepted::Unix(unix_socket) => {\n                trace!(\"connecting via Unix socket: {:?}\", unix_socket);\n\n                // Create a Unix connector with the specified socket path.\n                let mut connector =\n                    HttpsConnector::new(UnixConnector::new(unix_socket), self.tls.clone());\n\n                // If the target URI is HTTPS, establish a CONNECT tunnel over the Unix socket,\n                // then upgrade the tunneled stream to TLS.\n                if uri.is_https() {\n                    // Use a dummy HTTP URI so the HTTPS connector works over the Unix socket.\n                    let proxy_uri = http::Uri::from_static(\"http://localhost\");\n\n                    // The tunnel connector will first establish a CONNECT tunnel,\n                    // then perform the TLS handshake over the tunneled stream.\n                    let tunneled = {\n                        // Create a tunnel connector using the Unix socket and the HTTPS connector.\n                        let mut tunnel =\n                            proxy::tunnel::TunnelConnector::new(proxy_uri, connector.clone());\n\n                        tunnel.call(uri).await?\n                    };\n\n                    // Wrap the established tunneled stream with TLS.\n                    let io = connector\n                        .call(EstablishedConn::new(tunneled, descriptor))\n                        .await?;\n\n                    return self.tunnel_conn_from_stream(io);\n                }\n\n                // For plain HTTP, use the Unix connector directly.\n                let io = connector.call(descriptor).await?;\n\n                self.conn_from_stream(io, None)\n            }\n        }\n    }\n\n    async fn connect_auto(self, req: ConnectionDescriptor) -> Result<Conn, BoxError> {\n        debug!(\"starting new connection: {:?}\", req.uri());\n\n        let timeout = 
self.config.timeout;\n\n        // Determine if a proxy should be used for this request.\n        let fut = async {\n            let intercepted = req\n                .proxy()\n                .and_then(|prox| prox.intercept(req.uri()))\n                .or_else(|| {\n                    self.config\n                        .proxies\n                        .iter()\n                        .find_map(|prox| prox.intercept(req.uri()))\n                });\n\n            // If a proxy is matched, connect via proxy; otherwise, connect directly.\n            if let Some(intercepted) = intercepted {\n                self.connect_via_proxy(req, intercepted).await\n            } else {\n                self.connect_auto_proxy(req, None).await\n            }\n        };\n\n        // Apply timeout if configured.\n        if let Some(to) = timeout {\n            tokio::time::timeout(to, fut).await.map_err(|_| TimedOut)?\n        } else {\n            fut.await\n        }\n    }\n}\n\nimpl Service<ConnectionDescriptor> for ConnectorService {\n    type Response = Conn;\n    type Error = BoxError;\n    type Future = Connecting;\n\n    #[inline]\n    fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {\n        Poll::Ready(Ok(()))\n    }\n\n    #[inline]\n    fn call(&mut self, descriptor: ConnectionDescriptor) -> Self::Future {\n        Box::pin(self.clone().connect_auto(descriptor))\n    }\n}\n"
  },
  {
    "path": "src/client/conn/descriptor.rs",
    "content": "use std::{\n    hash::{BuildHasher, Hash, Hasher},\n    num::NonZeroU64,\n    sync::{\n        Arc, LazyLock,\n        atomic::{AtomicU64, Ordering},\n    },\n};\n\nuse http::{Uri, Version};\nuse lru::DefaultHasher;\n\nuse crate::{\n    client::{conn::SocketBindOptions, group::Group},\n    proxy::Matcher as ProxyMacher,\n    tls::TlsOptions,\n};\n\n/// A key that uniquely identifies a group of interchangeable connections for pooling.\n///\n/// This ID is derived from all parameters that define a connection endpoint,\n/// such as URI, proxy, and local socket bindings. Connections with the same\n/// ID are considered equivalent and can be reused.\n#[derive(Debug, Clone)]\npub(crate) struct ConnectionId(Arc<(Group, AtomicU64)>);\n\n/// A blueprint for creating a new client connection, containing all necessary parameters.\n///\n/// This descriptor bundles the target `Uri`, HTTP version, `TlsOptions`, proxy settings,\n/// and other configurations needed to establish a connection.\n#[must_use]\n#[derive(Clone)]\npub(crate) struct ConnectionDescriptor {\n    uri: Uri,\n    version: Option<Version>,\n    proxy: Option<ProxyMacher>,\n    tls_options: Option<TlsOptions>,\n    socket_bind: Option<SocketBindOptions>,\n    connection_id: ConnectionId,\n}\n\n// ===== impl ConnectionId =====\n\nimpl Hash for ConnectionId {\n    fn hash<H: Hasher>(&self, state: &mut H) {\n        let hash = self.0.1.load(Ordering::Relaxed);\n        if hash != 0 {\n            state.write_u64(hash);\n            return;\n        }\n\n        static HASHER: LazyLock<DefaultHasher> = LazyLock::new(DefaultHasher::default);\n        let computed_hash = NonZeroU64::new(HASHER.hash_one(&self.0.0))\n            .map(NonZeroU64::get)\n            .unwrap_or(1);\n\n        let _ = self.0.1.compare_exchange(\n            u64::MIN,\n            computed_hash,\n            Ordering::Relaxed,\n            Ordering::Relaxed,\n        );\n        state.write_u64(computed_hash);\n    }\n}\n\nimpl 
PartialEq for ConnectionId {\n    #[inline]\n    fn eq(&self, other: &Self) -> bool {\n        self.0.0.eq(&other.0.0)\n    }\n}\n\nimpl Eq for ConnectionId {}\n\n// ===== impl ConnectionDescriptor =====\n\nimpl ConnectionDescriptor {\n    /// Create a new [`ConnectionDescriptor`].\n    pub(crate) fn new(\n        uri: Uri,\n        mut group: Group,\n        proxy: Option<ProxyMacher>,\n        version: Option<Version>,\n        tls_options: Option<TlsOptions>,\n        socket_bind: Option<SocketBindOptions>,\n    ) -> ConnectionDescriptor {\n        let connection_id = {\n            group\n                .uri(uri.clone())\n                .version(version)\n                .proxy(proxy.clone())\n                .socket_bind(socket_bind.clone());\n            ConnectionId(Arc::new((group, AtomicU64::new(u64::MIN))))\n        };\n\n        ConnectionDescriptor {\n            uri,\n            proxy,\n            version,\n            tls_options,\n            socket_bind,\n            connection_id,\n        }\n    }\n\n    /// Returns a [`ConnectionId`] group ID for this descriptor.\n    #[inline]\n    pub(crate) fn id(&self) -> ConnectionId {\n        self.connection_id.clone()\n    }\n\n    /// Returns a reference to the [`Uri`].\n    #[inline]\n    pub(crate) fn uri(&self) -> &Uri {\n        &self.uri\n    }\n\n    /// Returns a mutable reference to the [`Uri`].\n    #[inline]\n    pub(crate) fn uri_mut(&mut self) -> &mut Uri {\n        &mut self.uri\n    }\n\n    /// Return the negotiated HTTP version, if any.\n    pub(crate) fn version(&self) -> Option<Version> {\n        self.version\n    }\n\n    /// Return a reference to the [`TlsOptions`].\n    #[inline]\n    pub(crate) fn tls_options(&self) -> Option<&TlsOptions> {\n        self.tls_options.as_ref()\n    }\n\n    /// Return a reference to the [`ProxyMacher`].\n    #[inline]\n    pub(crate) fn proxy(&self) -> Option<&ProxyMacher> {\n        self.proxy.as_ref()\n    }\n\n    /// Return a reference to the 
[`SocketBindOptions`].\n    #[inline]\n    pub(crate) fn socket_bind_options(&self) -> Option<&SocketBindOptions> {\n        self.socket_bind.as_ref()\n    }\n}\n"
  },
  {
    "path": "src/client/conn/http.rs",
    "content": "use std::{\n    future::Future,\n    marker::PhantomData,\n    net::{Ipv4Addr, Ipv6Addr, SocketAddr},\n    pin::Pin,\n    sync::Arc,\n    task::{self, Poll},\n    time::Duration,\n};\n\nuse http::uri::{Scheme, Uri};\nuse pin_project_lite::pin_project;\nuse tokio::io::{AsyncRead, AsyncWrite};\nuse tower::{BoxError, Service};\n\nuse super::{\n    Connection,\n    tcp::{\n        ConnectError, ConnectingTcp, SocketBindOptions, TcpConnector, TcpKeepaliveOptions,\n        TcpOptions,\n    },\n};\nuse crate::dns::{self, InternalResolve};\n\nstatic INVALID_NOT_HTTP: &str = \"invalid URI, scheme is not http\";\nstatic INVALID_MISSING_SCHEME: &str = \"invalid URI, scheme is missing\";\nstatic INVALID_MISSING_HOST: &str = \"invalid URI, host is missing\";\n\ntype ConnectResult<S> = Result<<S as TcpConnector>::Connection, ConnectError>;\ntype BoxConnecting<S> = Pin<Box<dyn Future<Output = ConnectResult<S>> + Send>>;\n\n/// A trait for configuring HTTP transport options on a [`Service<Uri>`] connector.\n///\n/// Provides methods to adjust TCP/socket-level settings such as keepalive,\n/// timeouts, buffer sizes, and local address binding. 
[`HttpConnector`]\n/// is the default implementation.\npub trait HttpTransport: Service<Uri> + Clone + Send + Sized + 'static\nwhere\n    Self::Response: AsyncRead + AsyncWrite + Connection + Unpin + Send + 'static,\n    Self::Error: Into<BoxError>,\n    Self::Future: Unpin + Send + 'static,\n{\n    /// Set that all sockets have `SO_KEEPALIVE` set with the supplied duration\n    /// to remain idle before sending TCP keepalive probes.\n    fn enforce_http(&mut self, enforced: bool);\n\n    /// Set that all sockets have `SO_NODELAY` set to the supplied value `nodelay`.\n    fn set_nodelay(&mut self, nodelay: bool);\n\n    /// Sets the value of the `SO_SNDBUF` option on the socket.\n    fn set_send_buffer_size(&mut self, size: Option<usize>);\n\n    /// Sets the value of the `SO_RCVBUF` option on the socket.\n    fn set_recv_buffer_size(&mut self, size: Option<usize>);\n\n    /// Set that all socket have `SO_REUSEADDR` set to the supplied value `reuse_address`.\n    fn set_reuse_address(&mut self, reuse: bool);\n\n    /// Sets the value of the `TCP_USER_TIMEOUT` option on the socket.\n    #[cfg(any(target_os = \"android\", target_os = \"fuchsia\", target_os = \"linux\"))]\n    fn set_tcp_user_timeout(&mut self, time: Option<Duration>);\n\n    /// Set the connect timeout.\n    fn set_connect_timeout(&mut self, dur: Option<Duration>);\n\n    /// Set timeout for [RFC 6555 (Happy Eyeballs)][RFC 6555] algorithm.\n    ///\n    /// [RFC 6555]: https://tools.ietf.org/html/rfc6555\n    fn set_happy_eyeballs_timeout(&mut self, dur: Option<Duration>);\n\n    /// Set that all sockets have `SO_KEEPALIVE` set with the supplied duration\n    /// to remain idle before sending TCP keepalive probes.\n    fn set_keepalive(&mut self, time: Option<Duration>);\n\n    /// Set the duration between two successive TCP keepalive retransmissions,\n    /// if acknowledgement to the previous keepalive transmission is not received.\n    fn set_keepalive_interval(&mut self, interval: 
Option<Duration>);\n\n    /// Set the number of retransmissions to be carried out before declaring that remote end is not\n    /// available.\n    fn set_keepalive_retries(&mut self, retries: Option<u32>);\n\n    /// Sets the name of the interface to bind sockets produced.\n    #[cfg(any(\n        target_os = \"android\",\n        target_os = \"fuchsia\",\n        target_os = \"illumos\",\n        target_os = \"ios\",\n        target_os = \"linux\",\n        target_os = \"macos\",\n        target_os = \"solaris\",\n        target_os = \"tvos\",\n        target_os = \"visionos\",\n        target_os = \"watchos\",\n    ))]\n    fn set_interface<I: Into<std::borrow::Cow<'static, str>>>(&mut self, interface: I);\n\n    /// Set that all sockets are bound to the configured IPv4 or IPv6 address (depending on host's\n    /// preferences) before connection.\n    fn set_local_addresses<V4, V6>(&mut self, ipv4_address: V4, ipv6_address: V6)\n    where\n        V4: Into<Option<Ipv4Addr>>,\n        V6: Into<Option<Ipv6Addr>>;\n}\n\n/// A connector for the `http` scheme.\n///\n/// Performs DNS resolution in a thread pool, and then connects over TCP.\n///\n/// # Note\n///\n/// Sets the [`HttpInfo`] value on responses, which includes\n/// transport information such as the remote socket address used.\n#[derive(Clone)]\npub struct HttpConnector<R, S> {\n    options: Arc<TcpOptions>,\n    resolver: R,\n    connector: S,\n}\n\n/// Extra information about the transport when an HttpConnector is used.\n///\n/// # Example\n///\n/// ```\n/// # fn doc(res: http::Response<()>) {\n/// use crate::util::client::connect::HttpInfo;\n///\n/// // res = http::Response\n/// res.extensions().get::<HttpInfo>().map(|info| {\n///     println!(\"remote addr = {}\", info.remote_addr());\n/// });\n/// # }\n/// ```\n///\n/// # Note\n///\n/// If a different connector is used besides [`HttpConnector`],\n/// this value will not exist in the extensions. 
Consult that specific\n/// connector to see what \"extra\" information it might provide to responses.\n#[derive(Clone, Debug)]\npub struct HttpInfo {\n    pub(crate) remote_addr: SocketAddr,\n    pub(crate) local_addr: SocketAddr,\n}\n\n// ===== impl HttpConnector =====\n\nimpl<R, S> HttpConnector<R, S> {\n    /// Construct a new [`HttpConnector`].\n    pub fn new(resolver: R, connector: S) -> HttpConnector<R, S> {\n        HttpConnector {\n            options: Arc::new(TcpOptions {\n                enforce_http: true,\n                connect_timeout: None,\n                happy_eyeballs_timeout: Some(Duration::from_millis(300)),\n                nodelay: false,\n                reuse_address: false,\n                send_buffer_size: None,\n                recv_buffer_size: None,\n                #[cfg(any(target_os = \"android\", target_os = \"fuchsia\", target_os = \"linux\"))]\n                tcp_user_timeout: None,\n                tcp_keepalive: TcpKeepaliveOptions::default(),\n                socket_bind: SocketBindOptions::default(),\n            }),\n            resolver,\n            connector,\n        }\n    }\n\n    fn config_mut(&mut self) -> &mut TcpOptions {\n        // If there are HttpConnector clones, this will clone the inner\n        // config. 
So mutating the config won't ever affect previous\n        // clones.\n        Arc::make_mut(&mut self.options)\n    }\n}\n\nimpl<R, S> HttpTransport for HttpConnector<R, S>\nwhere\n    R: InternalResolve + Clone + Send + Sync + 'static,\n    R::Future: Send,\n    S: TcpConnector,\n{\n    /// Option to enforce all `Uri`s have the `http` scheme.\n    ///\n    /// Enabled by default.\n    #[inline]\n    fn enforce_http(&mut self, is_enforced: bool) {\n        self.config_mut().enforce_http = is_enforced;\n    }\n\n    /// Set that all sockets have `SO_NODELAY` set to the supplied value `nodelay`.\n    ///\n    /// Default is `false`.\n    #[inline]\n    fn set_nodelay(&mut self, nodelay: bool) {\n        self.config_mut().nodelay = nodelay;\n    }\n\n    /// Sets the value of the SO_SNDBUF option on the socket.\n    #[inline]\n    fn set_send_buffer_size(&mut self, size: Option<usize>) {\n        self.config_mut().send_buffer_size = size;\n    }\n\n    /// Sets the value of the SO_RCVBUF option on the socket.\n    #[inline]\n    fn set_recv_buffer_size(&mut self, size: Option<usize>) {\n        self.config_mut().recv_buffer_size = size;\n    }\n\n    /// Set that all socket have `SO_REUSEADDR` set to the supplied value `reuse_address`.\n    ///\n    /// Default is `false`.\n    #[inline]\n    fn set_reuse_address(&mut self, reuse_address: bool) {\n        self.config_mut().reuse_address = reuse_address;\n    }\n\n    /// Sets the value of the TCP_USER_TIMEOUT option on the socket.\n    #[inline]\n    #[cfg(any(target_os = \"android\", target_os = \"fuchsia\", target_os = \"linux\"))]\n    fn set_tcp_user_timeout(&mut self, time: Option<Duration>) {\n        self.config_mut().tcp_user_timeout = time;\n    }\n\n    /// Set the connect timeout.\n    ///\n    /// If a domain resolves to multiple IP addresses, the timeout will be\n    /// evenly divided across them.\n    ///\n    /// Default is `None`.\n    #[inline]\n    fn set_connect_timeout(&mut self, dur: 
Option<Duration>) {\n        self.config_mut().connect_timeout = dur;\n    }\n\n    /// Set timeout for [RFC 6555 (Happy Eyeballs)][RFC 6555] algorithm.\n    ///\n    /// If hostname resolves to both IPv4 and IPv6 addresses and connection\n    /// cannot be established using preferred address family before timeout\n    /// elapses, then connector will in parallel attempt connection using other\n    /// address family.\n    ///\n    /// If `None`, parallel connection attempts are disabled.\n    ///\n    /// Default is 300 milliseconds.\n    ///\n    /// [RFC 6555]: https://tools.ietf.org/html/rfc6555\n    #[inline]\n    fn set_happy_eyeballs_timeout(&mut self, dur: Option<Duration>) {\n        self.config_mut().happy_eyeballs_timeout = dur;\n    }\n\n    /// Set that all sockets have `SO_KEEPALIVE` set with the supplied duration\n    /// to remain idle before sending TCP keepalive probes.\n    ///\n    /// If `None`, keepalive is disabled.\n    ///\n    /// Default is `None`.\n    #[inline]\n    fn set_keepalive(&mut self, time: Option<Duration>) {\n        self.config_mut().tcp_keepalive.time = time;\n    }\n\n    /// Set the duration between two successive TCP keepalive retransmissions,\n    /// if acknowledgement to the previous keepalive transmission is not received.\n    #[inline]\n    fn set_keepalive_interval(&mut self, interval: Option<Duration>) {\n        self.config_mut().tcp_keepalive.interval = interval;\n    }\n\n    /// Set the number of retransmissions to be carried out before declaring that remote end is not\n    /// available.\n    #[inline]\n    fn set_keepalive_retries(&mut self, retries: Option<u32>) {\n        self.config_mut().tcp_keepalive.retries = retries;\n    }\n\n    /// Sets the name of the interface to bind sockets produced by this\n    /// connector.\n    ///\n    /// On Linux, this sets the `SO_BINDTODEVICE` option on this socket (see\n    /// [`man 7 socket`] for details). 
On macOS (and macOS-derived systems like\n    /// iOS), illumos, and Solaris, this will instead use the `IP_BOUND_IF`\n    /// socket option (see [`man 7p ip`]).\n    ///\n    /// If a socket is bound to an interface, only packets received from that particular\n    /// interface are processed by the socket. Note that this only works for some socket\n    /// types, particularly `AF_INET` sockets.\n    ///\n    /// On Linux it can be used to specify a [VRF], but the binary needs\n    /// to either have `CAP_NET_RAW` or to be run as root.\n    ///\n    /// This function is only available on the following operating systems:\n    /// - Linux, including Android\n    /// - Fuchsia\n    /// - illumos and Solaris\n    /// - macOS, iOS, visionOS, watchOS, and tvOS\n    ///\n    /// [VRF]: https://www.kernel.org/doc/Documentation/networking/vrf.txt\n    /// [`man 7 socket`]: https://man7.org/linux/man-pages/man7/socket.7.html\n    /// [`man 7p ip`]: https://docs.oracle.com/cd/E86824_01/html/E54777/ip-7p.html\n    #[cfg(any(\n        target_os = \"android\",\n        target_os = \"fuchsia\",\n        target_os = \"illumos\",\n        target_os = \"ios\",\n        target_os = \"linux\",\n        target_os = \"macos\",\n        target_os = \"solaris\",\n        target_os = \"tvos\",\n        target_os = \"visionos\",\n        target_os = \"watchos\",\n    ))]\n    fn set_interface<I: Into<std::borrow::Cow<'static, str>>>(&mut self, interface: I) {\n        self.config_mut().socket_bind.set_interface(interface);\n    }\n\n    /// Set that all sockets are bound to the configured IPv4 or IPv6 address (depending on host's\n    /// preferences) before connection.\n    ///\n    /// If `None`, the sockets will not be bound.\n    ///\n    /// Default is `None`.\n    fn set_local_addresses<V4, V6>(&mut self, ipv4_address: V4, ipv6_address: V6)\n    where\n        V4: Into<Option<Ipv4Addr>>,\n        V6: Into<Option<Ipv6Addr>>,\n    {\n        self.config_mut()\n            
.socket_bind\n            .set_local_addresses(ipv4_address, ipv6_address);\n    }\n}\n\nimpl<R, S> Service<Uri> for HttpConnector<R, S>\nwhere\n    R: InternalResolve + Clone + Send + Sync + 'static,\n    R::Future: Send,\n    S: TcpConnector,\n    S::TcpStream: From<socket2::Socket>,\n{\n    type Response = S::Connection;\n    type Error = ConnectError;\n    type Future = HttpConnecting<R, S>;\n\n    #[inline]\n    fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {\n        self.resolver.poll_ready(cx).map_err(ConnectError::dns)\n    }\n\n    fn call(&mut self, dst: Uri) -> Self::Future {\n        let mut this = self.clone();\n\n        let fut = async move {\n            let options = &this.options;\n\n            let (host, port) = get_host_port(options, &dst)?;\n            let host = host.trim_start_matches('[').trim_end_matches(']');\n\n            let addrs = if let Some(addrs) = dns::SocketAddrs::try_parse(host, port) {\n                addrs\n            } else {\n                let addrs = dns::resolve(&mut this.resolver, dns::Name::new(host.into()))\n                    .await\n                    .map_err(ConnectError::dns)?;\n                let addrs = addrs\n                    .map(|mut addr| {\n                        set_port(&mut addr, port, dst.port().is_some());\n                        addr\n                    })\n                    .collect();\n                dns::SocketAddrs::new(addrs)\n            };\n\n            ConnectingTcp::new(addrs, options, this.connector)\n                .connect(options)\n                .await\n        };\n\n        HttpConnecting {\n            fut: Box::pin(fut),\n            _marker: PhantomData,\n        }\n    }\n}\n\nfn get_host_port<'u>(options: &TcpOptions, dst: &'u Uri) -> Result<(&'u str, u16), ConnectError> {\n    trace!(\n        \"Http::connect; scheme={:?}, host={:?}, port={:?}\",\n        dst.scheme(),\n        dst.host(),\n        dst.port(),\n    
);\n\n    if options.enforce_http {\n        if dst.scheme() != Some(&Scheme::HTTP) {\n            return Err(ConnectError {\n                msg: INVALID_NOT_HTTP,\n                addr: None,\n                cause: None,\n            });\n        }\n    } else if dst.scheme().is_none() {\n        return Err(ConnectError {\n            msg: INVALID_MISSING_SCHEME,\n            addr: None,\n            cause: None,\n        });\n    }\n\n    let host = match dst.host() {\n        Some(s) => s,\n        None => {\n            return Err(ConnectError {\n                msg: INVALID_MISSING_HOST,\n                addr: None,\n                cause: None,\n            });\n        }\n    };\n    let port = match dst.port() {\n        Some(port) => port.as_u16(),\n        None => {\n            if dst.scheme() == Some(&Scheme::HTTPS) {\n                443\n            } else {\n                80\n            }\n        }\n    };\n\n    Ok((host, port))\n}\n\n/// Respect explicit ports in the URI, if none, either\n/// keep non `0` ports resolved from a custom dns resolver,\n/// or use the default port for the scheme.\nfn set_port(addr: &mut SocketAddr, host_port: u16, explicit: bool) {\n    if explicit || addr.port() == 0 {\n        addr.set_port(host_port)\n    };\n}\n\nimpl HttpInfo {\n    /// Get the remote address of the transport used.\n    pub fn remote_addr(&self) -> SocketAddr {\n        self.remote_addr\n    }\n\n    /// Get the local address of the transport used.\n    pub fn local_addr(&self) -> SocketAddr {\n        self.local_addr\n    }\n}\n\npin_project! 
{\n    // Not publicly exported (so missing_docs doesn't trigger).\n    //\n    // We return this `Future` instead of the `Pin<Box<dyn Future>>` directly\n    // so that users don't rely on it fitting in a `Pin<Box<dyn Future>>` slot\n    // (and thus we can change the type in the future).\n    #[must_use = \"futures do nothing unless polled\"]\n    pub struct HttpConnecting<R, S: TcpConnector> {\n        #[pin]\n        fut: BoxConnecting<S>,\n        _marker: PhantomData<R>,\n    }\n}\n\nimpl<R, S> Future for HttpConnecting<R, S>\nwhere\n    R: InternalResolve,\n    S: TcpConnector,\n{\n    type Output = ConnectResult<S>;\n\n    #[inline]\n    fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {\n        self.project().fut.poll(cx)\n    }\n}\n"
  },
  {
    "path": "src/client/conn/proxy/socks.rs",
    "content": "use std::{\n    borrow::Cow,\n    task::{Context, Poll},\n};\n\nuse bytes::Bytes;\nuse http::Uri;\nuse tokio::io::{AsyncRead, AsyncWrite};\nuse tokio_socks::{\n    TargetAddr,\n    tcp::{Socks4Stream, Socks5Stream},\n};\nuse tower::Service;\n\nuse super::Tunneling;\nuse crate::{\n    dns::{GaiResolver, InternalResolve, Name},\n    error::BoxError,\n    ext::UriExt,\n};\n\n#[derive(Debug)]\npub enum SocksError {\n    ConnectFailed(BoxError),\n    DnsResolveFailure(BoxError),\n    Socks(tokio_socks::Error),\n    Io(std::io::Error),\n    Utf8(std::str::Utf8Error),\n    DnsFailure,\n    MissingHost,\n}\n\nimpl std::fmt::Display for SocksError {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        f.write_str(\"SOCKS error: \")?;\n\n        match self {\n            Self::ConnectFailed(e) => {\n                f.write_fmt(format_args!(\"failed to create underlying connection: {e}\"))\n            }\n            Self::Socks(e) => f.write_fmt(format_args!(\"error during SOCKS handshake: {e}\")),\n            Self::Io(e) => f.write_fmt(format_args!(\"io error during SOCKS handshake: {e}\")),\n            Self::Utf8(e) => f.write_fmt(format_args!(\n                \"invalid UTF-8 during SOCKS authentication: {e}\"\n            )),\n            Self::DnsResolveFailure(e) => {\n                f.write_fmt(format_args!(\"failed to resolve DNS for SOCKS target: {e}\"))\n            }\n            Self::DnsFailure => f.write_str(\"could not resolve to acceptable address type\"),\n            Self::MissingHost => f.write_str(\"missing destination host\"),\n        }\n    }\n}\n\nimpl std::error::Error for SocksError {}\n\nimpl From<std::io::Error> for SocksError {\n    fn from(err: std::io::Error) -> Self {\n        Self::Io(err)\n    }\n}\n\nimpl From<std::str::Utf8Error> for SocksError {\n    fn from(err: std::str::Utf8Error) -> Self {\n        Self::Utf8(err)\n    }\n}\n\nimpl From<tokio_socks::Error> for SocksError {\n    fn 
from(err: tokio_socks::Error) -> Self {\n        Self::Socks(err)\n    }\n}\n\n/// Represents the SOCKS protocol version.\n#[derive(Clone, Copy)]\n#[repr(u8)]\npub enum Version {\n    V4,\n    V5,\n}\n\n/// Represents the DNS resolution strategy for SOCKS connections.\n#[derive(Clone, Copy)]\n#[repr(u8)]\npub enum DnsResolve {\n    Local,\n    Remote,\n}\n\n/// A connector that establishes connections through a SOCKS proxy.\npub struct SocksConnector<C, R = GaiResolver> {\n    inner: C,\n    resolver: R,\n    proxy_dst: Uri,\n    auth: Option<(Bytes, Bytes)>,\n    version: Version,\n    dns_resolve: DnsResolve,\n}\n\nimpl<C, R> SocksConnector<C, R>\nwhere\n    R: InternalResolve + Clone,\n{\n    /// Create a new [`SocksConnector`].\n    pub fn new(proxy_dst: Uri, inner: C, resolver: R) -> Self {\n        SocksConnector {\n            inner,\n            resolver,\n            proxy_dst,\n            version: Version::V5,\n            dns_resolve: DnsResolve::Local,\n            auth: None,\n        }\n    }\n\n    /// Sets the authentication credentials for the SOCKS proxy connection.\n    #[inline]\n    pub fn set_auth(&mut self, auth: Option<(Bytes, Bytes)>) {\n        self.auth = auth;\n    }\n\n    /// Sets whether to use the SOCKS5 protocol for the proxy connection.\n    #[inline]\n    pub fn set_version(&mut self, version: Version) {\n        self.version = version;\n    }\n\n    /// Sets whether to resolve DNS locally or let the proxy handle DNS resolution.\n    #[inline]\n    pub fn set_dns_mode(&mut self, dns_resolve: DnsResolve) {\n        self.dns_resolve = dns_resolve;\n    }\n}\n\nimpl<C, R> Service<Uri> for SocksConnector<C, R>\nwhere\n    C: Service<Uri>,\n    C::Future: Send + 'static,\n    C::Response: AsyncRead + AsyncWrite + Unpin + Send + 'static,\n    C::Error: Into<BoxError>,\n    R: InternalResolve + Clone + Send + 'static,\n    <R as InternalResolve>::Future: Send + 'static,\n{\n    type Response = C::Response;\n    type Error = 
SocksError;\n    type Future = Tunneling<C::Future, C::Response, Self::Error>;\n\n    #[inline]\n    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {\n        self.inner\n            .poll_ready(cx)\n            .map_err(Into::into)\n            .map_err(SocksError::ConnectFailed)\n    }\n\n    fn call(&mut self, dst: Uri) -> Self::Future {\n        let connecting = self.inner.call(self.proxy_dst.clone());\n\n        let version = self.version;\n        let dns_resolve = self.dns_resolve;\n        let auth = self.auth.clone();\n        let mut resolver = self.resolver.clone();\n\n        let fut = async move {\n            let host = dst.host().ok_or(SocksError::MissingHost)?;\n            let port = dst.port_or_default();\n\n            // Attempt to tcp connect to the proxy server.\n            // This will return a `tokio::net::TcpStream` if successful.\n            let socket = connecting\n                .await\n                .map_err(Into::into)\n                .map_err(SocksError::ConnectFailed)?;\n\n            // Resolve the target address using the provided resolver.\n            let target_addr = match dns_resolve {\n                DnsResolve::Local => {\n                    let mut socket_addr = resolver\n                        .resolve(Name::new(host.into()))\n                        .await\n                        .map(|mut s| s.next())\n                        .transpose()\n                        .ok_or(SocksError::DnsFailure)?\n                        .map_err(Into::into)\n                        .map_err(SocksError::DnsResolveFailure)?;\n                    socket_addr.set_port(port);\n                    TargetAddr::Ip(socket_addr)\n                }\n                DnsResolve::Remote => TargetAddr::Domain(Cow::Borrowed(host), port),\n            };\n\n            match version {\n                Version::V4 => {\n                    // For SOCKS4, we connect directly to the target address.\n                 
   let stream = Socks4Stream::connect_with_socket(socket, target_addr).await?;\n                    Ok(stream.into_inner())\n                }\n                Version::V5 => {\n                    // For SOCKS5, we need to handle authentication if provided.\n                    // The `auth` is an optional tuple of (username, password).\n                    let stream = match auth {\n                        Some((username, password)) => {\n                            let username = std::str::from_utf8(&username)?;\n                            let password = std::str::from_utf8(&password)?;\n                            Socks5Stream::connect_with_password_and_socket(\n                                socket,\n                                target_addr,\n                                username,\n                                password,\n                            )\n                            .await?\n                        }\n                        None => Socks5Stream::connect_with_socket(socket, target_addr).await?,\n                    };\n                    Ok(stream.into_inner())\n                }\n            }\n        };\n\n        Tunneling {\n            fut: Box::pin(fut),\n            _marker: Default::default(),\n        }\n    }\n}\n"
  },
  {
    "path": "src/client/conn/proxy/tunnel.rs",
    "content": "use std::{\n    marker::{PhantomData, Unpin},\n    task::{self, Poll},\n};\n\nuse bytes::BytesMut;\nuse http::{HeaderMap, HeaderValue, Uri};\nuse tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};\nuse tower::{BoxError, Service};\n\nuse super::Tunneling;\nuse crate::ext::UriExt;\n\n/// Tunnel Proxy via HTTP CONNECT\n///\n/// This is a connector that can be used by the `Client`. It wraps\n/// another connector, and after getting an underlying connection, it creates\n/// an HTTP CONNECT tunnel over it.\n#[derive(Debug)]\npub struct TunnelConnector<C> {\n    headers: Headers,\n    inner: C,\n    proxy_dst: Uri,\n}\n\n#[derive(Clone, Debug)]\nenum Headers {\n    Empty,\n    Auth(HeaderValue),\n    Extra(HeaderMap),\n}\n\n#[derive(Debug)]\npub enum TunnelError {\n    ConnectFailed(BoxError),\n    Io(std::io::Error),\n    Parse(httparse::Error),\n    MissingHost,\n    ProxyAuthRequired,\n    TunnelUnexpectedEof,\n    TunnelUnsuccessful,\n}\n\nimpl<C> TunnelConnector<C> {\n    /// Create a new tunnel connector.\n    ///\n    /// This wraps an underlying connector, and stores the address of a\n    /// tunneling proxy server.\n    ///\n    /// A `TunnelConnector` can then be called with any destination. 
The `proxy_dst` passed to\n    /// `call` will not be used to create the underlying connection, but will\n    /// be used in an HTTP CONNECT request sent to the proxy destination.\n    pub fn new(proxy_dst: Uri, connector: C) -> Self {\n        Self {\n            headers: Headers::Empty,\n            inner: connector,\n            proxy_dst,\n        }\n    }\n\n    /// Add `proxy-authorization` header value to the CONNECT request.\n    pub fn with_auth(mut self, mut auth: HeaderValue) -> Self {\n        // just in case the user forgot\n        auth.set_sensitive(true);\n        match self.headers {\n            Headers::Empty => {\n                self.headers = Headers::Auth(auth);\n            }\n            Headers::Auth(ref mut existing) => {\n                *existing = auth;\n            }\n            Headers::Extra(ref mut extra) => {\n                extra.insert(http::header::PROXY_AUTHORIZATION, auth);\n            }\n        }\n\n        self\n    }\n\n    /// Add extra headers to be sent with the CONNECT request.\n    ///\n    /// If existing headers have been set, these will be merged.\n    pub fn with_headers(mut self, mut headers: HeaderMap) -> Self {\n        match self.headers {\n            Headers::Empty => {\n                self.headers = Headers::Extra(headers);\n            }\n            Headers::Auth(auth) => {\n                headers\n                    .entry(http::header::PROXY_AUTHORIZATION)\n                    .or_insert(auth);\n                self.headers = Headers::Extra(headers);\n            }\n            Headers::Extra(ref mut extra) => {\n                extra.extend(headers);\n            }\n        }\n\n        self\n    }\n}\n\nimpl<C> Service<Uri> for TunnelConnector<C>\nwhere\n    C: Service<Uri>,\n    C::Future: Send + 'static,\n    C::Response: AsyncRead + AsyncWrite + Unpin + Send + 'static,\n    C::Error: Into<BoxError>,\n{\n    type Response = C::Response;\n    type Error = TunnelError;\n    type Future = 
Tunneling<C::Future, C::Response, Self::Error>;\n\n    #[inline]\n    fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {\n        self.inner\n            .poll_ready(cx)\n            .map_err(Into::into)\n            .map_err(TunnelError::ConnectFailed)\n    }\n\n    fn call(&mut self, dst: Uri) -> Self::Future {\n        let connecting = self.inner.call(self.proxy_dst.clone());\n        let headers = self.headers.clone();\n\n        Tunneling {\n            fut: Box::pin(async move {\n                let conn = connecting\n                    .await\n                    .map_err(Into::into)\n                    .map_err(TunnelError::ConnectFailed)?;\n                tunnel(\n                    conn,\n                    dst.host().ok_or(TunnelError::MissingHost)?,\n                    dst.port_or_default(),\n                    &headers,\n                )\n                .await\n            }),\n            _marker: PhantomData,\n        }\n    }\n}\n\nasync fn tunnel<T>(mut conn: T, host: &str, port: u16, headers: &Headers) -> Result<T, TunnelError>\nwhere\n    T: AsyncRead + AsyncWrite + Unpin,\n{\n    let mut buf = format!(\n        \"\\\n         CONNECT {host}:{port} HTTP/1.1\\r\\n\\\n         Host: {host}:{port}\\r\\n\\\n         \"\n    )\n    .into_bytes();\n\n    match headers {\n        Headers::Auth(auth) => {\n            buf.extend_from_slice(b\"Proxy-Authorization: \");\n            buf.extend_from_slice(auth.as_bytes());\n            buf.extend_from_slice(b\"\\r\\n\");\n        }\n        Headers::Extra(extra) => {\n            for (name, value) in extra {\n                buf.extend_from_slice(name.as_str().as_bytes());\n                buf.extend_from_slice(b\": \");\n                buf.extend_from_slice(value.as_bytes());\n                buf.extend_from_slice(b\"\\r\\n\");\n            }\n        }\n        Headers::Empty => (),\n    }\n\n    // headers end\n    buf.extend_from_slice(b\"\\r\\n\");\n\n    
conn.write_all(&buf).await.map_err(TunnelError::Io)?;\n    conn.flush().await.map_err(TunnelError::Io)?;\n\n    let mut buf = BytesMut::with_capacity(8192);\n\n    loop {\n        if conn.read_buf(&mut buf).await.map_err(TunnelError::Io)? == 0 {\n            return Err(TunnelError::TunnelUnexpectedEof);\n        }\n\n        let mut headers = [httparse::EMPTY_HEADER; 64];\n        let mut res = httparse::Response::new(&mut headers);\n        match res.parse(&buf).map_err(TunnelError::Parse)? {\n            httparse::Status::Partial => continue,\n            httparse::Status::Complete(_) => match res.code {\n                Some(200) => return Ok(conn),\n                Some(407) => return Err(TunnelError::ProxyAuthRequired),\n                Some(_) | None => return Err(TunnelError::TunnelUnsuccessful),\n            },\n        }\n    }\n}\n\nimpl std::fmt::Display for TunnelError {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        f.write_str(\"tunnel error: \")?;\n\n        f.write_str(match self {\n            TunnelError::MissingHost => \"missing destination host\",\n            TunnelError::ProxyAuthRequired => \"proxy authorization required\",\n            TunnelError::Parse(_) => \"invalid proxy response\",\n            TunnelError::TunnelUnexpectedEof => \"unexpected end of file\",\n            TunnelError::TunnelUnsuccessful => \"unsuccessful\",\n            TunnelError::ConnectFailed(_) => \"failed to create underlying connection\",\n            TunnelError::Io(_) => \"io error establishing tunnel\",\n        })\n    }\n}\n\nimpl std::error::Error for TunnelError {\n    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {\n        match self {\n            TunnelError::Io(e) => Some(e),\n            TunnelError::Parse(e) => Some(e),\n            TunnelError::ConnectFailed(e) => Some(&**e),\n            _ => None,\n        }\n    }\n}\n"
  },
  {
    "path": "src/client/conn/proxy.rs",
    "content": "//! Proxy helpers\n\n#[cfg(feature = \"socks\")]\npub mod socks;\npub mod tunnel;\n\nuse std::{\n    marker::PhantomData,\n    pin::Pin,\n    task::{Context, Poll},\n};\n\nuse pin_project_lite::pin_project;\n\npin_project! {\n    // Not publicly exported (so missing_docs doesn't trigger).\n    //\n    // We return this `Future` instead of the `Pin<Box<dyn Future>>` directly\n    // so that users don't rely on it fitting in a `Pin<Box<dyn Future>>` slot\n    // (and thus we can change the type in the future).\n    #[must_use = \"futures do nothing unless polled\"]\n    pub struct Tunneling<Fut, T, E> {\n        #[pin]\n        fut: Pin<Box<dyn Future<Output = Result<T, E>> + Send>>,\n        _marker: PhantomData<Fut>,\n    }\n}\n\nimpl<F, T, E1, E2> Future for Tunneling<F, T, E2>\nwhere\n    F: Future<Output = Result<T, E1>>,\n{\n    type Output = Result<T, E2>;\n\n    #[inline]\n    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {\n        self.project().fut.poll(cx)\n    }\n}\n"
  },
  {
    "path": "src/client/conn/tcp/tokio.rs",
    "content": "use std::{future::Future, io, net::SocketAddr, pin::Pin, time::Duration};\n\nuse tokio::net::{TcpSocket, TcpStream};\n\nuse super::TcpConnector;\nuse crate::client::{Connected, Connection, conn::HttpInfo};\n\n/// A connector that uses `tokio` for TCP connections.\n#[derive(Clone, Copy, Debug, Default)]\npub struct TokioTcpConnector {\n    _priv: (),\n}\n\nimpl TokioTcpConnector {\n    /// Create a new [`TokioTcpConnector`].\n    pub fn new() -> Self {\n        Self { _priv: () }\n    }\n}\n\nimpl TcpConnector for TokioTcpConnector {\n    type TcpStream = std::net::TcpStream;\n    type Connection = TcpStream;\n    type Error = io::Error;\n    type Future = Pin<Box<dyn Future<Output = Result<Self::Connection, Self::Error>> + Send>>;\n    type Sleep = tokio::time::Sleep;\n\n    #[inline]\n    fn connect(&self, socket: Self::TcpStream, addr: SocketAddr) -> Self::Future {\n        let socket = TcpSocket::from_std_stream(socket);\n        Box::pin(socket.connect(addr))\n    }\n\n    #[inline]\n    fn sleep(&self, duration: Duration) -> Self::Sleep {\n        tokio::time::sleep(duration)\n    }\n}\n\nimpl Connection for TcpStream {\n    fn connected(&self) -> Connected {\n        let connected = Connected::new();\n        if let (Ok(remote_addr), Ok(local_addr)) = (self.peer_addr(), self.local_addr()) {\n            connected.extra(HttpInfo {\n                remote_addr,\n                local_addr,\n            })\n        } else {\n            connected\n        }\n    }\n}\n"
  },
  {
    "path": "src/client/conn/tcp.rs",
    "content": "pub mod tokio;\n\n#[cfg(any(\n    target_os = \"illumos\",\n    target_os = \"ios\",\n    target_os = \"macos\",\n    target_os = \"solaris\",\n    target_os = \"tvos\",\n    target_os = \"visionos\",\n    target_os = \"watchos\",\n    target_os = \"android\",\n    target_os = \"fuchsia\",\n    target_os = \"linux\",\n))]\nuse std::borrow::Cow;\nuse std::{\n    error::Error as StdError,\n    fmt,\n    future::Future,\n    io,\n    net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr},\n    pin::pin,\n    time::Duration,\n};\n\nuse futures_util::future::Either;\nuse socket2::TcpKeepalive;\n\nuse super::Connection;\nuse crate::{dns, error::BoxError};\n\n/// A builder for tcp connections.\npub trait TcpConnector: Clone + Send + Sync + 'static {\n    /// The underlying stream type.\n    type TcpStream: From<socket2::Socket> + Send + Sync + 'static;\n\n    /// The type of connection returned by this builder.\n    type Connection: ::tokio::io::AsyncRead\n        + ::tokio::io::AsyncWrite\n        + Connection\n        + Send\n        + Unpin\n        + 'static;\n\n    /// The type of error returned by this builder.\n    type Error: Into<Box<dyn StdError + Send + Sync>>;\n\n    /// The future type returned by this builder.\n    type Future: Future<Output = Result<Self::Connection, Self::Error>> + Send + 'static;\n\n    /// The future type returned by this builder's sleep.\n    type Sleep: Future<Output = ()> + Send + 'static;\n\n    /// Build a connection from the given socket and connect to the address.\n    fn connect(&self, socket: Self::TcpStream, addr: SocketAddr) -> Self::Future;\n\n    /// Return a future that sleeps for the given duration.\n    fn sleep(&self, duration: Duration) -> Self::Sleep;\n}\n\npub(super) struct ConnectingTcp<S: TcpConnector> {\n    preferred: ConnectingTcpRemote<S>,\n    fallback: Option<ConnectingTcpFallback<S>>,\n}\n\nstruct ConnectingTcpFallback<S: TcpConnector> {\n    delay: S::Sleep,\n    remote: 
ConnectingTcpRemote<S>,\n}\n\nstruct ConnectingTcpRemote<S: TcpConnector> {\n    addrs: dns::SocketAddrs,\n    connect_timeout: Option<Duration>,\n    connector: S,\n}\n\nimpl<S: TcpConnector> ConnectingTcp<S>\nwhere\n    S::TcpStream: From<socket2::Socket>,\n{\n    pub(super) fn new(remote_addrs: dns::SocketAddrs, config: &TcpOptions, connector: S) -> Self {\n        if let Some(fallback_timeout) = config.happy_eyeballs_timeout {\n            let (preferred_addrs, fallback_addrs) = remote_addrs.split_by_preference(\n                config.socket_bind.ipv4_address,\n                config.socket_bind.ipv6_address,\n            );\n            if fallback_addrs.is_empty() {\n                return ConnectingTcp {\n                    preferred: ConnectingTcpRemote::new(\n                        preferred_addrs,\n                        config.connect_timeout,\n                        connector,\n                    ),\n                    fallback: None,\n                };\n            }\n\n            ConnectingTcp {\n                preferred: ConnectingTcpRemote::new(\n                    preferred_addrs,\n                    config.connect_timeout,\n                    connector.clone(),\n                ),\n                fallback: Some(ConnectingTcpFallback {\n                    delay: connector.sleep(fallback_timeout),\n                    remote: ConnectingTcpRemote::new(\n                        fallback_addrs,\n                        config.connect_timeout,\n                        connector,\n                    ),\n                }),\n            }\n        } else {\n            ConnectingTcp {\n                preferred: ConnectingTcpRemote::new(\n                    remote_addrs,\n                    config.connect_timeout,\n                    connector,\n                ),\n                fallback: None,\n            }\n        }\n    }\n}\n\nimpl<S: TcpConnector> ConnectingTcpRemote<S>\nwhere\n    S::TcpStream: From<socket2::Socket>,\n{\n    
fn new(addrs: dns::SocketAddrs, connect_timeout: Option<Duration>, connector: S) -> Self {\n        let connect_timeout = connect_timeout.and_then(|t| t.checked_div(addrs.len() as u32));\n\n        Self {\n            addrs,\n            connect_timeout,\n            connector,\n        }\n    }\n\n    async fn connect(&mut self, config: &TcpOptions) -> Result<S::Connection, ConnectError> {\n        let mut err = None;\n        for addr in &mut self.addrs {\n            debug!(\"connecting to {}\", addr);\n            match connect(&addr, config, self.connect_timeout, &self.connector) {\n                Ok(fut) => match fut.await {\n                    Ok(tcp) => {\n                        debug!(\"connected to {}\", addr);\n                        return Ok(tcp);\n                    }\n                    Err(mut e) => {\n                        trace!(\"connect error for {}: {:?}\", addr, e);\n                        e.addr = Some(addr);\n                        if err.is_none() {\n                            err = Some(e);\n                        }\n                    }\n                },\n                Err(mut e) => {\n                    trace!(\"connect error for {}: {:?}\", addr, e);\n                    e.addr = Some(addr);\n                    if err.is_none() {\n                        err = Some(e);\n                    }\n                }\n            }\n        }\n\n        match err {\n            Some(e) => Err(e),\n            None => Err(ConnectError::new(\n                \"tcp connect error\",\n                std::io::Error::new(std::io::ErrorKind::NotConnected, \"Network unreachable\"),\n            )),\n        }\n    }\n}\n\nfn bind_local_address(\n    socket: &socket2::Socket,\n    dst_addr: &SocketAddr,\n    local_addr_ipv4: &Option<Ipv4Addr>,\n    local_addr_ipv6: &Option<Ipv6Addr>,\n) -> io::Result<()> {\n    match (*dst_addr, local_addr_ipv4, local_addr_ipv6) {\n        (SocketAddr::V4(_), Some(addr), _) => {\n            
socket.bind(&SocketAddr::new((*addr).into(), 0).into())?;\n        }\n        (SocketAddr::V6(_), _, Some(addr)) => {\n            socket.bind(&SocketAddr::new((*addr).into(), 0).into())?;\n        }\n        _ => {\n            if cfg!(windows) {\n                // Windows requires a socket be bound before calling connect\n                let any: SocketAddr = match *dst_addr {\n                    SocketAddr::V4(_) => ([0, 0, 0, 0], 0).into(),\n                    SocketAddr::V6(_) => ([0, 0, 0, 0, 0, 0, 0, 0], 0).into(),\n                };\n                socket.bind(&any.into())?;\n            }\n        }\n    }\n\n    Ok(())\n}\n\nfn connect<S: TcpConnector>(\n    addr: &SocketAddr,\n    config: &TcpOptions,\n    connect_timeout: Option<Duration>,\n    connector: &S,\n) -> Result<impl Future<Output = Result<S::Connection, ConnectError>>, ConnectError>\nwhere\n    S::TcpStream: From<socket2::Socket>,\n{\n    use socket2::{Domain, Protocol, Socket, Type};\n\n    let domain = Domain::for_address(*addr);\n    let socket = Socket::new(domain, Type::STREAM, Some(Protocol::TCP))\n        .map_err(ConnectError::m(\"tcp open error\"))?;\n\n    // When constructing a Tokio `TcpSocket` from a raw fd/socket, the user is\n    // responsible for ensuring O_NONBLOCK is set.\n    socket\n        .set_nonblocking(true)\n        .map_err(ConnectError::m(\"tcp set_nonblocking error\"))?;\n\n    if let Some(tcp_keepalive) = &config.tcp_keepalive.into_tcpkeepalive() {\n        if let Err(_e) = socket.set_tcp_keepalive(tcp_keepalive) {\n            warn!(\"tcp set_keepalive error: {_e}\");\n        }\n    }\n\n    // Note that this only works for some socket types, particularly AF_INET sockets.\n    #[cfg(any(\n        target_os = \"android\",\n        target_os = \"fuchsia\",\n        target_os = \"illumos\",\n        target_os = \"ios\",\n        target_os = \"linux\",\n        target_os = \"macos\",\n        target_os = \"solaris\",\n        target_os = \"tvos\",\n        
target_os = \"visionos\",\n        target_os = \"watchos\",\n    ))]\n    if let Some(interface) = &config.socket_bind.interface {\n        // On Linux-like systems, set the interface to bind using\n        // `SO_BINDTODEVICE`.\n        #[cfg(any(target_os = \"android\", target_os = \"fuchsia\", target_os = \"linux\"))]\n        socket\n            .bind_device(Some(interface.as_bytes()))\n            .map_err(ConnectError::m(\"tcp bind interface error\"))?;\n\n        // On macOS-like and Solaris-like systems, we instead use `IP_BOUND_IF`.\n        // This socket option desires an integer index for the interface, so we\n        // must first determine the index of the requested interface name using\n        // `if_nametoindex`.\n        #[cfg(any(\n            target_os = \"illumos\",\n            target_os = \"ios\",\n            target_os = \"macos\",\n            target_os = \"solaris\",\n            target_os = \"tvos\",\n            target_os = \"visionos\",\n            target_os = \"watchos\",\n        ))]\n        if let Ok(interface) = std::ffi::CString::new(interface.as_bytes()) {\n            #[allow(unsafe_code)]\n            let idx = unsafe { libc::if_nametoindex(interface.as_ptr()) };\n            let idx = std::num::NonZeroU32::new(idx).ok_or_else(|| {\n                // If the index is 0, check errno and return an I/O error.\n                ConnectError::new(\n                    \"error converting interface name to index\",\n                    io::Error::last_os_error(),\n                )\n            })?;\n\n            // Different setsockopt calls are necessary depending on whether the\n            // address is IPv4 or IPv6.\n            match addr {\n                SocketAddr::V4(_) => socket.bind_device_by_index_v4(Some(idx)),\n                SocketAddr::V6(_) => socket.bind_device_by_index_v6(Some(idx)),\n            }\n            .map_err(ConnectError::m(\"tcp bind interface error\"))?;\n        }\n    }\n\n    #[cfg(any(target_os 
= \"android\", target_os = \"fuchsia\", target_os = \"linux\"))]\n    if let Some(tcp_user_timeout) = &config.tcp_user_timeout {\n        if let Err(_e) = socket.set_tcp_user_timeout(Some(*tcp_user_timeout)) {\n            warn!(\"tcp set_tcp_user_timeout error: {_e}\");\n        }\n    }\n\n    bind_local_address(\n        &socket,\n        addr,\n        &config.socket_bind.ipv4_address,\n        &config.socket_bind.ipv6_address,\n    )\n    .map_err(ConnectError::m(\"tcp bind local error\"))?;\n\n    if config.reuse_address {\n        if let Err(_e) = socket.set_reuse_address(true) {\n            warn!(\"tcp set_reuse_address error: {_e}\");\n        }\n    }\n\n    if let Some(size) = config.send_buffer_size {\n        if let Err(_e) = socket.set_send_buffer_size(size) {\n            warn!(\"tcp set_buffer_size error: {_e}\");\n        }\n    }\n\n    if let Some(size) = config.recv_buffer_size {\n        if let Err(_e) = socket.set_recv_buffer_size(size) {\n            warn!(\"tcp set_recv_buffer_size error: {_e}\");\n        }\n    }\n\n    if let Err(_e) = socket.set_tcp_nodelay(config.nodelay) {\n        warn!(\"tcp set_tcp_nodelay error: {_e}\");\n    }\n\n    let connect = connector.connect(socket.into(), *addr);\n    let sleep = connect_timeout.map(|dur| connector.sleep(dur));\n\n    Ok(async move {\n        match sleep {\n            Some(sleep) => match futures_util::future::select(pin!(sleep), pin!(connect)).await {\n                Either::Left(((), _)) => {\n                    Err(io::Error::new(io::ErrorKind::TimedOut, \"connect timeout\").into())\n                }\n                Either::Right((Ok(s), _)) => Ok(s),\n                Either::Right((Err(e), _)) => Err(e.into()),\n            },\n            None => connect.await.map_err(Into::into),\n        }\n        .map_err(ConnectError::m(\"tcp connect error\"))\n    })\n}\n\nimpl<S: TcpConnector> ConnectingTcp<S>\nwhere\n    S::TcpStream: From<socket2::Socket>,\n{\n    pub(super) async fn 
connect(\n        mut self,\n        config: &TcpOptions,\n    ) -> Result<S::Connection, ConnectError> {\n        match self.fallback {\n            None => self.preferred.connect(config).await,\n            Some(mut fallback) => {\n                let preferred_fut = pin!(self.preferred.connect(config));\n                let fallback_fut = pin!(fallback.remote.connect(config));\n                let fallback_delay = pin!(fallback.delay);\n\n                let (result, future) =\n                    match futures_util::future::select(preferred_fut, fallback_delay).await {\n                        Either::Left((result, _fallback_delay)) => {\n                            (result, Either::Right(fallback_fut))\n                        }\n                        Either::Right(((), preferred_fut)) => {\n                            // Delay is done, start polling both the preferred and the fallback\n                            futures_util::future::select(preferred_fut, fallback_fut)\n                                .await\n                                .factor_first()\n                        }\n                    };\n\n                if result.is_err() {\n                    // Fallback to the remaining future (could be preferred or fallback)\n                    // if we get an error\n                    future.await\n                } else {\n                    result\n                }\n            }\n        }\n    }\n}\n\n// Not publicly exported (so missing_docs doesn't trigger).\npub struct ConnectError {\n    pub(super) msg: &'static str,\n    pub(super) addr: Option<SocketAddr>,\n    pub(super) cause: Option<BoxError>,\n}\n\nimpl ConnectError {\n    pub(super) fn new<E>(msg: &'static str, cause: E) -> ConnectError\n    where\n        E: Into<BoxError>,\n    {\n        ConnectError {\n            msg,\n            addr: None,\n            cause: Some(cause.into()),\n        }\n    }\n\n    pub(super) fn dns<E>(cause: E) -> ConnectError\n    where\n        
E: Into<BoxError>,\n    {\n        ConnectError::new(\"dns error\", cause)\n    }\n\n    pub(super) fn m<E>(msg: &'static str) -> impl FnOnce(E) -> ConnectError\n    where\n        E: Into<BoxError>,\n    {\n        move |cause| ConnectError::new(msg, cause)\n    }\n}\n\nimpl fmt::Debug for ConnectError {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        let mut b = f.debug_tuple(\"ConnectError\");\n        b.field(&self.msg);\n        if let Some(ref addr) = self.addr {\n            b.field(addr);\n        }\n        if let Some(ref cause) = self.cause {\n            b.field(cause);\n        }\n        b.finish()\n    }\n}\n\nimpl fmt::Display for ConnectError {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        f.write_str(self.msg)\n    }\n}\n\nimpl StdError for ConnectError {\n    fn source(&self) -> Option<&(dyn StdError + 'static)> {\n        self.cause.as_ref().map(|e| &**e as _)\n    }\n}\n\n/// Options for configuring socket bind behavior for outbound connections.\n#[derive(Debug, Clone, Hash, PartialEq, Eq, Default)]\npub(crate) struct SocketBindOptions {\n    #[cfg(any(\n        target_os = \"illumos\",\n        target_os = \"ios\",\n        target_os = \"macos\",\n        target_os = \"solaris\",\n        target_os = \"tvos\",\n        target_os = \"visionos\",\n        target_os = \"watchos\",\n        target_os = \"android\",\n        target_os = \"fuchsia\",\n        target_os = \"linux\",\n    ))]\n    pub interface: Option<Cow<'static, str>>,\n    pub ipv4_address: Option<Ipv4Addr>,\n    pub ipv6_address: Option<Ipv6Addr>,\n}\n\nimpl SocketBindOptions {\n    /// Sets the name of the network interface to bind the socket to.\n    ///\n    /// ## Platform behavior\n    /// - On Linux/Fuchsia/Android: sets `SO_BINDTODEVICE`\n    /// - On macOS/illumos/Solaris/iOS/etc.: sets `IP_BOUND_IF`\n    ///\n    /// If `interface` is `None`, the socket will not be explicitly bound to any device.\n    ///\n    /// # 
Errors\n    ///\n    /// On platforms that require a `CString` (e.g. macOS), this will return an error if the\n    /// interface name contains an internal null byte (`\\0`), which is invalid in C strings.\n    ///\n    /// # See Also\n    /// - [VRF documentation](https://www.kernel.org/doc/Documentation/networking/vrf.txt)\n    /// - [`man 7 socket`](https://man7.org/linux/man-pages/man7/socket.7.html)\n    /// - [`man 7p ip`](https://docs.oracle.com/cd/E86824_01/html/E54777/ip-7p.html)\n    #[cfg(any(\n        target_os = \"android\",\n        target_os = \"fuchsia\",\n        target_os = \"illumos\",\n        target_os = \"ios\",\n        target_os = \"linux\",\n        target_os = \"macos\",\n        target_os = \"solaris\",\n        target_os = \"tvos\",\n        target_os = \"visionos\",\n        target_os = \"watchos\",\n    ))]\n    #[inline]\n    pub fn set_interface<I>(&mut self, interface: I) -> &mut Self\n    where\n        I: Into<std::borrow::Cow<'static, str>>,\n    {\n        self.interface = Some(interface.into());\n        self\n    }\n\n    /// Set that all sockets are bound to the configured address before connection.\n    ///\n    /// If `None`, the sockets will not be bound.\n    ///\n    /// Default is `None`.\n    #[inline]\n    pub fn set_local_address<V>(&mut self, local_address: V)\n    where\n        V: Into<Option<IpAddr>>,\n    {\n        match local_address.into() {\n            Some(IpAddr::V4(a)) => {\n                self.ipv4_address = Some(a);\n            }\n            Some(IpAddr::V6(a)) => {\n                self.ipv6_address = Some(a);\n            }\n            _ => {}\n        };\n    }\n\n    /// Set that all sockets are bound to the configured IPv4 or IPv6 address (depending on host's\n    /// preferences) before connection.\n    ///\n    /// If `None`, the sockets will not be bound.\n    ///\n    /// Default is `None`.\n    #[inline]\n    pub fn set_local_addresses<V4, V6>(&mut self, ipv4_address: V4, ipv6_address: 
V6)\n    where\n        V4: Into<Option<Ipv4Addr>>,\n        V6: Into<Option<Ipv6Addr>>,\n    {\n        if let Some(addr) = ipv4_address.into() {\n            self.ipv4_address = Some(addr);\n        }\n        if let Some(addr) = ipv6_address.into() {\n            self.ipv6_address = Some(addr);\n        }\n    }\n}\n\n#[derive(Clone)]\npub(crate) struct TcpOptions {\n    pub enforce_http: bool,\n    pub connect_timeout: Option<Duration>,\n    pub happy_eyeballs_timeout: Option<Duration>,\n    pub nodelay: bool,\n    pub reuse_address: bool,\n    pub send_buffer_size: Option<usize>,\n    pub recv_buffer_size: Option<usize>,\n    #[cfg(any(target_os = \"android\", target_os = \"fuchsia\", target_os = \"linux\"))]\n    pub tcp_user_timeout: Option<Duration>,\n    pub tcp_keepalive: TcpKeepaliveOptions,\n    pub socket_bind: SocketBindOptions,\n}\n\n#[derive(Default, Debug, Clone, Copy)]\npub(crate) struct TcpKeepaliveOptions {\n    pub time: Option<Duration>,\n    #[cfg(any(\n        target_os = \"android\",\n        target_os = \"dragonfly\",\n        target_os = \"freebsd\",\n        target_os = \"fuchsia\",\n        target_os = \"illumos\",\n        target_os = \"ios\",\n        target_os = \"visionos\",\n        target_os = \"linux\",\n        target_os = \"macos\",\n        target_os = \"netbsd\",\n        target_os = \"tvos\",\n        target_os = \"watchos\",\n        target_os = \"windows\",\n        target_os = \"cygwin\",\n    ))]\n    pub interval: Option<Duration>,\n    #[cfg(any(\n        target_os = \"android\",\n        target_os = \"dragonfly\",\n        target_os = \"freebsd\",\n        target_os = \"fuchsia\",\n        target_os = \"illumos\",\n        target_os = \"ios\",\n        target_os = \"visionos\",\n        target_os = \"linux\",\n        target_os = \"macos\",\n        target_os = \"netbsd\",\n        target_os = \"tvos\",\n        target_os = \"watchos\",\n        target_os = \"cygwin\",\n        target_os = \"windows\",\n    ))]\n    
pub retries: Option<u32>,\n}\n\nimpl TcpKeepaliveOptions {\n    /// Converts into a `socket2::TcpKeepalive` if there is any keep alive configuration.\n    pub(crate) fn into_tcpkeepalive(self) -> Option<TcpKeepalive> {\n        let mut dirty = false;\n        let mut ka = TcpKeepalive::new();\n        if let Some(time) = self.time {\n            ka = ka.with_time(time);\n            dirty = true\n        }\n\n        // Set the value of the `TCP_KEEPINTVL` option. On Windows, this sets the\n        // value of the `tcp_keepalive` struct's `keepaliveinterval` field.\n        //\n        // Sets the time interval between TCP keepalive probes.\n        //\n        // Some platforms specify this value in seconds, so sub-second\n        // specifications may be omitted.\n        #[cfg(any(\n            target_os = \"android\",\n            target_os = \"dragonfly\",\n            target_os = \"freebsd\",\n            target_os = \"fuchsia\",\n            target_os = \"illumos\",\n            target_os = \"ios\",\n            target_os = \"visionos\",\n            target_os = \"linux\",\n            target_os = \"macos\",\n            target_os = \"netbsd\",\n            target_os = \"tvos\",\n            target_os = \"watchos\",\n            target_os = \"windows\",\n            target_os = \"cygwin\",\n        ))]\n        {\n            if let Some(interval) = self.interval {\n                dirty = true;\n                ka = ka.with_interval(interval)\n            };\n        }\n\n        // Set the value of the `TCP_KEEPCNT` option.\n        //\n        // Set the maximum number of TCP keepalive probes that will be sent before\n        // dropping a connection, if TCP keepalive is enabled on this socket.\n        #[cfg(any(\n            target_os = \"android\",\n            target_os = \"dragonfly\",\n            target_os = \"freebsd\",\n            target_os = \"fuchsia\",\n            target_os = \"illumos\",\n            target_os = \"ios\",\n            
target_os = \"visionos\",\n            target_os = \"linux\",\n            target_os = \"macos\",\n            target_os = \"netbsd\",\n            target_os = \"tvos\",\n            target_os = \"watchos\",\n            target_os = \"cygwin\",\n            target_os = \"windows\",\n        ))]\n        if let Some(retries) = self.retries {\n            dirty = true;\n            ka = ka.with_retries(retries)\n        };\n\n        if dirty { Some(ka) } else { None }\n    }\n}\n"
  },
  {
    "path": "src/client/conn/tls_info.rs",
    "content": "use bytes::Bytes;\nuse tokio::net::TcpStream;\n#[cfg(unix)]\nuse tokio::net::UnixStream;\nuse tokio_btls::SslStream;\n\nuse crate::tls::{TlsInfo, conn::MaybeHttpsStream};\n\n/// A trait for extracting TLS information from a connection.\n///\n/// Implementors can provide access to peer certificate data or other TLS-related metadata.\n/// For non-TLS connections, this typically returns `None`.\npub trait TlsInfoFactory {\n    fn tls_info(&self) -> Option<TlsInfo>;\n}\n\nfn extract_tls_info<S>(ssl_stream: &SslStream<S>) -> TlsInfo {\n    let ssl = ssl_stream.ssl();\n    TlsInfo {\n        peer_certificate: ssl\n            .peer_certificate()\n            .and_then(|cert| cert.to_der().ok())\n            .map(Bytes::from),\n        peer_certificate_chain: ssl.peer_cert_chain().map(|chain| {\n            chain\n                .iter()\n                .filter_map(|cert| cert.to_der().ok())\n                .map(Bytes::from)\n                .collect()\n        }),\n    }\n}\n\n// ===== impl TcpStream =====\n\nimpl TlsInfoFactory for TcpStream {\n    fn tls_info(&self) -> Option<TlsInfo> {\n        None\n    }\n}\n\nimpl TlsInfoFactory for SslStream<TcpStream> {\n    #[inline]\n    fn tls_info(&self) -> Option<TlsInfo> {\n        Some(extract_tls_info(self))\n    }\n}\n\nimpl TlsInfoFactory for MaybeHttpsStream<TcpStream> {\n    fn tls_info(&self) -> Option<TlsInfo> {\n        match self {\n            MaybeHttpsStream::Https(tls) => tls.tls_info(),\n            MaybeHttpsStream::Http(_) => None,\n        }\n    }\n}\n\nimpl TlsInfoFactory for SslStream<MaybeHttpsStream<TcpStream>> {\n    #[inline]\n    fn tls_info(&self) -> Option<TlsInfo> {\n        Some(extract_tls_info(self))\n    }\n}\n\n// ===== impl UnixStream =====\n\n#[cfg(unix)]\nimpl TlsInfoFactory for UnixStream {\n    fn tls_info(&self) -> Option<TlsInfo> {\n        None\n    }\n}\n\n#[cfg(unix)]\nimpl TlsInfoFactory for SslStream<UnixStream> {\n    #[inline]\n    fn tls_info(&self) -> 
Option<TlsInfo> {\n        Some(extract_tls_info(self))\n    }\n}\n\n#[cfg(unix)]\nimpl TlsInfoFactory for MaybeHttpsStream<UnixStream> {\n    fn tls_info(&self) -> Option<TlsInfo> {\n        match self {\n            MaybeHttpsStream::Https(tls) => tls.tls_info(),\n            MaybeHttpsStream::Http(_) => None,\n        }\n    }\n}\n\n#[cfg(unix)]\nimpl TlsInfoFactory for SslStream<MaybeHttpsStream<UnixStream>> {\n    #[inline]\n    fn tls_info(&self) -> Option<TlsInfo> {\n        Some(extract_tls_info(self))\n    }\n}\n"
  },
  {
    "path": "src/client/conn/uds.rs",
    "content": "use std::{\n    io,\n    path::Path,\n    pin::Pin,\n    sync::Arc,\n    task::{Context, Poll},\n};\n\nuse http::Uri;\nuse tokio::net::UnixStream;\n\nuse super::{Connected, Connection};\n\ntype ConnectResult = io::Result<UnixStream>;\ntype BoxConnecting = Pin<Box<dyn Future<Output = ConnectResult> + Send>>;\n\n#[derive(Clone)]\npub struct UnixConnector {\n    path: Arc<Path>,\n}\n\nimpl UnixConnector {\n    /// Create a new [`UnixConnector`].\n    pub fn new(path: impl Into<Arc<Path>>) -> Self {\n        Self { path: path.into() }\n    }\n}\n\nimpl tower::Service<Uri> for UnixConnector {\n    type Response = UnixStream;\n    type Error = io::Error;\n    type Future = BoxConnecting;\n\n    #[inline]\n    fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {\n        Poll::Ready(Ok(()))\n    }\n\n    fn call(&mut self, _: Uri) -> Self::Future {\n        let fut = UnixStream::connect(self.path.clone());\n        Box::pin(async move {\n            let io = fut.await?;\n            Ok::<_, io::Error>(io)\n        })\n    }\n}\n\nimpl Connection for UnixStream {\n    #[inline]\n    fn connected(&self) -> Connected {\n        Connected::new()\n    }\n}\n"
  },
  {
    "path": "src/client/conn/verbose.rs",
    "content": "use super::AsyncConnWithInfo;\n\n/// Controls whether to enable verbose tracing for connections.\n///\n/// When enabled (with the `tracing` feature), connections are wrapped to log I/O operations for\n/// debugging.\n#[derive(Clone, Copy)]\npub struct Verbose(pub(super) bool);\n\nimpl Verbose {\n    pub const OFF: Verbose = Verbose(false);\n\n    #[cfg_attr(not(feature = \"tracing\"), inline(always))]\n    pub(super) fn wrap<T>(&self, conn: T) -> Box<dyn AsyncConnWithInfo>\n    where\n        T: AsyncConnWithInfo + 'static,\n    {\n        #[cfg(feature = \"tracing\")]\n        if self.0 {\n            return Box::new(sealed::Wrapper {\n                id: crate::util::fast_random(),\n                inner: conn,\n            });\n        }\n\n        Box::new(conn)\n    }\n}\n\n#[cfg(feature = \"tracing\")]\nmod sealed {\n    use std::{\n        fmt,\n        io::{self, IoSlice},\n        pin::Pin,\n        task::{Context, Poll},\n    };\n\n    use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};\n\n    use super::super::{Connected, Connection, TlsInfoFactory};\n    use crate::{tls::TlsInfo, util::Escape};\n\n    pub(super) struct Wrapper<T> {\n        pub(super) id: u64,\n        pub(super) inner: T,\n    }\n\n    impl<T: Connection + AsyncRead + AsyncWrite + Unpin> Connection for Wrapper<T> {\n        #[inline]\n        fn connected(&self) -> Connected {\n            self.inner.connected()\n        }\n    }\n\n    impl<T: AsyncRead + AsyncWrite + Unpin> AsyncRead for Wrapper<T> {\n        fn poll_read(\n            mut self: Pin<&mut Self>,\n            cx: &mut Context,\n            buf: &mut ReadBuf<'_>,\n        ) -> Poll<std::io::Result<()>> {\n            match Pin::new(&mut self.inner).poll_read(cx, buf) {\n                Poll::Ready(Ok(())) => {\n                    trace!(\"{:08x} read: {:?}\", self.id, Escape::new(buf.filled()));\n                    Poll::Ready(Ok(()))\n                }\n                Poll::Ready(Err(e)) => 
Poll::Ready(Err(e)),\n                Poll::Pending => Poll::Pending,\n            }\n        }\n    }\n\n    impl<T: AsyncRead + AsyncWrite + Unpin> AsyncWrite for Wrapper<T> {\n        fn poll_write(\n            mut self: Pin<&mut Self>,\n            cx: &mut Context,\n            buf: &[u8],\n        ) -> Poll<io::Result<usize>> {\n            match Pin::new(&mut self.inner).poll_write(cx, buf) {\n                Poll::Ready(Ok(n)) => {\n                    trace!(\"{:08x} write: {:?}\", self.id, Escape::new(&buf[..n]));\n                    Poll::Ready(Ok(n))\n                }\n                Poll::Ready(Err(e)) => Poll::Ready(Err(e)),\n                Poll::Pending => Poll::Pending,\n            }\n        }\n\n        fn poll_write_vectored(\n            mut self: Pin<&mut Self>,\n            cx: &mut Context<'_>,\n            bufs: &[IoSlice<'_>],\n        ) -> Poll<io::Result<usize>> {\n            match Pin::new(&mut self.inner).poll_write_vectored(cx, bufs) {\n                Poll::Ready(Ok(nwritten)) => {\n                    trace!(\n                        \"{:08x} write (vectored): {:?}\",\n                        self.id,\n                        Vectored { bufs, nwritten }\n                    );\n                    Poll::Ready(Ok(nwritten))\n                }\n                Poll::Ready(Err(e)) => Poll::Ready(Err(e)),\n                Poll::Pending => Poll::Pending,\n            }\n        }\n\n        #[inline]\n        fn is_write_vectored(&self) -> bool {\n            self.inner.is_write_vectored()\n        }\n\n        #[inline]\n        fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<io::Result<()>> {\n            Pin::new(&mut self.inner).poll_flush(cx)\n        }\n\n        #[inline]\n        fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<io::Result<()>> {\n            Pin::new(&mut self.inner).poll_shutdown(cx)\n        }\n    }\n\n    impl<T: TlsInfoFactory> TlsInfoFactory for Wrapper<T> {\n     
   fn tls_info(&self) -> Option<TlsInfo> {\n            self.inner.tls_info()\n        }\n    }\n\n    struct Vectored<'a, 'b> {\n        bufs: &'a [IoSlice<'b>],\n        nwritten: usize,\n    }\n\n    impl fmt::Debug for Vectored<'_, '_> {\n        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n            let mut left = self.nwritten;\n            for buf in self.bufs.iter() {\n                if left == 0 {\n                    break;\n                }\n                let n = std::cmp::min(left, buf.len());\n                Escape::new(&buf[..n]).fmt(f)?;\n                left -= n;\n            }\n            Ok(())\n        }\n    }\n}\n"
  },
  {
    "path": "src/client/conn.rs",
    "content": "mod connector;\nmod http;\nmod proxy;\nmod tcp;\nmod tls_info;\n#[cfg(unix)]\nmod uds;\nmod verbose;\n\npub mod descriptor;\n\nuse std::{\n    fmt::{self, Debug, Formatter},\n    io,\n    io::IoSlice,\n    pin::Pin,\n    sync::{\n        Arc,\n        atomic::{AtomicBool, Ordering},\n    },\n    task::{Context, Poll},\n};\n\nuse ::http::{Extensions, HeaderMap, HeaderValue};\nuse pin_project_lite::pin_project;\nuse tokio::io::{AsyncRead, AsyncWrite, ReadBuf};\nuse tokio_btls::SslStream;\nuse tower::{\n    BoxError,\n    util::{BoxCloneSyncService, BoxCloneSyncServiceLayer},\n};\n\n#[cfg(feature = \"socks\")]\npub(super) use self::proxy::socks;\npub(super) use self::{\n    connector::Connector,\n    http::{HttpInfo, HttpTransport},\n    proxy::tunnel,\n    tcp::{SocketBindOptions, tokio::TokioTcpConnector},\n    tls_info::TlsInfoFactory,\n};\nuse crate::{dns::DynResolver, proxy::matcher::Intercept, tls::TlsInfo};\n\n/// HTTP connector with dynamic DNS resolver.\npub type HttpConnector = self::http::HttpConnector<DynResolver, TokioTcpConnector>;\n\n/// Boxed connector service for establishing connections.\npub type BoxedConnectorService = BoxCloneSyncService<Unnameable, Conn, BoxError>;\n\n/// Boxed layer for building a boxed connector service.\npub type BoxedConnectorLayer =\n    BoxCloneSyncServiceLayer<BoxedConnectorService, Unnameable, Conn, BoxError>;\n\n/// A wrapper type for [`descriptor::ConnectionDescriptor`] used to erase its concrete type.\n///\n/// [`Unnameable`] allows passing connection requests through trait objects or\n/// type-erased interfaces where the concrete type of the request is not important.\n/// This is mainly used internally to simplify service composition and dynamic dispatch.\npub struct Unnameable(pub(super) descriptor::ConnectionDescriptor);\n\n/// A trait alias for types that can be used as async connections.\n///\n/// This trait is automatically implemented for any type that satisfies the required bounds:\n/// - 
[`AsyncRead`] + [`AsyncWrite`]: For I/O operations\n/// - [`Connection`]: For connection metadata\n/// - [`Send`] + [`Sync`] + [`Unpin`] + `'static`: For async/await compatibility\ntrait AsyncConn: AsyncRead + AsyncWrite + Connection + Send + Sync + Unpin + 'static {}\n\n/// An async connection that can also provide TLS information.\n///\n/// This extends [`AsyncConn`] with the ability to extract TLS certificate information\n/// when available. Useful for connections that may be either plain TCP or TLS-encrypted.\ntrait AsyncConnWithInfo: AsyncConn + TlsInfoFactory {}\n\nimpl<T> AsyncConn for T where T: AsyncRead + AsyncWrite + Connection + Send + Sync + Unpin + 'static {}\n\nimpl<T> AsyncConnWithInfo for T where T: AsyncConn + TlsInfoFactory {}\n\npin_project! {\n    /// Note: the `is_proxy` member means *is plain text HTTP proxy*.\n    /// This tells core whether the URI should be written in\n    /// * origin-form (`GET /just/a/path HTTP/1.1`), when `proxy == None`, or\n    /// * absolute-form (`GET http://foo.bar/and/a/path HTTP/1.1`), otherwise.\n    pub struct Conn {\n        tls_info: bool,\n        proxy: Option<Intercept>,\n        #[pin]\n        stream: Box<dyn AsyncConnWithInfo>,\n    }\n}\n\npin_project! 
{\n    /// A wrapper around `SslStream` that adapts it for use as a generic async connection.\n    ///\n    /// This type enables unified handling of plain TCP and TLS-encrypted streams by providing\n    /// implementations of `Connection`, `Read`, `Write`, and `TlsInfoFactory`.\n    /// It is mainly used internally to abstract over different connection types.\n    pub struct TlsConn<T> {\n        #[pin]\n        stream: SslStream<T>,\n    }\n}\n\n/// Describes a type returned by a connector.\npub trait Connection {\n    /// Return metadata describing the connection.\n    fn connected(&self) -> Connected;\n}\n\n/// Indicates the negotiated ALPN protocol.\n#[derive(Clone, Copy, Debug, PartialEq)]\nenum Alpn {\n    H2,\n    None,\n}\n\n/// A pill that can be poisoned to indicate that a connection should not be reused.\n#[derive(Clone)]\nstruct PoisonPill(Arc<AtomicBool>);\n\n/// A boxed asynchronous connection with associated information.\n#[derive(Debug)]\nstruct Extra(Box<dyn ExtraInner>);\n\n/// Inner trait for extra connection information.\ntrait ExtraInner: Send + Sync + Debug {\n    fn clone_box(&self) -> Box<dyn ExtraInner>;\n    fn set(&self, res: &mut Extensions);\n}\n\n// This indirection allows the `Connected` to have a type-erased \"extra\" value,\n// while that type still knows its inner extra type. 
This allows the correct\n// TypeId to be used when inserting into `res.extensions_mut()`.\n#[derive(Debug, Clone)]\nstruct ExtraEnvelope<T>(T);\n\n/// Chains two `ExtraInner` implementations together, inserting both into\n/// the extensions.\n#[derive(Debug)]\nstruct ExtraChain<T>(Box<dyn ExtraInner>, T);\n\n/// Information about an HTTP proxy identity.\n#[derive(Debug, Default, Clone)]\nstruct ProxyIdentity {\n    is_proxied: bool,\n    auth: Option<HeaderValue>,\n    headers: Option<HeaderMap>,\n}\n\n/// Extra information about the connected transport.\n///\n/// This can be used to inform recipients about things like if ALPN\n/// was used, or if connected to an HTTP proxy.\n#[derive(Debug, Clone)]\npub struct Connected {\n    alpn: Alpn,\n    proxy: Box<ProxyIdentity>,\n    extra: Option<Extra>,\n    poisoned: PoisonPill,\n}\n\n// ==== impl Conn ====\n\nimpl Connection for Conn {\n    fn connected(&self) -> Connected {\n        let mut connected = self.stream.connected();\n\n        if let Some(proxy) = &self.proxy {\n            connected = connected.proxy(proxy.clone());\n        }\n\n        if self.tls_info {\n            if let Some(tls_info) = self.stream.tls_info() {\n                connected.extra(tls_info)\n            } else {\n                connected\n            }\n        } else {\n            connected\n        }\n    }\n}\n\nimpl AsyncRead for Conn {\n    #[inline]\n    fn poll_read(\n        self: Pin<&mut Self>,\n        cx: &mut Context,\n        buf: &mut ReadBuf<'_>,\n    ) -> Poll<io::Result<()>> {\n        AsyncRead::poll_read(self.project().stream, cx, buf)\n    }\n}\n\nimpl AsyncWrite for Conn {\n    #[inline]\n    fn poll_write(\n        self: Pin<&mut Self>,\n        cx: &mut Context,\n        buf: &[u8],\n    ) -> Poll<Result<usize, io::Error>> {\n        AsyncWrite::poll_write(self.project().stream, cx, buf)\n    }\n\n    #[inline]\n    fn poll_write_vectored(\n        self: Pin<&mut Self>,\n        cx: &mut Context<'_>,\n        
bufs: &[IoSlice<'_>],\n    ) -> Poll<Result<usize, io::Error>> {\n        AsyncWrite::poll_write_vectored(self.project().stream, cx, bufs)\n    }\n\n    #[inline]\n    fn is_write_vectored(&self) -> bool {\n        self.stream.is_write_vectored()\n    }\n\n    #[inline]\n    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), io::Error>> {\n        AsyncWrite::poll_flush(self.project().stream, cx)\n    }\n\n    #[inline]\n    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), io::Error>> {\n        AsyncWrite::poll_shutdown(self.project().stream, cx)\n    }\n}\n\n// ===== impl TlsConn =====\n\nimpl<T> Connection for TlsConn<T>\nwhere\n    T: Connection,\n{\n    fn connected(&self) -> Connected {\n        let connected = self.stream.get_ref().connected();\n        if self.stream.ssl().selected_alpn_protocol() == Some(b\"h2\") {\n            connected.negotiated_h2()\n        } else {\n            connected\n        }\n    }\n}\n\nimpl<T: AsyncRead + AsyncWrite + Unpin> AsyncRead for TlsConn<T> {\n    #[inline]\n    fn poll_read(\n        self: Pin<&mut Self>,\n        cx: &mut Context,\n        buf: &mut ReadBuf<'_>,\n    ) -> Poll<tokio::io::Result<()>> {\n        AsyncRead::poll_read(self.project().stream, cx, buf)\n    }\n}\n\nimpl<T: AsyncRead + AsyncWrite + Unpin> AsyncWrite for TlsConn<T> {\n    #[inline]\n    fn poll_write(\n        self: Pin<&mut Self>,\n        cx: &mut Context,\n        buf: &[u8],\n    ) -> Poll<Result<usize, tokio::io::Error>> {\n        AsyncWrite::poll_write(self.project().stream, cx, buf)\n    }\n\n    #[inline]\n    fn poll_write_vectored(\n        self: Pin<&mut Self>,\n        cx: &mut Context<'_>,\n        bufs: &[IoSlice<'_>],\n    ) -> Poll<Result<usize, io::Error>> {\n        AsyncWrite::poll_write_vectored(self.project().stream, cx, bufs)\n    }\n\n    #[inline]\n    fn is_write_vectored(&self) -> bool {\n        self.stream.is_write_vectored()\n    }\n\n    #[inline]\n    fn 
poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), tokio::io::Error>> {\n        AsyncWrite::poll_flush(self.project().stream, cx)\n    }\n\n    #[inline]\n    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), tokio::io::Error>> {\n        AsyncWrite::poll_shutdown(self.project().stream, cx)\n    }\n}\n\nimpl<T> TlsInfoFactory for TlsConn<T>\nwhere\n    SslStream<T>: TlsInfoFactory,\n{\n    #[inline]\n    fn tls_info(&self) -> Option<TlsInfo> {\n        self.stream.tls_info()\n    }\n}\n\n// ===== impl PoisonPill =====\n\nimpl fmt::Debug for PoisonPill {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        // print the address of the pill—this makes debugging issues much easier\n        write!(\n            f,\n            \"PoisonPill@{:p} {{ poisoned: {} }}\",\n            self.0,\n            self.0.load(Ordering::Relaxed)\n        )\n    }\n}\n\nimpl PoisonPill {\n    /// Create a healthy (not poisoned) pill.\n    #[inline]\n    fn healthy() -> Self {\n        Self(Arc::new(AtomicBool::new(false)))\n    }\n}\n\n// ===== impl Connected =====\n\nimpl Connected {\n    /// Create new `Connected` type with empty metadata.\n    pub fn new() -> Connected {\n        Connected {\n            alpn: Alpn::None,\n            proxy: Box::new(ProxyIdentity::default()),\n            extra: None,\n            poisoned: PoisonPill::healthy(),\n        }\n    }\n\n    /// Set extra connection information to be set in the extensions of every `Response`.\n    pub fn extra<T: Clone + Send + Sync + Debug + 'static>(mut self, extra: T) -> Connected {\n        if let Some(prev) = self.extra {\n            self.extra = Some(Extra(Box::new(ExtraChain(prev.0, extra))));\n        } else {\n            self.extra = Some(Extra(Box::new(ExtraEnvelope(extra))));\n        }\n        self\n    }\n\n    /// Copies the extra connection information into an `Extensions` map.\n    #[inline]\n    pub fn set_extras(&self, extensions: &mut 
Extensions) {\n        if let Some(extra) = &self.extra {\n            extra.set(extensions);\n        }\n    }\n\n    /// Set that the proxy was used for this connected transport.\n    pub fn proxy(mut self, proxy: Intercept) -> Connected {\n        self.proxy.is_proxied = true;\n\n        if let Some(auth) = proxy.basic_auth() {\n            self.proxy.auth.replace(auth.clone());\n        }\n\n        if let Some(headers) = proxy.custom_headers() {\n            self.proxy.headers.replace(headers.clone());\n        }\n\n        self\n    }\n\n    /// Determines if the connected transport is to an HTTP proxy.\n    #[inline]\n    pub fn is_proxied(&self) -> bool {\n        self.proxy.is_proxied\n    }\n\n    /// Get the proxy identity information for the connected transport.\n    #[inline]\n    pub fn proxy_auth(&self) -> Option<&HeaderValue> {\n        self.proxy.auth.as_ref()\n    }\n\n    /// Get the custom proxy headers for the connected transport.\n    #[inline]\n    pub fn proxy_headers(&self) -> Option<&HeaderMap> {\n        self.proxy.headers.as_ref()\n    }\n\n    /// Set that the connected transport negotiated HTTP/2 as its next protocol.\n    #[inline]\n    pub fn negotiated_h2(mut self) -> Connected {\n        self.alpn = Alpn::H2;\n        self\n    }\n\n    /// Determines if the connected transport negotiated HTTP/2 as its next protocol.\n    #[inline]\n    pub fn is_negotiated_h2(&self) -> bool {\n        self.alpn == Alpn::H2\n    }\n\n    /// Determine if this connection is poisoned\n    #[inline]\n    pub fn poisoned(&self) -> bool {\n        self.poisoned.0.load(Ordering::Relaxed)\n    }\n\n    /// Poison this connection\n    ///\n    /// A poisoned connection will not be reused for subsequent requests by the pool\n    #[allow(unused)]\n    #[inline]\n    pub fn poison(&self) {\n        self.poisoned.0.store(true, Ordering::Relaxed);\n        debug!(\n            \"connection was poisoned. 
this connection will not be reused for subsequent requests\"\n        );\n    }\n}\n\n// ===== impl Extra =====\n\nimpl Extra {\n    #[inline]\n    fn set(&self, res: &mut Extensions) {\n        self.0.set(res);\n    }\n}\n\nimpl Clone for Extra {\n    fn clone(&self) -> Extra {\n        Extra(self.0.clone_box())\n    }\n}\n\n// ===== impl ExtraEnvelope =====\n\nimpl<T> ExtraInner for ExtraEnvelope<T>\nwhere\n    T: Clone + Send + Sync + Debug + 'static,\n{\n    fn clone_box(&self) -> Box<dyn ExtraInner> {\n        Box::new(self.clone())\n    }\n\n    fn set(&self, res: &mut Extensions) {\n        res.insert(self.0.clone());\n    }\n}\n\n// ===== impl ExtraChain =====\n\nimpl<T: Clone> Clone for ExtraChain<T> {\n    fn clone(&self) -> Self {\n        ExtraChain(self.0.clone_box(), self.1.clone())\n    }\n}\n\nimpl<T> ExtraInner for ExtraChain<T>\nwhere\n    T: Clone + Send + Sync + Debug + 'static,\n{\n    fn clone_box(&self) -> Box<dyn ExtraInner> {\n        Box::new(self.clone())\n    }\n\n    fn set(&self, res: &mut Extensions) {\n        self.0.set(res);\n        res.insert(self.1.clone());\n    }\n}\n"
  },
  {
    "path": "src/client/core/body/incoming.rs",
    "content": "use std::{\n    fmt,\n    pin::Pin,\n    task::{Context, Poll, ready},\n};\n\nuse bytes::Bytes;\nuse http::HeaderMap;\nuse http_body::{Body, Frame, SizeHint};\nuse tokio::sync::{mpsc, oneshot};\nuse tokio_util::sync::PollSender;\n\nuse super::{DecodedLength, watch};\nuse crate::client::core::{Error, Result, proto::http2::ping};\n\n/// A stream of [`Bytes`], used when receiving bodies from the network.\n///\n/// Note that Users should not instantiate this struct directly. When working with the client,\n/// [`Incoming`] is returned to you in responses.\n#[must_use = \"streams do nothing unless polled\"]\npub struct Incoming {\n    kind: Kind,\n}\n\nenum Kind {\n    H1 {\n        want_tx: watch::Sender,\n        data_rx: mpsc::Receiver<Result<Bytes, Error>>,\n        trailers_rx: oneshot::Receiver<HeaderMap>,\n        content_length: DecodedLength,\n        data_done: bool,\n    },\n    H2 {\n        ping: ping::Recorder,\n        recv: http2::RecvStream,\n        content_length: DecodedLength,\n        data_done: bool,\n    },\n    Empty,\n}\n\n/// A sender half created through [`Body::channel()`].\n///\n/// Useful when wanting to stream chunks from another thread.\n///\n/// ## Body Closing\n///\n/// Note that the request body will always be closed normally when the sender is dropped (meaning\n/// that the empty terminating chunk will be sent to the remote). If you desire to close the\n/// connection with an incomplete response (e.g. 
in the case of an error during asynchronous\n/// processing), call the [`Sender::abort()`] method to abort the body in an abnormal fashion.\n///\n/// [`Body::channel()`]: struct.Body.html#method.channel\n/// [`Sender::abort()`]: struct.Sender.html#method.abort\n#[must_use = \"Sender does nothing unless sent on\"]\npub(crate) struct Sender {\n    want_rx: watch::Receiver,\n    data_tx: PollSender<Result<Bytes, Error>>,\n    trailers_tx: Option<oneshot::Sender<HeaderMap>>,\n}\n\n// ===== impl Incoming =====\n\nimpl Incoming {\n    #[inline]\n    pub(crate) fn empty() -> Incoming {\n        Incoming { kind: Kind::Empty }\n    }\n\n    pub(crate) fn h1(content_length: DecodedLength, wanter: bool) -> (Sender, Incoming) {\n        let (data_tx, data_rx) = mpsc::channel(2);\n        let (trailers_tx, trailers_rx) = oneshot::channel();\n        // If wanter is true, `Sender::poll_ready()` won't becoming ready\n        // until the `Body` has been polled for data once.\n        let (want_tx, want_rx) = watch::channel(wanter);\n\n        (\n            Sender {\n                want_rx,\n                data_tx: PollSender::new(data_tx),\n                trailers_tx: Some(trailers_tx),\n            },\n            Incoming {\n                kind: Kind::H1 {\n                    want_tx,\n                    data_rx,\n                    trailers_rx,\n                    content_length,\n                    data_done: false,\n                },\n            },\n        )\n    }\n\n    pub(crate) fn h2(\n        recv: http2::RecvStream,\n        mut content_length: DecodedLength,\n        ping: ping::Recorder,\n    ) -> Self {\n        // If the stream is already EOS, then the \"unknown length\" is clearly\n        // actually ZERO.\n        if !content_length.is_exact() && recv.is_end_stream() {\n            content_length = DecodedLength::ZERO;\n        }\n\n        Incoming {\n            kind: Kind::H2 {\n                ping,\n                recv,\n                
content_length,\n                data_done: false,\n            },\n        }\n    }\n}\n\nimpl Body for Incoming {\n    type Data = Bytes;\n    type Error = Error;\n\n    fn poll_frame(\n        mut self: Pin<&mut Self>,\n        cx: &mut Context<'_>,\n    ) -> Poll<Option<Result<Frame<Self::Data>, Self::Error>>> {\n        match self.kind {\n            Kind::H1 {\n                ref want_tx,\n                ref mut data_rx,\n                ref mut trailers_rx,\n                ref mut content_length,\n                ref mut data_done,\n            } => {\n                want_tx.ready();\n\n                if !*data_done {\n                    match ready!(data_rx.poll_recv(cx)) {\n                        Some(Ok(chunk)) => {\n                            content_length.sub_if(chunk.len() as u64);\n                            return Poll::Ready(Some(Ok(Frame::data(chunk))));\n                        }\n                        Some(Err(err)) => return Poll::Ready(Some(Err(err))),\n                        None => {\n                            // fall through to trailers\n                            *data_done = true;\n                        }\n                    }\n                }\n\n                // check trailers after data is terminated\n                if !trailers_rx.is_terminated() {\n                    if let Ok(trailers) = ready!(Pin::new(trailers_rx).poll(cx)) {\n                        return Poll::Ready(Some(Ok(Frame::trailers(trailers))));\n                    }\n                }\n\n                Poll::Ready(None)\n            }\n            Kind::H2 {\n                ref ping,\n                ref mut recv,\n                ref mut content_length,\n                ref mut data_done,\n            } => {\n                if !*data_done {\n                    match ready!(recv.poll_data(cx)) {\n                        Some(Ok(bytes)) => {\n                            let _ = recv.flow_control().release_capacity(bytes.len());\n              
              content_length.sub_if(bytes.len() as u64);\n                            ping.record_data(bytes.len());\n                            return Poll::Ready(Some(Ok(Frame::data(bytes))));\n                        }\n                        Some(Err(e)) => {\n                            if let Some(http2::Reason::NO_ERROR) = e.reason() {\n                                // As mentioned in RFC 7540 Section 8.1, a RST_STREAM with NO_ERROR\n                                // indicates an early response, and should cause the body reading\n                                // to stop, but not fail it:\n                                return Poll::Ready(None);\n                            } else {\n                                return Poll::Ready(Some(Err(Error::new_body(e))));\n                            }\n                        }\n                        None => {\n                            // fall through to trailers\n                            *data_done = true;\n                        }\n                    }\n                }\n\n                // after data, check trailers\n                match ready!(recv.poll_trailers(cx)) {\n                    Ok(t) => {\n                        ping.record_non_data();\n                        Poll::Ready(Ok(t.map(Frame::trailers)).transpose())\n                    }\n                    Err(e) => {\n                        if let Some(http2::Reason::NO_ERROR) = e.reason() {\n                            // Same as above, a RST_STREAM with NO_ERROR indicates an early\n                            // response, and should cause reading the trailers to stop, but\n                            // not fail it:\n                            Poll::Ready(None)\n                        } else {\n                            Poll::Ready(Some(Err(Error::new_h2(e))))\n                        }\n                    }\n                }\n            }\n            Kind::Empty => Poll::Ready(None),\n        }\n    }\n\n    
#[inline]\n    fn is_end_stream(&self) -> bool {\n        match self.kind {\n            Kind::H1 { content_length, .. } => content_length == DecodedLength::ZERO,\n            Kind::H2 { recv: ref h2, .. } => h2.is_end_stream(),\n            Kind::Empty => true,\n        }\n    }\n\n    #[inline]\n    fn size_hint(&self) -> SizeHint {\n        match self.kind {\n            Kind::H1 { content_length, .. } | Kind::H2 { content_length, .. } => content_length\n                .into_opt()\n                .map_or_else(SizeHint::default, SizeHint::with_exact),\n            Kind::Empty => SizeHint::with_exact(0),\n        }\n    }\n}\n\nimpl fmt::Debug for Incoming {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        let mut builder = f.debug_tuple(stringify!(Incoming));\n        match self.kind {\n            Kind::Empty => builder.field(&stringify!(Empty)),\n            _ => builder.field(&stringify!(Streaming)),\n        };\n        builder.finish()\n    }\n}\n\n// ===== impl Sender =====\n\nimpl Sender {\n    /// Check to see if this `Sender` can send more data.\n    #[inline]\n    pub(crate) fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<()>> {\n        // Check if the receiver end has tried polling for the body yet\n        ready!(self.want_rx.poll_ready(cx)?);\n        self.data_tx\n            .poll_reserve(cx)\n            .map_err(|_| Error::new_closed())\n    }\n\n    /// Send data on this channel.\n    ///\n    /// # Errors\n    ///\n    /// Returns `Err(Bytes)` if the channel could not (currently) accept\n    /// another `Bytes`.\n    ///\n    /// # Panics\n    ///\n    /// If `poll_ready` was not successfully called prior to calling `send_data`, then this method\n    /// will panic.\n    #[inline]\n    pub(crate) fn send_data(&mut self, chunk: Bytes) -> Result<(), Bytes> {\n        self.data_tx.send_item(Ok(chunk)).map_err(|err| {\n            err.into_inner()\n                .expect(\"value returned\")\n                
.expect(\"just sent Ok\")\n        })\n    }\n\n    /// Send trailers on this channel.\n    ///\n    /// # Errors\n    ///\n    /// Returns `Err(HeaderMap)` if the channel could not (currently) accept\n    /// another `HeaderMap`.\n    #[inline]\n    pub(crate) fn send_trailers(&mut self, trailers: HeaderMap) -> Result<(), Option<HeaderMap>> {\n        self.trailers_tx\n            .take()\n            .ok_or(None)?\n            .send(trailers)\n            .map_err(Some)\n    }\n\n    /// Send an error on this channel, which will cause the body stream to end with an error.\n    #[inline]\n    pub(crate) fn send_error(&mut self, err: Error) {\n        self.data_tx\n            .get_ref()\n            .map(|sender| sender.try_send(Err(err)));\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::{mem, task::Poll};\n\n    use http_body_util::BodyExt;\n\n    use super::{Body, DecodedLength, Error, Incoming, Result, Sender, SizeHint};\n\n    impl Incoming {\n        /// Create a `Body` stream with an associated sender half.\n        ///\n        /// Useful when wanting to stream chunks from another thread.\n        pub(crate) fn channel() -> (Sender, Incoming) {\n            Self::h1(DecodedLength::CHUNKED, /* wanter = */ false)\n        }\n    }\n\n    impl Sender {\n        async fn ready(&mut self) -> Result<()> {\n            std::future::poll_fn(|cx| self.poll_ready(cx)).await\n        }\n\n        fn abort(mut self) {\n            self.send_error(Error::new_body_write_aborted());\n        }\n    }\n\n    #[test]\n    fn test_size_of() {\n        // These are mostly to help catch *accidentally* increasing\n        // the size by too much.\n\n        let body_size = mem::size_of::<Incoming>();\n        let body_expected_size = mem::size_of::<u64>() * 6;\n        assert!(\n            body_size <= body_expected_size,\n            \"Body size = {body_size} <= {body_expected_size}\",\n        );\n\n        //assert_eq!(body_size, mem::size_of::<Option<Incoming>>(), 
\"Option<Incoming>\");\n\n        assert_eq!(\n            mem::size_of::<Sender>(),\n            mem::size_of::<usize>() * 8,\n            \"Sender\"\n        );\n\n        assert_eq!(\n            mem::size_of::<Sender>(),\n            mem::size_of::<Option<Sender>>(),\n            \"Option<Sender>\"\n        );\n    }\n\n    #[test]\n    fn size_hint() {\n        fn eq(body: Incoming, b: SizeHint, note: &str) {\n            let a = body.size_hint();\n            assert_eq!(a.lower(), b.lower(), \"lower for {note:?}\");\n            assert_eq!(a.upper(), b.upper(), \"upper for {note:?}\");\n        }\n\n        eq(Incoming::empty(), SizeHint::with_exact(0), \"empty\");\n\n        eq(Incoming::channel().1, SizeHint::new(), \"channel\");\n\n        eq(\n            Incoming::h1(DecodedLength::new(4), /* wanter = */ false).1,\n            SizeHint::with_exact(4),\n            \"channel with length\",\n        );\n    }\n\n    #[tokio::test]\n    async fn channel_abort() {\n        let (tx, mut rx) = Incoming::channel();\n\n        tx.abort();\n\n        let err = rx.frame().await.unwrap().unwrap_err();\n        assert!(err.is_body_write_aborted(), \"{err:?}\");\n    }\n\n    #[tokio::test]\n    async fn channel_abort_when_buffer_is_full() {\n        let (mut tx, mut rx) = Incoming::channel();\n\n        tx.ready().await.expect(\"ready\");\n        tx.send_data(\"chunk 1\".into()).expect(\"send 1\");\n        // buffer is full, but can still send abort\n        tx.abort();\n\n        let chunk1 = rx\n            .frame()\n            .await\n            .expect(\"item 1\")\n            .expect(\"chunk 1\")\n            .into_data()\n            .unwrap();\n        assert_eq!(chunk1, \"chunk 1\");\n\n        let err = rx.frame().await.unwrap().unwrap_err();\n        assert!(err.is_body_write_aborted(), \"{err:?}\");\n    }\n\n    #[tokio::test]\n    async fn channel_buffers_two() {\n        let (mut tx, _rx) = Incoming::channel();\n\n        
tx.ready().await.expect(\"ready\");\n        tx.send_data(\"chunk 1\".into()).expect(\"send 1\");\n        tx.ready().await.expect(\"ready\");\n        tx.send_data(\"chunk 2\".into()).expect(\"send 2\");\n\n        // buffer is now full, poll_ready should not be ready\n        let res = tokio::time::timeout(\n            std::time::Duration::from_millis(100),\n            std::future::poll_fn(|cx| tx.poll_ready(cx)),\n        )\n        .await;\n\n        assert!(res.is_err(), \"poll_ready unexpectedly became ready\");\n    }\n\n    #[tokio::test]\n    async fn channel_empty() {\n        let (_, mut rx) = Incoming::channel();\n        assert!(rx.frame().await.is_none());\n    }\n\n    #[test]\n    fn channel_ready() {\n        let (mut tx, _rx) = Incoming::h1(DecodedLength::CHUNKED, /* wanter = */ false);\n\n        let mut tx_ready = tokio_test::task::spawn(tx.ready());\n\n        assert!(tx_ready.poll().is_ready(), \"tx is ready immediately\");\n    }\n\n    #[test]\n    fn channel_wanter() {\n        let (mut tx, mut rx) = Incoming::h1(DecodedLength::CHUNKED, /* wanter = */ true);\n\n        let mut tx_ready = tokio_test::task::spawn(tx.ready());\n        let mut rx_data = tokio_test::task::spawn(rx.frame());\n\n        assert!(\n            tx_ready.poll().is_pending(),\n            \"tx isn't ready before rx has been polled\"\n        );\n\n        assert!(rx_data.poll().is_pending(), \"poll rx.data\");\n        assert!(tx_ready.is_woken(), \"rx poll wakes tx\");\n\n        assert!(\n            tx_ready.poll().is_ready(),\n            \"tx is ready after rx has been polled\"\n        );\n    }\n\n    #[test]\n    fn channel_notices_closure() {\n        let (mut tx, rx) = Incoming::h1(DecodedLength::CHUNKED, /* wanter = */ true);\n\n        let mut tx_ready = tokio_test::task::spawn(tx.ready());\n\n        assert!(\n            tx_ready.poll().is_pending(),\n            \"tx isn't ready before rx has been polled\"\n        );\n\n        drop(rx);\n        
assert!(tx_ready.is_woken(), \"dropping rx wakes tx\");\n\n        match tx_ready.poll() {\n            Poll::Ready(Err(ref e)) if e.is_closed() => (),\n            unexpected => panic!(\"tx poll ready unexpected: {unexpected:?}\"),\n        }\n    }\n}\n"
  },
  {
    "path": "src/client/core/body/length.rs",
    "content": "use std::fmt;\n\nuse crate::client::core::error::Parse;\n\n#[derive(Clone, Copy, PartialEq, Eq)]\npub(crate) struct DecodedLength(u64);\n\nimpl DecodedLength {\n    pub(crate) const MAX_LEN: u64 = u64::MAX - 2;\n    pub(crate) const CLOSE_DELIMITED: DecodedLength = DecodedLength(u64::MAX);\n    pub(crate) const CHUNKED: DecodedLength = DecodedLength(u64::MAX - 1);\n    pub(crate) const ZERO: DecodedLength = DecodedLength(0);\n\n    /// Takes the length as a content-length without other checks.\n    ///\n    /// Should only be called if previously confirmed this isn't\n    /// CLOSE_DELIMITED or CHUNKED.\n    #[inline]\n    pub(crate) fn danger_len(self) -> u64 {\n        debug_assert!(self.0 < Self::CHUNKED.0);\n        self.0\n    }\n\n    /// Converts to an `Option<u64>` representing a Known or Unknown length.\n    #[inline]\n    pub(crate) fn into_opt(self) -> Option<u64> {\n        match self {\n            DecodedLength::CHUNKED | DecodedLength::CLOSE_DELIMITED => None,\n            DecodedLength(known) => Some(known),\n        }\n    }\n\n    /// Checks the `u64` is within the maximum allowed for content-length.\n    pub(crate) fn checked_new(len: u64) -> Result<Self, Parse> {\n        if len <= Self::MAX_LEN {\n            Ok(DecodedLength(len))\n        } else {\n            warn!(\n                \"content-length bigger than maximum: {} > {}\",\n                len,\n                Self::MAX_LEN\n            );\n            Err(Parse::TooLarge)\n        }\n    }\n\n    /// Subtracts the given amount from the length, if it's a known length.\n    pub(crate) fn sub_if(&mut self, amt: u64) {\n        match *self {\n            DecodedLength::CHUNKED | DecodedLength::CLOSE_DELIMITED => (),\n            DecodedLength(ref mut known) => {\n                *known -= amt;\n            }\n        }\n    }\n\n    /// Returns whether this represents an exact length.\n    ///\n    /// This includes 0, which of course is an exact known length.\n    
///\n    /// It would return false if \"chunked\" or otherwise size-unknown.\n    #[inline]\n    pub(crate) fn is_exact(&self) -> bool {\n        self.0 <= Self::MAX_LEN\n    }\n}\n\nimpl From<Option<u64>> for DecodedLength {\n    fn from(len: Option<u64>) -> Self {\n        // If the length is u64::MAX, oh well, just reported chunked.\n        len.and_then(|len| Self::checked_new(len).ok())\n            .unwrap_or(DecodedLength::CHUNKED)\n    }\n}\n\nimpl fmt::Debug for DecodedLength {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        match *self {\n            DecodedLength::CLOSE_DELIMITED => f.write_str(\"CLOSE_DELIMITED\"),\n            DecodedLength::CHUNKED => f.write_str(\"CHUNKED\"),\n            DecodedLength(n) => f.debug_tuple(\"DecodedLength\").field(&n).finish(),\n        }\n    }\n}\n\nimpl fmt::Display for DecodedLength {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        match *self {\n            DecodedLength::CLOSE_DELIMITED => f.write_str(\"close-delimited\"),\n            DecodedLength::CHUNKED => f.write_str(\"chunked encoding\"),\n            DecodedLength::ZERO => f.write_str(\"empty\"),\n            DecodedLength(n) => write!(f, \"content-length ({n} bytes)\"),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    impl DecodedLength {\n        pub(crate) fn new(len: u64) -> Self {\n            debug_assert!(len <= Self::MAX_LEN);\n            DecodedLength(len)\n        }\n    }\n\n    #[test]\n    fn sub_if_known() {\n        let mut len = DecodedLength::new(30);\n        len.sub_if(20);\n\n        assert_eq!(len.0, 10);\n    }\n\n    #[test]\n    fn sub_if_chunked() {\n        let mut len = DecodedLength::CHUNKED;\n        len.sub_if(20);\n\n        assert_eq!(len, DecodedLength::CHUNKED);\n    }\n}\n"
  },
  {
    "path": "src/client/core/body/watch.rs",
    "content": "//! An SPSC broadcast channel.\n//!\n//! - The value can only be a `u8`.\n//! - The consumer is only notified if the value is different.\n//! - The value `0` is reserved for closed.\n\nuse std::{\n    sync::{\n        Arc,\n        atomic::{AtomicU8, Ordering},\n    },\n    task::{self, Poll},\n};\n\nuse futures_util::task::AtomicWaker;\n\nuse crate::client::core::Error;\n\ntype Value = u8;\nconst READY: Value = 2;\nconst PENDING: Value = 1;\nconst CLOSED: Value = 0;\n\npub(super) fn channel(wanter: bool) -> (Sender, Receiver) {\n    let initial = if wanter { PENDING } else { READY };\n    let shared = Arc::new(Shared {\n        value: AtomicU8::new(initial),\n        waker: AtomicWaker::new(),\n    });\n\n    (\n        Sender {\n            shared: shared.clone(),\n        },\n        Receiver { shared },\n    )\n}\n\nstruct Shared {\n    value: AtomicU8,\n    waker: AtomicWaker,\n}\n\npub(super) struct Sender {\n    shared: Arc<Shared>,\n}\n\npub(super) struct Receiver {\n    shared: Arc<Shared>,\n}\n\n// ===== impl Sender =====\n\nimpl Sender {\n    #[inline(always)]\n    pub(super) fn ready(&self) {\n        self.send(READY);\n    }\n\n    fn send(&self, value: Value) {\n        if self.shared.value.swap(value, Ordering::SeqCst) != value {\n            self.shared.waker.wake();\n        }\n    }\n}\n\nimpl Drop for Sender {\n    #[inline(always)]\n    fn drop(&mut self) {\n        self.send(CLOSED);\n    }\n}\n\n// ===== impl Receiver =====\n\nimpl Receiver {\n    #[inline(always)]\n    pub(super) fn poll_ready(&self, cx: &mut task::Context<'_>) -> Poll<Result<(), Error>> {\n        self.shared.waker.register(cx.waker());\n        match self.shared.value.load(Ordering::SeqCst) {\n            READY => Poll::Ready(Ok(())),\n            PENDING => Poll::Pending,\n            CLOSED => Poll::Ready(Err(Error::new_closed())),\n            unexpected => unreachable!(\"watch value: {}\", unexpected),\n        }\n    }\n}\n"
  },
  {
    "path": "src/client/core/body.rs",
    "content": "//! Streaming bodies for Requests and Responses\n//!\n//! For both [Clients](crate::client), requests and\n//! responses use streaming bodies, instead of complete buffering. This\n//! allows applications to not use memory they don't need, and allows exerting\n//! back-pressure on connections by only reading when asked.\n//!\n//! There are two pieces to this in crate::core::\n//!\n//! - **The [\\`Body`\\] trait** describes all possible bodies. crate::core: allows any body type that\n//!   implements `Body`, allowing applications to have fine-grained control over their streaming.\n//! - **The [`Incoming`] concrete type**, which is an implementation of `Body`, and returned by\n//!   crate::core: as a \"receive stream\" (so, for server requests and client responses).\n//!\n//! There are additional implementations available in [`http-body-util`][],\n//! such as a `Full` or `Empty` body.\n//!\n//! [`http-body-util`]: https://docs.rs/http-body-util\n\nmod incoming;\nmod length;\nmod watch;\n\npub(crate) use self::{\n    incoming::{Incoming, Sender},\n    length::DecodedLength,\n};\n\nfn _assert_send_sync() {\n    fn _assert_send<T: Send>() {}\n    fn _assert_sync<T: Sync>() {}\n\n    _assert_send::<Incoming>();\n    _assert_sync::<Incoming>();\n}\n"
  },
  {
    "path": "src/client/core/conn/http1.rs",
    "content": "//! HTTP/1 client connections\n\nuse std::{\n    future::Future,\n    pin::Pin,\n    task::{Context, Poll, ready},\n};\n\nuse bytes::Bytes;\nuse http::{Request, Response};\nuse http_body::Body;\nuse httparse::ParserConfig;\nuse tokio::io::{AsyncRead, AsyncWrite};\n\nuse crate::client::core::{\n    Error, Result,\n    body::Incoming,\n    dispatch::{self, TrySendError},\n    error::BoxError,\n    proto::{\n        self,\n        http1::{self, Http1Options, conn::Conn, role::Client},\n    },\n};\n\n/// The sender side of an established connection.\npub struct SendRequest<B> {\n    dispatch: dispatch::Sender<Request<B>, Response<Incoming>>,\n}\n\n/// Deconstructed parts of a `Connection`.\n///\n/// This allows taking apart a `Connection` at a later time, in order to\n/// reclaim the IO object, and additional related pieces.\n#[derive(Debug)]\n#[non_exhaustive]\npub struct Parts<T> {\n    /// The original IO object used in the handshake.\n    pub io: T,\n    /// A buffer of bytes that have been read but not processed as HTTP.\n    ///\n    /// For instance, if the `Connection` is used for an HTTP upgrade request,\n    /// it is possible the server sent back the first bytes of the new protocol\n    /// along with the response upgrade.\n    ///\n    /// You will want to check for any existing bytes if you plan to continue\n    /// communicating on the IO object.\n    pub read_buf: Bytes,\n}\n\n/// A future that processes all HTTP state for the IO object.\n///\n/// In most cases, this should just be spawned into an executor, so that it\n/// can process incoming and outgoing messages, notice hangups, and the like.\n#[must_use = \"futures do nothing unless polled\"]\npub struct Connection<T, B>\nwhere\n    T: AsyncRead + AsyncWrite,\n    B: Body + 'static,\n{\n    inner: http1::dispatch::Dispatcher<http1::dispatch::Client<B>, B, T, Client>,\n}\n\nimpl<T, B> Connection<T, B>\nwhere\n    T: AsyncRead + AsyncWrite + Unpin,\n    B: Body + 'static,\n    B::Error: 
Into<BoxError>,\n{\n    /// Return the inner IO object, and additional information.\n    ///\n    /// Only works for HTTP/1 connections. HTTP/2 connections will panic.\n    #[inline]\n    pub fn into_parts(self) -> Parts<T> {\n        let (io, read_buf, _) = self.inner.into_inner();\n        Parts { io, read_buf }\n    }\n}\n\n/// A builder to configure an HTTP connection.\n///\n/// After setting options, the builder is used to create a handshake future.\n///\n/// **Note**: The default values of options are *not considered stable*. They\n/// are subject to change at any time.\n#[derive(Clone, Debug)]\npub struct Builder {\n    opts: Http1Options,\n}\n\n// ===== impl SendRequest\n\nimpl<B> SendRequest<B> {\n    /// Polls to determine whether this sender can be used yet for a request.\n    ///\n    /// If the associated connection is closed, this returns an Error.\n    #[inline]\n    pub fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<()>> {\n        self.dispatch.poll_ready(cx)\n    }\n\n    /// Waits until the dispatcher is ready\n    ///\n    /// If the associated connection is closed, this returns an Error.\n    #[inline]\n    pub async fn ready(&mut self) -> Result<()> {\n        std::future::poll_fn(|cx| self.poll_ready(cx)).await\n    }\n\n    /// Checks if the connection is currently ready to send a request.\n    ///\n    /// # Note\n    ///\n    /// This is mostly a hint. 
Due to inherent latency of networks, it is\n    /// possible that even after checking this is ready, sending a request\n    /// may still fail because the connection was closed in the meantime.\n    #[inline]\n    pub fn is_ready(&self) -> bool {\n        self.dispatch.is_ready()\n    }\n}\n\nimpl<B> SendRequest<B>\nwhere\n    B: Body + 'static,\n{\n    /// Sends a `Request` on the associated connection.\n    ///\n    /// Returns a future that if successful, yields the `Response`.\n    ///\n    /// # Error\n    ///\n    /// If there was an error before trying to serialize the request to the\n    /// connection, the message will be returned as part of this error.\n    pub fn try_send_request(\n        &mut self,\n        req: Request<B>,\n    ) -> impl Future<Output = Result<Response<Incoming>, TrySendError<Request<B>>>> {\n        let sent = self.dispatch.try_send(req);\n        async move {\n            match sent {\n                Ok(rx) => match rx.await {\n                    Ok(res) => res,\n                    // this is definite bug if it happens, but it shouldn't happen!\n                    Err(_) => panic!(\"dispatch dropped without returning error\"),\n                },\n                Err(req) => {\n                    debug!(\"connection was not ready\");\n                    Err(TrySendError {\n                        error: Error::new_canceled().with(\"connection was not ready\"),\n                        message: Some(req),\n                    })\n                }\n            }\n        }\n    }\n}\n\n// ===== impl Connection\n\nimpl<T, B> Connection<T, B>\nwhere\n    T: AsyncRead + AsyncWrite + Unpin + Send,\n    B: Body + 'static,\n    B::Error: Into<BoxError>,\n{\n    /// Enable this connection to support higher-level HTTP upgrades.\n    #[inline]\n    pub fn with_upgrades(self) -> upgrades::UpgradeableConnection<T, B> {\n        upgrades::UpgradeableConnection { inner: Some(self) }\n    }\n}\n\nimpl<T, B> Future for Connection<T, 
B>\nwhere\n    T: AsyncRead + AsyncWrite + Unpin,\n    B: Body + 'static,\n    B::Data: Send,\n    B::Error: Into<BoxError>,\n{\n    type Output = Result<()>;\n\n    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {\n        match ready!(Pin::new(&mut self.inner).poll(cx))? {\n            proto::Dispatched::Shutdown => Poll::Ready(Ok(())),\n            proto::Dispatched::Upgrade(pending) => {\n                // With no `Send` bound on `I`, we can't try to do\n                // upgrades here. In case a user was trying to use\n                // `upgrade` with this API, send a special\n                // error letting them know about that.\n                pending.manual();\n                Poll::Ready(Ok(()))\n            }\n        }\n    }\n}\n\n// ===== impl Builder\n\nimpl Builder {\n    /// Creates a new connection builder.\n    #[inline]\n    pub fn new() -> Builder {\n        Builder {\n            opts: Default::default(),\n        }\n    }\n\n    /// Provide a options configuration for the HTTP/1 connection.\n    #[inline]\n    pub fn options(&mut self, opts: Http1Options) {\n        self.opts = opts;\n    }\n\n    /// Constructs a connection with the configured options and IO.\n    ///\n    /// Note, if [`Connection`] is not `await`-ed, [`SendRequest`] will\n    /// do nothing.\n    pub async fn handshake<T, B>(self, io: T) -> Result<(SendRequest<B>, Connection<T, B>)>\n    where\n        T: AsyncRead + AsyncWrite + Unpin,\n        B: Body + 'static,\n        B::Data: Send,\n        B::Error: Into<BoxError>,\n    {\n        trace!(\"client handshake HTTP/1\");\n\n        let (tx, rx) = dispatch::channel();\n        let mut conn = Conn::new(io);\n\n        // Set the HTTP/1 parser configuration\n        let h1_parser_config = {\n            let mut h1_parser_config = ParserConfig::default();\n            h1_parser_config\n                .ignore_invalid_headers_in_responses(self.opts.ignore_invalid_headers_in_responses)\n     
           .allow_spaces_after_header_name_in_responses(\n                    self.opts.allow_spaces_after_header_name_in_responses,\n                )\n                .allow_obsolete_multiline_headers_in_responses(\n                    self.opts.allow_obsolete_multiline_headers_in_responses,\n                );\n            h1_parser_config\n        };\n        conn.set_h1_parser_config(h1_parser_config);\n\n        // Set the h1 write strategy\n        if let Some(writev) = self.opts.h1_writev {\n            if writev {\n                conn.set_write_strategy_queue();\n            } else {\n                conn.set_write_strategy_flatten();\n            }\n        }\n\n        // Set the maximum size of the request headers\n        if let Some(max_headers) = self.opts.h1_max_headers {\n            conn.set_http1_max_headers(max_headers);\n        }\n\n        // Enable HTTP/0.9 responses if requested\n        if self.opts.h09_responses {\n            conn.set_h09_responses();\n        }\n\n        // Set the read buffer size if specified\n        if let Some(sz) = self.opts.h1_read_buf_exact_size {\n            conn.set_read_buf_exact_size(sz);\n        }\n\n        // Set the maximum buffer size for HTTP/1 connections\n        if let Some(max) = self.opts.h1_max_buf_size {\n            conn.set_max_buf_size(max);\n        }\n\n        let cd = http1::dispatch::Client::new(rx);\n        let proto = http1::dispatch::Dispatcher::new(cd, conn);\n\n        Ok((SendRequest { dispatch: tx }, Connection { inner: proto }))\n    }\n}\n\nmod upgrades {\n    use super::*;\n    use crate::client::core::upgrade::Upgraded;\n\n    // A future binding a connection with a Service with Upgrade support.\n    //\n    // This type is unnameable outside the crate.\n    #[must_use = \"futures do nothing unless polled\"]\n    pub struct UpgradeableConnection<T, B>\n    where\n        T: AsyncRead + AsyncWrite + Unpin + Send + 'static,\n        B: Body + 'static,\n        B::Error: 
Into<BoxError>,\n    {\n        pub(super) inner: Option<Connection<T, B>>,\n    }\n\n    impl<I, B> Future for UpgradeableConnection<I, B>\n    where\n        I: AsyncRead + AsyncWrite + Unpin + Send + 'static,\n        B: Body + 'static,\n        B::Data: Send,\n        B::Error: Into<BoxError>,\n    {\n        type Output = Result<()>;\n\n        fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {\n            match ready!(Pin::new(&mut self.inner.as_mut().unwrap().inner).poll(cx)) {\n                Ok(proto::Dispatched::Shutdown) => Poll::Ready(Ok(())),\n                Ok(proto::Dispatched::Upgrade(pending)) => {\n                    let Parts { io, read_buf } = self.inner.take().unwrap().into_parts();\n                    pending.fulfill(Upgraded::new(io, read_buf));\n                    Poll::Ready(Ok(()))\n                }\n                Err(e) => Poll::Ready(Err(e)),\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "src/client/core/conn/http2.rs",
    "content": "//! HTTP/2 client connections\n\nuse std::{\n    future::Future,\n    marker::PhantomData,\n    pin::Pin,\n    sync::Arc,\n    task::{Context, Poll, ready},\n};\n\nuse http::{Request, Response};\nuse http_body::Body;\nuse tokio::io::{AsyncRead, AsyncWrite};\n\nuse crate::client::core::{\n    Result,\n    body::Incoming,\n    dispatch::{self, TrySendError},\n    error::{BoxError, Error},\n    proto::{\n        self,\n        http2::{Http2Options, ping},\n    },\n    rt::{Time, Timer, bounds::Http2ClientConnExec},\n};\n\n/// The sender side of an established connection.\npub struct SendRequest<B> {\n    dispatch: dispatch::UnboundedSender<Request<B>, Response<Incoming>>,\n}\n\nimpl<B> Clone for SendRequest<B> {\n    #[inline]\n    fn clone(&self) -> SendRequest<B> {\n        SendRequest {\n            dispatch: self.dispatch.clone(),\n        }\n    }\n}\n\n/// A future that processes all HTTP state for the IO object.\n///\n/// In most cases, this should just be spawned into an executor, so that it\n/// can process incoming and outgoing messages, notice hangups, and the like.\n#[must_use = \"futures do nothing unless polled\"]\npub struct Connection<T, B, E>\nwhere\n    T: AsyncRead + AsyncWrite + Unpin,\n    B: Body + 'static,\n    E: Http2ClientConnExec<B, T> + Unpin,\n    B::Error: Into<BoxError>,\n{\n    inner: (PhantomData<T>, proto::http2::client::ClientTask<B, E, T>),\n}\n\n/// A builder to configure an HTTP connection.\n///\n/// After setting options, the builder is used to create a handshake future.\n///\n/// **Note**: The default values of options are *not considered stable*. 
They\n/// are subject to change at any time.\n#[derive(Clone)]\npub struct Builder<Ex> {\n    exec: Ex,\n    timer: Time,\n    opts: Http2Options,\n}\n\n// ===== impl SendRequest\n\nimpl<B> SendRequest<B> {\n    /// Polls to determine whether this sender can be used yet for a request.\n    ///\n    /// If the associated connection is closed, this returns an Error.\n    #[inline]\n    pub fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<()>> {\n        if self.is_closed() {\n            Poll::Ready(Err(Error::new_closed()))\n        } else {\n            Poll::Ready(Ok(()))\n        }\n    }\n\n    /// Waits until the dispatcher is ready\n    ///\n    /// If the associated connection is closed, this returns an Error.\n    #[inline]\n    pub async fn ready(&mut self) -> Result<()> {\n        std::future::poll_fn(|cx| self.poll_ready(cx)).await\n    }\n\n    /// Checks if the connection is currently ready to send a request.\n    ///\n    /// # Note\n    ///\n    /// This is mostly a hint. 
Due to inherent latency of networks, it is\n    /// possible that even after checking this is ready, sending a request\n    /// may still fail because the connection was closed in the meantime.\n    #[inline]\n    pub fn is_ready(&self) -> bool {\n        self.dispatch.is_ready()\n    }\n\n    /// Checks if the connection side has been closed.\n    #[inline]\n    pub fn is_closed(&self) -> bool {\n        self.dispatch.is_closed()\n    }\n}\n\nimpl<B> SendRequest<B>\nwhere\n    B: Body + 'static,\n{\n    /// Sends a `Request` on the associated connection.\n    ///\n    /// Returns a future that if successful, yields the `Response`.\n    ///\n    /// # Error\n    ///\n    /// If there was an error before trying to serialize the request to the\n    /// connection, the message will be returned as part of this error.\n    pub fn try_send_request(\n        &mut self,\n        req: Request<B>,\n    ) -> impl Future<Output = Result<Response<Incoming>, TrySendError<Request<B>>>> {\n        let sent = self.dispatch.try_send(req);\n        async move {\n            match sent {\n                Ok(rx) => match rx.await {\n                    Ok(Ok(res)) => Ok(res),\n                    Ok(Err(err)) => Err(err),\n                    // this is definite bug if it happens, but it shouldn't happen!\n                    Err(_) => panic!(\"dispatch dropped without returning error\"),\n                },\n                Err(req) => {\n                    debug!(\"connection was not ready\");\n                    let error = Error::new_canceled().with(\"connection was not ready\");\n                    Err(TrySendError {\n                        error,\n                        message: Some(req),\n                    })\n                }\n            }\n        }\n    }\n}\n\n// ===== impl Connection\n\nimpl<T, B, E> Future for Connection<T, B, E>\nwhere\n    T: AsyncRead + AsyncWrite + Unpin + 'static,\n    B: Body + 'static + Unpin,\n    B::Data: Send,\n    E: Unpin,\n    
B::Error: Into<BoxError>,\n    E: Http2ClientConnExec<B, T> + Unpin,\n{\n    type Output = Result<()>;\n\n    #[inline]\n    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {\n        match ready!(Pin::new(&mut self.inner.1).poll(cx))? {\n            proto::Dispatched::Shutdown => Poll::Ready(Ok(())),\n            proto::Dispatched::Upgrade(_pending) => unreachable!(\"http2 cannot upgrade\"),\n        }\n    }\n}\n\n// ===== impl Builder\n\nimpl<Ex> Builder<Ex>\nwhere\n    Ex: Clone,\n{\n    /// Creates a new connection builder.\n    #[inline]\n    pub fn new(exec: Ex) -> Builder<Ex> {\n        Builder {\n            exec,\n            timer: Time::Empty,\n            opts: Default::default(),\n        }\n    }\n\n    /// Provide a timer to execute background HTTP2 tasks.\n    #[inline]\n    pub fn timer<M>(&mut self, timer: M)\n    where\n        M: Timer + Send + Sync + 'static,\n    {\n        self.timer = Time::Timer(Arc::new(timer));\n    }\n\n    /// Provide a options configuration for the HTTP/2 connection.\n    #[inline]\n    pub fn options(&mut self, opts: Http2Options) {\n        self.opts = opts;\n    }\n\n    /// Constructs a connection with the configured options and IO.\n    ///\n    /// Note, if [`Connection`] is not `await`-ed, [`SendRequest`] will\n    /// do nothing.\n    pub async fn handshake<T, B>(self, io: T) -> Result<(SendRequest<B>, Connection<T, B, Ex>)>\n    where\n        T: AsyncRead + AsyncWrite + Unpin,\n        B: Body + 'static,\n        B::Data: Send,\n        B::Error: Into<BoxError>,\n        Ex: Http2ClientConnExec<B, T> + Unpin,\n    {\n        trace!(\"client handshake HTTP/2\");\n\n        // Crate the HTTP/2 client with the provided options.\n        let mut builder = http2::client::Builder::default();\n        builder\n            .initial_max_send_streams(self.opts.initial_max_send_streams)\n            .initial_window_size(self.opts.initial_window_size)\n            
.initial_connection_window_size(self.opts.initial_conn_window_size)\n            .max_send_buffer_size(self.opts.max_send_buffer_size);\n        if let Some(id) = self.opts.initial_stream_id {\n            builder.initial_stream_id(id);\n        }\n        if let Some(max) = self.opts.max_pending_accept_reset_streams {\n            builder.max_pending_accept_reset_streams(max);\n        }\n        if let Some(max) = self.opts.max_concurrent_reset_streams {\n            builder.max_concurrent_reset_streams(max);\n        }\n        if let Some(max) = self.opts.max_concurrent_streams {\n            builder.max_concurrent_streams(max);\n        }\n        if let Some(max) = self.opts.max_header_list_size {\n            builder.max_header_list_size(max);\n        }\n        if let Some(opt) = self.opts.enable_push {\n            builder.enable_push(opt);\n        }\n        if let Some(max) = self.opts.max_frame_size {\n            builder.max_frame_size(max);\n        }\n        if let Some(max) = self.opts.header_table_size {\n            builder.header_table_size(max);\n        }\n        if let Some(v) = self.opts.enable_connect_protocol {\n            builder.enable_connect_protocol(v);\n        }\n        if let Some(v) = self.opts.no_rfc7540_priorities {\n            builder.no_rfc7540_priorities(v);\n        }\n        if let Some(order) = self.opts.settings_order {\n            builder.settings_order(order);\n        }\n        if let Some(stream_dependency) = self.opts.headers_stream_dependency {\n            builder.headers_stream_dependency(stream_dependency);\n        }\n        if let Some(order) = self.opts.headers_pseudo_order {\n            builder.headers_pseudo_order(order);\n        }\n        if let Some(priority) = self.opts.priorities {\n            builder.priorities(priority);\n        }\n\n        // Create the ping configuration for the connection.\n        let ping_config = ping::Config::new(\n            self.opts.adaptive_window,\n         
   self.opts.initial_window_size,\n            self.opts.keep_alive_interval,\n            self.opts.keep_alive_timeout,\n            self.opts.keep_alive_while_idle,\n        );\n\n        let (tx, rx) = dispatch::channel();\n        let h2 =\n            proto::http2::client::handshake(io, rx, builder, ping_config, self.exec, self.timer)\n                .await?;\n        Ok((\n            SendRequest {\n                dispatch: tx.unbound(),\n            },\n            Connection {\n                inner: (PhantomData, h2),\n            },\n        ))\n    }\n}\n"
  },
  {
    "path": "src/client/core/conn.rs",
    "content": "//! Lower-level client connection API.\n//!\n//! The types in this module are to provide a lower-level API based around a\n//! single connection. Connecting to a host, pooling connections, and the like\n//! are not handled at this level. This module provides the building blocks to\n//! customize those things externally.\n\npub mod http1;\npub mod http2;\n"
  },
  {
    "path": "src/client/core/dispatch.rs",
    "content": "use std::{\n    future::Future,\n    pin::Pin,\n    task::{Context, Poll},\n};\n\nuse http::{Request, Response};\nuse http_body::Body;\nuse pin_project_lite::pin_project;\nuse tokio::sync::{mpsc, oneshot};\n\nuse super::{Error, body::Incoming, proto::http2::client::ResponseFutMap};\n\ntype RetryPromise<T, U> = oneshot::Receiver<Result<U, TrySendError<T>>>;\n\npub(crate) fn channel<T, U>() -> (Sender<T, U>, Receiver<T, U>) {\n    let (tx, rx) = mpsc::unbounded_channel();\n    let (giver, taker) = want::new();\n    (\n        Sender {\n            buffered_once: false,\n            giver,\n            inner: tx,\n        },\n        Receiver { inner: rx, taker },\n    )\n}\n\n/// An error when calling `try_send_request`.\n///\n/// There is a possibility of an error occurring on a connection in-between the\n/// time that a request is queued and when it is actually written to the IO\n/// transport. If that happens, it is safe to return the request back to the\n/// caller, as it was never fully sent.\n#[derive(Debug)]\npub struct TrySendError<T> {\n    pub(crate) error: Error,\n    pub(crate) message: Option<T>,\n}\n\n/// A bounded sender of requests and callbacks for when responses are ready.\n///\n/// While the inner sender is unbounded, the Giver is used to determine\n/// if the Receiver is ready for another request.\npub(crate) struct Sender<T, U> {\n    /// One message is always allowed, even if the Receiver hasn't asked\n    /// for it yet. This boolean keeps track of whether we've sent one\n    /// without notice.\n    buffered_once: bool,\n    /// The Giver helps watch that the Receiver side has been polled\n    /// when the queue is empty. 
This helps us know when a request and\n    /// response have been fully processed, and a connection is ready\n    /// for more.\n    giver: want::Giver,\n    /// Actually bounded by the Giver, plus `buffered_once`.\n    inner: mpsc::UnboundedSender<Envelope<T, U>>,\n}\n\n/// An unbounded version.\n///\n/// Cannot poll the Giver, but can still use it to determine if the Receiver\n/// has been dropped. However, this version can be cloned.\npub(crate) struct UnboundedSender<T, U> {\n    /// Only used for `is_closed`, since mpsc::UnboundedSender cannot be checked.\n    giver: want::SharedGiver,\n    inner: mpsc::UnboundedSender<Envelope<T, U>>,\n}\n\nimpl<T, U> Sender<T, U> {\n    #[inline]\n    pub(crate) fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<super::Result<()>> {\n        self.giver.poll_want(cx).map_err(|_| Error::new_closed())\n    }\n\n    #[inline]\n    pub(crate) fn is_ready(&self) -> bool {\n        self.giver.is_wanting()\n    }\n\n    pub(crate) fn try_send(&mut self, val: T) -> Result<RetryPromise<T, U>, T> {\n        if self.giver.give() || !self.buffered_once {\n            // If the receiver is ready *now*, then of course we can send.\n            //\n            // If the receiver isn't ready yet, but we don't have anything\n            // in the channel yet, then allow one message.\n            self.buffered_once = true;\n        } else {\n            return Err(val);\n        };\n\n        let (tx, rx) = oneshot::channel();\n        self.inner\n            .send(Envelope(Some((val, Callback(Some(tx))))))\n            .map(move |_| rx)\n            .map_err(|mut e| (e.0).0.take().expect(\"envelope not dropped\").0)\n    }\n\n    #[inline]\n    pub(crate) fn unbound(self) -> UnboundedSender<T, U> {\n        UnboundedSender {\n            giver: self.giver.shared(),\n            inner: self.inner,\n        }\n    }\n}\n\nimpl<T, U> UnboundedSender<T, U> {\n    #[inline]\n    pub(crate) fn is_ready(&self) -> bool {\n        
!self.giver.is_canceled()\n    }\n\n    #[inline]\n    pub(crate) fn is_closed(&self) -> bool {\n        self.giver.is_canceled()\n    }\n\n    pub(crate) fn try_send(&mut self, val: T) -> Result<RetryPromise<T, U>, T> {\n        let (tx, rx) = oneshot::channel();\n        self.inner\n            .send(Envelope(Some((val, Callback(Some(tx))))))\n            .map(move |_| rx)\n            .map_err(|mut e| (e.0).0.take().expect(\"envelope not dropped\").0)\n    }\n}\n\nimpl<T, U> Clone for UnboundedSender<T, U> {\n    #[inline]\n    fn clone(&self) -> Self {\n        UnboundedSender {\n            giver: self.giver.clone(),\n            inner: self.inner.clone(),\n        }\n    }\n}\n\npub(crate) struct Receiver<T, U> {\n    inner: mpsc::UnboundedReceiver<Envelope<T, U>>,\n    taker: want::Taker,\n}\n\nimpl<T, U> Receiver<T, U> {\n    pub(crate) fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll<Option<(T, Callback<T, U>)>> {\n        match self.inner.poll_recv(cx) {\n            Poll::Ready(item) => {\n                Poll::Ready(item.map(|mut env| env.0.take().expect(\"envelope not dropped\")))\n            }\n            Poll::Pending => {\n                self.taker.want();\n                Poll::Pending\n            }\n        }\n    }\n\n    #[inline]\n    pub(crate) fn close(&mut self) {\n        self.taker.cancel();\n        self.inner.close();\n    }\n\n    #[inline]\n    pub(crate) fn try_recv(&mut self) -> Option<(T, Callback<T, U>)> {\n        use futures_util::FutureExt;\n        match self.inner.recv().now_or_never() {\n            Some(Some(mut env)) => env.0.take(),\n            _ => None,\n        }\n    }\n}\n\nimpl<T, U> Drop for Receiver<T, U> {\n    #[inline]\n    fn drop(&mut self) {\n        // Notify the giver about the closure first, before dropping\n        // the mpsc::Receiver.\n        self.taker.cancel();\n    }\n}\n\nstruct Envelope<T, U>(Option<(T, Callback<T, U>)>);\n\nimpl<T, U> Drop for Envelope<T, U> {\n    fn drop(&mut self) 
{\n        if let Some((val, cb)) = self.0.take() {\n            cb.send(Err(TrySendError {\n                error: Error::new_canceled().with(\"connection closed\"),\n                message: Some(val),\n            }));\n        }\n    }\n}\n\npub(crate) struct Callback<T, U>(Option<oneshot::Sender<Result<U, TrySendError<T>>>>);\n\nimpl<T, U> Drop for Callback<T, U> {\n    fn drop(&mut self) {\n        if let Some(tx) = self.0.take() {\n            let _ = tx.send(Err(TrySendError {\n                error: dispatch_gone(),\n                message: None,\n            }));\n        }\n    }\n}\n\n#[cold]\nfn dispatch_gone() -> Error {\n    // FIXME(nox): What errors do we want here?\n    Error::new_user_dispatch_gone().with(if std::thread::panicking() {\n        \"user code panicked\"\n    } else {\n        \"runtime dropped the dispatch task\"\n    })\n}\n\nimpl<T, U> Callback<T, U> {\n    const MISSING_SENDER: &'static str = \"callback sender missing\";\n\n    #[inline]\n    pub(crate) fn is_canceled(&self) -> bool {\n        self.0.as_ref().expect(Self::MISSING_SENDER).is_closed()\n    }\n\n    #[inline]\n    pub(crate) fn poll_canceled(&mut self, cx: &mut Context<'_>) -> Poll<()> {\n        self.0.as_mut().expect(Self::MISSING_SENDER).poll_closed(cx)\n    }\n\n    #[inline]\n    pub(crate) fn send(mut self, val: Result<U, TrySendError<T>>) {\n        let _ = self.0.take().expect(Self::MISSING_SENDER).send(val);\n    }\n}\n\nimpl<T> TrySendError<T> {\n    /// Take the message from this error.\n    ///\n    /// The message will not always have been recovered. If an error occurs\n    /// after the message has been serialized onto the connection, it will not\n    /// be available here.\n    #[inline]\n    pub fn take_message(&mut self) -> Option<T> {\n        self.message.take()\n    }\n\n    /// Consumes this to return the inner error.\n    #[inline]\n    pub fn into_error(self) -> Error {\n        self.error\n    }\n}\n\npin_project! 
{\n    pub struct SendWhen<B>\n    where\n        B: Body,\n        B: 'static,\n    {\n        #[pin]\n        pub(crate) when: ResponseFutMap<B>,\n        #[pin]\n        pub(crate) call_back: Option<Callback<Request<B>, Response<Incoming>>>,\n    }\n}\n\nimpl<B> Future for SendWhen<B>\nwhere\n    B: Body + 'static,\n    B::Data: Send,\n{\n    type Output = ();\n\n    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {\n        let mut this = self.project();\n        let mut call_back = this.call_back.take().expect(\"polled after complete\");\n\n        match Pin::new(&mut this.when).poll(cx) {\n            Poll::Ready(Ok(res)) => {\n                call_back.send(Ok(res));\n                Poll::Ready(())\n            }\n            Poll::Pending => {\n                // check if the callback is canceled\n                match call_back.poll_canceled(cx) {\n                    Poll::Ready(v) => v,\n                    Poll::Pending => {\n                        // Move call_back back to struct before return\n                        this.call_back.set(Some(call_back));\n                        return Poll::Pending;\n                    }\n                };\n                trace!(\"send_when canceled\");\n                // Tell pipe_task to reset the h2 stream so that\n                // RST_STREAM is sent and flow-control capacity freed.\n                this.when.as_mut().cancel();\n                Poll::Ready(())\n            }\n            Poll::Ready(Err((error, message))) => {\n                call_back.send(Err(TrySendError { error, message }));\n                Poll::Ready(())\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "src/client/core/error.rs",
    "content": "//! Error and Result module.\nuse std::{error::Error as StdError, fmt};\n\n/// Result type often returned from methods that can have crate::core: `Error`s.\npub type Result<T, E = Error> = std::result::Result<T, E>;\n\npub type BoxError = Box<dyn StdError + Send + Sync>;\n\ntype Cause = BoxError;\n\n/// Represents errors that can occur handling HTTP streams.\n///\n/// # Formatting\n///\n/// The `Display` implementation of this type will only print the details of\n/// this level of error, even though it may have been caused by another error\n/// and contain that error in its source. To print all the relevant\n/// information, including the source chain, using something like\n/// `std::error::Report`, or equivalent 3rd party types.\n///\n/// The contents of the formatted error message of this specific `Error` type\n/// is unspecified. **You must not depend on it.** The wording and details may\n/// change in any version, with the goal of improving error messages.\n///\n/// # Source\n///\n/// A `crate::core::Error` may be caused by another error. To aid in debugging,\n/// those are exposed in `Error::source()` as erased types. While it is\n/// possible to check the exact type of the sources, they **can not be depended\n/// on**. 
They may come from private internal dependencies, and are subject to\n/// change at any moment.\npub struct Error {\n    inner: Box<ErrorImpl>,\n}\n\nstruct ErrorImpl {\n    kind: Kind,\n    cause: Option<Cause>,\n}\n\n#[derive(Debug)]\npub(super) enum Kind {\n    Parse(Parse),\n    User(User),\n    /// A message reached EOF, but is not complete.\n    IncompleteMessage,\n    /// A connection received a message (or bytes) when not waiting for one.\n    UnexpectedMessage,\n    /// A pending item was dropped before ever being processed.\n    Canceled,\n    /// Indicates a channel (client or body sender) is closed.\n    ChannelClosed,\n    /// An `io::Error` that occurred while trying to read or write to a network stream.\n    Io,\n    /// Error while reading a body from connection.\n    Body,\n    /// Error while writing a body to connection.\n    BodyWrite,\n    /// Error calling AsyncWrite::shutdown()\n    Shutdown,\n    /// A general error from h2.\n    Http2,\n}\n\n#[derive(Debug)]\npub(crate) enum Parse {\n    Method,\n    Version,\n    VersionH2,\n    Uri,\n    Header(Header),\n    TooLarge,\n    Status,\n    Internal,\n}\n\n#[derive(Debug)]\npub(crate) enum Header {\n    Token,\n    ContentLengthInvalid,\n    TransferEncodingUnexpected,\n}\n\n#[derive(Debug)]\npub(super) enum User {\n    /// Error calling user's Body::poll_data().\n    Body,\n    /// The user aborted writing of the outgoing body.\n    BodyWriteAborted,\n    /// User tried to send a connect request with a nonzero body\n    InvalidConnectWithBody,\n    /// Error from future of user's Service.\n    Service,\n    /// User tried polling for an upgrade that doesn't exist.\n    NoUpgrade,\n    /// User polled for an upgrade, but low-level API is not using upgrades.\n    ManualUpgrade,\n    /// The dispatch task is gone.\n    DispatchGone,\n}\n\n// Sentinel type to indicate the error was caused by a timeout.\n#[derive(Debug)]\npub(super) struct TimedOut;\n\nimpl Error {\n    /// Returns true if this 
was an HTTP parse error.\n    #[inline]\n    pub fn is_parse(&self) -> bool {\n        matches!(self.inner.kind, Kind::Parse(_))\n    }\n\n    /// Returns true if this was an HTTP parse error caused by an invalid response status code or\n    /// reason phrase.\n    #[inline]\n    pub fn is_parse_status(&self) -> bool {\n        matches!(self.inner.kind, Kind::Parse(Parse::Status))\n    }\n\n    /// Returns true if this error was caused by user code.\n    #[inline]\n    pub fn is_user(&self) -> bool {\n        matches!(self.inner.kind, Kind::User(_))\n    }\n\n    /// Returns true if this was about a `Request` that was canceled.\n    #[inline]\n    pub fn is_canceled(&self) -> bool {\n        matches!(self.inner.kind, Kind::Canceled)\n    }\n\n    /// Returns true if a sender's channel is closed.\n    #[inline]\n    pub fn is_closed(&self) -> bool {\n        matches!(self.inner.kind, Kind::ChannelClosed)\n    }\n\n    /// Returns true if the connection closed before a message could complete.\n    #[inline]\n    pub fn is_incomplete_message(&self) -> bool {\n        matches!(self.inner.kind, Kind::IncompleteMessage)\n    }\n\n    /// Returns true if the body write was aborted.\n    #[inline]\n    pub fn is_body_write_aborted(&self) -> bool {\n        matches!(self.inner.kind, Kind::User(User::BodyWriteAborted))\n    }\n\n    /// Returns true if the error was caused by a timeout.\n    #[inline]\n    pub fn is_timeout(&self) -> bool {\n        self.find_source::<TimedOut>().is_some()\n    }\n\n    #[inline]\n    pub(super) fn new(kind: Kind) -> Error {\n        Error {\n            inner: Box::new(ErrorImpl { kind, cause: None }),\n        }\n    }\n\n    #[inline]\n    pub(super) fn with<C: Into<Cause>>(mut self, cause: C) -> Error {\n        self.inner.cause = Some(cause.into());\n        self\n    }\n\n    pub(crate) fn find_source<E: StdError + 'static>(&self) -> Option<&E> {\n        let mut cause = self.source();\n        while let Some(err) = cause {\n           
 if let Some(typed) = err.downcast_ref() {\n                return Some(typed);\n            }\n            cause = err.source();\n        }\n\n        // else\n        None\n    }\n\n    pub(super) fn h2_reason(&self) -> http2::Reason {\n        // Find an http2::Reason somewhere in the cause stack, if it exists,\n        // otherwise assume an INTERNAL_ERROR.\n        self.find_source::<http2::Error>()\n            .and_then(|h2_err| h2_err.reason())\n            .unwrap_or(http2::Reason::INTERNAL_ERROR)\n    }\n\n    #[inline]\n    pub(super) fn new_canceled() -> Error {\n        Error::new(Kind::Canceled)\n    }\n\n    #[inline]\n    pub(super) fn new_incomplete() -> Error {\n        Error::new(Kind::IncompleteMessage)\n    }\n\n    #[inline]\n    pub(super) fn new_too_large() -> Error {\n        Error::new(Kind::Parse(Parse::TooLarge))\n    }\n\n    #[inline]\n    pub(super) fn new_version_h2() -> Error {\n        Error::new(Kind::Parse(Parse::VersionH2))\n    }\n\n    #[inline]\n    pub(super) fn new_unexpected_message() -> Error {\n        Error::new(Kind::UnexpectedMessage)\n    }\n\n    #[inline]\n    pub(super) fn new_io(cause: std::io::Error) -> Error {\n        Error::new(Kind::Io).with(cause)\n    }\n\n    #[inline]\n    pub(super) fn new_closed() -> Error {\n        Error::new(Kind::ChannelClosed)\n    }\n\n    #[inline]\n    pub(super) fn new_body<E: Into<Cause>>(cause: E) -> Error {\n        Error::new(Kind::Body).with(cause)\n    }\n\n    #[inline]\n    pub(super) fn new_body_write<E: Into<Cause>>(cause: E) -> Error {\n        Error::new(Kind::BodyWrite).with(cause)\n    }\n\n    #[inline]\n    pub(super) fn new_body_write_aborted() -> Error {\n        Error::new(Kind::User(User::BodyWriteAborted))\n    }\n\n    #[inline]\n    fn new_user(user: User) -> Error {\n        Error::new(Kind::User(user))\n    }\n\n    #[inline]\n    pub(super) fn new_user_no_upgrade() -> Error {\n        Error::new_user(User::NoUpgrade)\n    }\n\n    #[inline]\n    
pub(super) fn new_user_manual_upgrade() -> Error {\n        Error::new_user(User::ManualUpgrade)\n    }\n\n    #[inline]\n    pub(super) fn new_user_service<E: Into<Cause>>(cause: E) -> Error {\n        Error::new_user(User::Service).with(cause)\n    }\n\n    #[inline]\n    pub(super) fn new_user_body<E: Into<Cause>>(cause: E) -> Error {\n        Error::new_user(User::Body).with(cause)\n    }\n\n    #[inline]\n    pub(super) fn new_user_invalid_connect() -> Error {\n        Error::new_user(User::InvalidConnectWithBody)\n    }\n\n    #[inline]\n    pub(super) fn new_shutdown(cause: std::io::Error) -> Error {\n        Error::new(Kind::Shutdown).with(cause)\n    }\n\n    #[inline]\n    pub(super) fn new_user_dispatch_gone() -> Error {\n        Error::new(Kind::User(User::DispatchGone))\n    }\n\n    pub(super) fn new_h2(cause: ::http2::Error) -> Error {\n        if cause.is_io() {\n            Error::new_io(cause.into_io().expect(\"http2::Error::is_io\"))\n        } else {\n            Error::new(Kind::Http2).with(cause)\n        }\n    }\n\n    fn description(&self) -> &str {\n        match self.inner.kind {\n            Kind::Parse(Parse::Method) => \"invalid HTTP method parsed\",\n            Kind::Parse(Parse::Version) => \"invalid HTTP version parsed\",\n            Kind::Parse(Parse::VersionH2) => \"invalid HTTP version parsed (found HTTP2 preface)\",\n            Kind::Parse(Parse::Uri) => \"invalid URI\",\n            Kind::Parse(Parse::Header(Header::Token)) => \"invalid HTTP header parsed\",\n            Kind::Parse(Parse::Header(Header::ContentLengthInvalid)) => {\n                \"invalid content-length parsed\"\n            }\n            Kind::Parse(Parse::Header(Header::TransferEncodingUnexpected)) => {\n                \"unexpected transfer-encoding parsed\"\n            }\n            Kind::Parse(Parse::TooLarge) => \"message head is too large\",\n            Kind::Parse(Parse::Status) => \"invalid HTTP status-code parsed\",\n            
Kind::Parse(Parse::Internal) => {\n                \"internal error inside wreq and/or its dependencies, please report\"\n            }\n\n            Kind::IncompleteMessage => \"connection closed before message completed\",\n            Kind::UnexpectedMessage => \"received unexpected message from connection\",\n            Kind::ChannelClosed => \"channel closed\",\n            Kind::Canceled => \"operation was canceled\",\n            Kind::Body => \"error reading a body from connection\",\n            Kind::BodyWrite => \"error writing a body to connection\",\n            Kind::Shutdown => \"error shutting down connection\",\n            Kind::Http2 => \"http2 error\",\n            Kind::Io => \"connection error\",\n\n            Kind::User(User::Body) => \"error from user's Body stream\",\n            Kind::User(User::BodyWriteAborted) => \"user body write aborted\",\n            Kind::User(User::InvalidConnectWithBody) => {\n                \"user sent CONNECT request with non-zero body\"\n            }\n            Kind::User(User::Service) => \"error from user's Service\",\n            Kind::User(User::NoUpgrade) => \"no upgrade available\",\n            Kind::User(User::ManualUpgrade) => \"upgrade expected but low level API in use\",\n            Kind::User(User::DispatchGone) => \"dispatch task is gone\",\n        }\n    }\n}\n\nimpl fmt::Debug for Error {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        let mut f = f.debug_tuple(\"crate::core::Error\");\n        f.field(&self.inner.kind);\n        if let Some(ref cause) = self.inner.cause {\n            f.field(cause);\n        }\n        f.finish()\n    }\n}\n\nimpl fmt::Display for Error {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        f.write_str(self.description())\n    }\n}\n\nimpl StdError for Error {\n    fn source(&self) -> Option<&(dyn StdError + 'static)> {\n        self.inner\n            .cause\n            .as_ref()\n            .map(|cause| 
&**cause as &(dyn StdError + 'static))\n    }\n}\n\n#[doc(hidden)]\nimpl From<Parse> for Error {\n    fn from(err: Parse) -> Error {\n        Error::new(Kind::Parse(err))\n    }\n}\n\nimpl Parse {\n    #[inline]\n    pub(crate) fn content_length_invalid() -> Self {\n        Parse::Header(Header::ContentLengthInvalid)\n    }\n\n    #[inline]\n    pub(crate) fn transfer_encoding_unexpected() -> Self {\n        Parse::Header(Header::TransferEncodingUnexpected)\n    }\n}\n\nimpl From<httparse::Error> for Parse {\n    fn from(err: httparse::Error) -> Parse {\n        match err {\n            httparse::Error::HeaderName\n            | httparse::Error::HeaderValue\n            | httparse::Error::NewLine\n            | httparse::Error::Token => Parse::Header(Header::Token),\n            httparse::Error::Status => Parse::Status,\n            httparse::Error::TooManyHeaders => Parse::TooLarge,\n            httparse::Error::Version => Parse::Version,\n        }\n    }\n}\n\nimpl From<http::method::InvalidMethod> for Parse {\n    fn from(_: http::method::InvalidMethod) -> Parse {\n        Parse::Method\n    }\n}\n\nimpl From<http::status::InvalidStatusCode> for Parse {\n    fn from(_: http::status::InvalidStatusCode) -> Parse {\n        Parse::Status\n    }\n}\n\nimpl From<http::uri::InvalidUri> for Parse {\n    fn from(_: http::uri::InvalidUri) -> Parse {\n        Parse::Uri\n    }\n}\n\nimpl From<http::uri::InvalidUriParts> for Parse {\n    fn from(_: http::uri::InvalidUriParts) -> Parse {\n        Parse::Uri\n    }\n}\n\n// ===== impl TimedOut ====\n\nimpl fmt::Display for TimedOut {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        f.write_str(\"operation timed out\")\n    }\n}\n\nimpl StdError for TimedOut {}\n\n#[cfg(test)]\nmod tests {\n    use std::mem;\n\n    use super::*;\n\n    fn assert_send_sync<T: Send + Sync + 'static>() {}\n\n    #[test]\n    fn error_satisfies_send_sync() {\n        assert_send_sync::<Error>()\n    }\n\n    #[test]\n    
fn error_size_of() {\n        assert_eq!(mem::size_of::<Error>(), mem::size_of::<usize>());\n    }\n\n    #[test]\n    fn h2_reason_unknown() {\n        let closed = Error::new_closed();\n        assert_eq!(closed.h2_reason(), http2::Reason::INTERNAL_ERROR);\n    }\n\n    #[test]\n    fn h2_reason_one_level() {\n        let body_err = Error::new_user_body(http2::Error::from(http2::Reason::ENHANCE_YOUR_CALM));\n        assert_eq!(body_err.h2_reason(), http2::Reason::ENHANCE_YOUR_CALM);\n    }\n\n    #[test]\n    fn h2_reason_nested() {\n        let recvd = Error::new_h2(http2::Error::from(http2::Reason::HTTP_1_1_REQUIRED));\n        // Suppose a user were proxying the received error\n        let svc_err = Error::new_user_service(recvd);\n        assert_eq!(svc_err.h2_reason(), http2::Reason::HTTP_1_1_REQUIRED);\n    }\n}\n"
  },
  {
    "path": "src/client/core/proto/headers.rs",
    "content": "use bytes::BytesMut;\nuse http::{\n    HeaderMap, Method,\n    header::{CONTENT_LENGTH, HeaderValue, ValueIter},\n};\n\n#[inline]\npub(super) fn connection_keep_alive(value: &HeaderValue) -> bool {\n    connection_has(value, \"keep-alive\")\n}\n\n#[inline]\npub(super) fn connection_close(value: &HeaderValue) -> bool {\n    connection_has(value, \"close\")\n}\n\nfn connection_has(value: &HeaderValue, needle: &str) -> bool {\n    if let Ok(s) = value.to_str() {\n        for val in s.split(',') {\n            if val.trim().eq_ignore_ascii_case(needle) {\n                return true;\n            }\n        }\n    }\n    false\n}\n\n#[inline]\npub(super) fn content_length_parse_all(headers: &HeaderMap) -> Option<u64> {\n    content_length_parse_all_values(headers.get_all(CONTENT_LENGTH).into_iter())\n}\n\npub(super) fn content_length_parse_all_values(values: ValueIter<'_, HeaderValue>) -> Option<u64> {\n    // If multiple Content-Length headers were sent, everything can still\n    // be alright if they all contain the same value, and all parse\n    // correctly. 
If not, then it's an error.\n\n    let mut content_length: Option<u64> = None;\n    for h in values {\n        if let Ok(line) = h.to_str() {\n            for v in line.split(',') {\n                if let Some(n) = from_digits(v.trim().as_bytes()) {\n                    if content_length.is_none() {\n                        content_length = Some(n)\n                    } else if content_length != Some(n) {\n                        return None;\n                    }\n                } else {\n                    return None;\n                }\n            }\n        } else {\n            return None;\n        }\n    }\n\n    content_length\n}\n\nfn from_digits(bytes: &[u8]) -> Option<u64> {\n    // cannot use FromStr for u64, since it allows a signed prefix\n    let mut result = 0u64;\n    const RADIX: u64 = 10;\n\n    if bytes.is_empty() {\n        return None;\n    }\n\n    for &b in bytes {\n        // can't use char::to_digit, since we haven't verified these bytes\n        // are utf-8.\n        match b {\n            b'0'..=b'9' => {\n                result = result.checked_mul(RADIX)?;\n                result = result.checked_add((b - b'0') as u64)?;\n            }\n            _ => {\n                // not a DIGIT, get outta here!\n                return None;\n            }\n        }\n    }\n\n    Some(result)\n}\n\n#[inline]\npub(super) fn method_has_defined_payload_semantics(method: &Method) -> bool {\n    !matches!(\n        *method,\n        Method::GET | Method::HEAD | Method::DELETE | Method::CONNECT | Method::OPTIONS\n    )\n}\n\n#[inline]\npub(super) fn set_content_length_if_missing(headers: &mut HeaderMap, len: u64) {\n    headers\n        .entry(CONTENT_LENGTH)\n        .or_insert_with(|| HeaderValue::from(len));\n}\n\n#[inline]\npub(super) fn transfer_encoding_is_chunked(headers: &HeaderMap) -> bool {\n    is_chunked(headers.get_all(http::header::TRANSFER_ENCODING).into_iter())\n}\n\npub(super) fn is_chunked(mut encodings: ValueIter<'_, 
HeaderValue>) -> bool {\n    // chunked must always be the last encoding, according to spec\n    if let Some(line) = encodings.next_back() {\n        // chunked must always be the last encoding, according to spec\n        if let Ok(s) = line.to_str() {\n            if let Some(encoding) = s.rsplit(',').next() {\n                return encoding.trim().eq_ignore_ascii_case(\"chunked\");\n            }\n        }\n    }\n\n    false\n}\n\npub(super) fn add_chunked(mut entry: http::header::OccupiedEntry<'_, HeaderValue>) {\n    const CHUNKED: &str = \"chunked\";\n\n    if let Some(line) = entry.iter_mut().next_back() {\n        // + 2 for \", \"\n        let new_cap = line.as_bytes().len() + CHUNKED.len() + 2;\n        let mut buf = BytesMut::with_capacity(new_cap);\n        buf.extend_from_slice(line.as_bytes());\n        buf.extend_from_slice(b\", \");\n        buf.extend_from_slice(CHUNKED.as_bytes());\n\n        *line = HeaderValue::from_maybe_shared(buf.freeze())\n            .expect(\"original header value plus ascii is valid\");\n        return;\n    }\n\n    entry.insert(HeaderValue::from_static(CHUNKED));\n}\n"
  },
  {
    "path": "src/client/core/proto/http1/buf.rs",
    "content": "use std::{collections::VecDeque, io::IoSlice};\n\nuse bytes::{Buf, BufMut, Bytes, BytesMut};\n\n/// A list of buffers that implements `Buf` by concatenating them together.\npub(crate) struct BufList<T> {\n    bufs: VecDeque<T>,\n    remaining: usize,\n}\n\nimpl<T: Buf> BufList<T> {\n    #[inline]\n    pub(crate) fn new() -> BufList<T> {\n        BufList {\n            bufs: VecDeque::new(),\n            remaining: 0,\n        }\n    }\n\n    #[inline]\n    pub(crate) fn push(&mut self, buf: T) {\n        debug_assert!(buf.has_remaining());\n        self.remaining += buf.remaining();\n        self.bufs.push_back(buf);\n    }\n\n    #[inline]\n    pub(crate) fn bufs_cnt(&self) -> usize {\n        self.bufs.len()\n    }\n}\n\nimpl<T: Buf> Buf for BufList<T> {\n    #[inline]\n    fn remaining(&self) -> usize {\n        self.remaining\n    }\n\n    #[inline]\n    fn chunk(&self) -> &[u8] {\n        self.bufs.front().map(Buf::chunk).unwrap_or_default()\n    }\n\n    #[inline]\n    fn advance(&mut self, mut cnt: usize) {\n        assert!(cnt <= self.remaining, \"`cnt` greater than remaining\");\n        self.remaining -= cnt;\n        while cnt > 0 {\n            {\n                let front = &mut self.bufs[0];\n                let rem = front.remaining();\n                if rem > cnt {\n                    front.advance(cnt);\n                    return;\n                } else {\n                    front.advance(rem);\n                    cnt -= rem;\n                }\n            }\n            self.bufs.pop_front();\n        }\n    }\n\n    #[inline]\n    fn chunks_vectored<'t>(&'t self, dst: &mut [IoSlice<'t>]) -> usize {\n        if dst.is_empty() {\n            return 0;\n        }\n        let mut vecs = 0;\n        for buf in &self.bufs {\n            vecs += buf.chunks_vectored(&mut dst[vecs..]);\n            if vecs == dst.len() {\n                break;\n            }\n        }\n        vecs\n    }\n\n    #[inline]\n    fn 
copy_to_bytes(&mut self, len: usize) -> Bytes {\n        // Our inner buffer may have an optimized version of copy_to_bytes, and if the whole\n        // request can be fulfilled by the front buffer, we can take advantage.\n        match self.bufs.front_mut() {\n            Some(front) if front.remaining() == len => {\n                let b = front.copy_to_bytes(len);\n                self.bufs.pop_front();\n                self.remaining -= len;\n                b\n            }\n            Some(front) if front.remaining() > len => {\n                self.remaining -= len;\n                front.copy_to_bytes(len)\n            }\n            _ => {\n                assert!(len <= self.remaining(), \"`len` greater than remaining\");\n                let mut bm = BytesMut::with_capacity(len);\n                bm.put(self.take(len));\n                bm.freeze()\n            }\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::ptr;\n\n    use super::*;\n\n    fn hello_world_buf() -> BufList<Bytes> {\n        let mut list = BufList::new();\n        list.push(Bytes::from(\"Hello\"));\n        list.push(Bytes::from(\" \"));\n        list.push(Bytes::from(\"World\"));\n        list\n    }\n\n    #[test]\n    fn to_bytes_shorter() {\n        let mut bufs = hello_world_buf();\n        let old_ptr = bufs.chunk().as_ptr();\n        let start = bufs.copy_to_bytes(4);\n        assert_eq!(start, \"Hell\");\n        assert!(ptr::eq(old_ptr, start.as_ptr()));\n        assert_eq!(bufs.chunk(), b\"o\");\n        assert!(ptr::eq(old_ptr.wrapping_add(4), bufs.chunk().as_ptr()));\n        assert_eq!(bufs.remaining(), 7);\n    }\n\n    #[test]\n    fn to_bytes_eq() {\n        let mut bufs = hello_world_buf();\n        let old_ptr = bufs.chunk().as_ptr();\n        let start = bufs.copy_to_bytes(5);\n        assert_eq!(start, \"Hello\");\n        assert!(ptr::eq(old_ptr, start.as_ptr()));\n        assert_eq!(bufs.chunk(), b\" \");\n        assert_eq!(bufs.remaining(), 6);\n 
   }\n\n    #[test]\n    fn to_bytes_longer() {\n        let mut bufs = hello_world_buf();\n        let start = bufs.copy_to_bytes(7);\n        assert_eq!(start, \"Hello W\");\n        assert_eq!(bufs.remaining(), 4);\n    }\n\n    #[test]\n    fn one_long_buf_to_bytes() {\n        let mut buf = BufList::new();\n        buf.push(b\"Hello World\" as &[_]);\n        assert_eq!(buf.copy_to_bytes(5), \"Hello\");\n        assert_eq!(buf.chunk(), b\" World\");\n    }\n\n    #[test]\n    #[should_panic(expected = \"`len` greater than remaining\")]\n    fn buf_to_bytes_too_many() {\n        hello_world_buf().copy_to_bytes(42);\n    }\n}\n"
  },
  {
    "path": "src/client/core/proto/http1/conn.rs",
    "content": "use std::{\n    fmt, io,\n    marker::{PhantomData, Unpin},\n    pin::Pin,\n    task::{Context, Poll, ready},\n};\n\nuse bytes::{Buf, Bytes};\nuse http::{\n    HeaderMap, Method, Version,\n    header::{CONNECTION, HeaderValue, TE},\n};\nuse http_body::Frame;\nuse httparse::ParserConfig;\nuse tokio::io::{AsyncRead, AsyncWrite};\n\nuse super::{\n    Decoder, Encode, Http1Transaction, ParseContext, Wants,\n    encode::{EncodedBuf, Encoder},\n    io::Buffered,\n};\nuse crate::client::core::{\n    Error, Result,\n    body::DecodedLength,\n    proto::{BodyLength, MessageHead, headers},\n    upgrade,\n};\n\n/// This handles a connection, which will have been established over an\n/// `AsyncRead + AsyncWrite` (like a socket), and will likely include multiple\n/// `Transaction`s over HTTP.\n///\n/// The connection will determine when a message begins and ends as well as\n/// determine if this connection can be kept alive after the message,\n/// or if it is complete.\npub(crate) struct Conn<I, B, T> {\n    io: Buffered<I, EncodedBuf<B>>,\n    state: State,\n    _marker: PhantomData<fn(T)>,\n}\n\nimpl<I, B, T> Conn<I, B, T>\nwhere\n    I: AsyncRead + AsyncWrite + Unpin,\n    B: Buf,\n    T: Http1Transaction,\n{\n    pub(crate) fn new(io: I) -> Conn<I, B, T> {\n        Conn {\n            io: Buffered::new(io),\n            state: State {\n                allow_half_close: false,\n                cached_headers: None,\n                error: None,\n                keep_alive: KA::Busy,\n                method: None,\n                h1_parser_config: ParserConfig::default(),\n                h1_max_headers: None,\n                h09_responses: false,\n                notify_read: false,\n                reading: Reading::Init,\n                writing: Writing::Init,\n                upgrade: None,\n                // We assume a modern world where the remote speaks HTTP/1.1.\n                // If they tell us otherwise, we'll downgrade in `read_head`.\n       
         version: Version::HTTP_11,\n                allow_trailer_fields: false,\n            },\n            _marker: PhantomData,\n        }\n    }\n\n    #[inline]\n    pub(crate) fn set_write_strategy_queue(&mut self) {\n        self.io.set_write_strategy_queue();\n    }\n\n    #[inline]\n    pub(crate) fn set_max_buf_size(&mut self, max: usize) {\n        self.io.set_max_buf_size(max);\n    }\n\n    #[inline]\n    pub(crate) fn set_read_buf_exact_size(&mut self, sz: usize) {\n        self.io.set_read_buf_exact_size(sz);\n    }\n\n    #[inline]\n    pub(crate) fn set_write_strategy_flatten(&mut self) {\n        self.io.set_write_strategy_flatten();\n    }\n\n    #[inline]\n    pub(crate) fn set_h1_parser_config(&mut self, parser_config: ParserConfig) {\n        self.state.h1_parser_config = parser_config;\n    }\n\n    #[inline]\n    pub(crate) fn set_h09_responses(&mut self) {\n        self.state.h09_responses = true;\n    }\n\n    #[inline]\n    pub(crate) fn set_http1_max_headers(&mut self, val: usize) {\n        self.state.h1_max_headers = Some(val);\n    }\n\n    #[inline]\n    pub(super) fn into_inner(self) -> (I, Bytes) {\n        self.io.into_inner()\n    }\n\n    #[inline]\n    pub(super) fn pending_upgrade(&mut self) -> Option<upgrade::Pending> {\n        self.state.upgrade.take()\n    }\n\n    #[inline]\n    pub(super) fn is_read_closed(&self) -> bool {\n        self.state.is_read_closed()\n    }\n\n    #[inline]\n    pub(super) fn is_write_closed(&self) -> bool {\n        self.state.is_write_closed()\n    }\n\n    pub(super) fn can_read_head(&self) -> bool {\n        if !matches!(self.state.reading, Reading::Init) {\n            return false;\n        }\n\n        !matches!(self.state.writing, Writing::Init)\n    }\n\n    #[inline]\n    pub(super) fn can_read_body(&self) -> bool {\n        matches!(\n            self.state.reading,\n            Reading::Body(..) 
| Reading::Continue(..)\n        )\n    }\n\n    #[inline]\n    fn should_error_on_eof(&self) -> bool {\n        // If we're idle, it's probably just the connection closing gracefully.\n        !self.state.is_idle()\n    }\n\n    #[inline]\n    fn has_h2_prefix(&self) -> bool {\n        const H2_PREFACE: &[u8] = b\"PRI * HTTP/2.0\\r\\n\\r\\nSM\\r\\n\\r\\n\";\n        let read_buf = self.io.read_buf();\n        read_buf.starts_with(H2_PREFACE)\n    }\n\n    #[allow(clippy::type_complexity)]\n    pub(super) fn poll_read_head(\n        &mut self,\n        cx: &mut Context<'_>,\n    ) -> Poll<Option<Result<(MessageHead<T::Incoming>, DecodedLength, Wants)>>> {\n        debug_assert!(self.can_read_head());\n        trace!(\"Conn::read_head\");\n\n        let msg = match self.io.parse::<T>(\n            cx,\n            ParseContext {\n                cached_headers: &mut self.state.cached_headers,\n                req_method: &mut self.state.method,\n                h1_parser_config: &self.state.h1_parser_config,\n                h1_max_headers: self.state.h1_max_headers,\n                h09_responses: self.state.h09_responses,\n            },\n        ) {\n            Poll::Ready(Ok(msg)) => msg,\n            Poll::Ready(Err(e)) => return self.on_read_head_error(e),\n            Poll::Pending => {\n                return Poll::Pending;\n            }\n        };\n\n        // Note: don't deconstruct `msg` into local variables, it appears\n        // the optimizer doesn't remove the extra copies.\n\n        debug!(\"incoming body is {}\", msg.decode);\n\n        // Prevent accepting HTTP/0.9 responses after the initial one, if any.\n        self.state.h09_responses = false;\n\n        self.state.busy();\n        self.state.keep_alive &= msg.keep_alive;\n        self.state.version = msg.head.version;\n\n        let mut wants = if msg.wants_upgrade {\n            Wants::UPGRADE\n        } else {\n            Wants::EMPTY\n        };\n\n        if msg.decode == 
DecodedLength::ZERO {\n            if msg.expect_continue {\n                debug!(\"ignoring expect-continue since body is empty\");\n            }\n            self.state.reading = Reading::KeepAlive;\n            self.try_keep_alive(cx);\n        } else if msg.expect_continue && msg.head.version.gt(&Version::HTTP_10) {\n            // TODO: remove this when we land h1_max_header_size support\n            let h1_max_header_size = None;\n            self.state.reading = Reading::Continue(Decoder::new(\n                msg.decode,\n                self.state.h1_max_headers,\n                h1_max_header_size,\n            ));\n            wants = wants.add(Wants::EXPECT);\n        } else {\n            // TODO: remove this when we land h1_max_header_size support\n            let h1_max_header_size = None;\n            self.state.reading = Reading::Body(Decoder::new(\n                msg.decode,\n                self.state.h1_max_headers,\n                h1_max_header_size,\n            ));\n        }\n\n        self.state.allow_trailer_fields = msg\n            .head\n            .headers\n            .get(TE)\n            .is_some_and(|te_header| te_header == \"trailers\");\n\n        Poll::Ready(Some(Ok((msg.head, msg.decode, wants))))\n    }\n\n    fn on_read_head_error<Z>(&mut self, e: Error) -> Poll<Option<Result<Z>>> {\n        // If we are currently waiting on a message, then an empty\n        // message should be reported as an error. 
If not, it is just\n        // the connection closing gracefully.\n        let must_error = self.should_error_on_eof();\n        self.close_read();\n        self.io.consume_leading_lines();\n        let was_mid_parse = e.is_parse() || !self.io.read_buf().is_empty();\n        if was_mid_parse || must_error {\n            // We check if the buf contains the h2 Preface\n            debug!(\n                \"parse error ({}) with {} bytes\",\n                e,\n                self.io.read_buf().len()\n            );\n            match self.on_parse_error(e) {\n                Ok(()) => Poll::Pending, // XXX: wat?\n                Err(e) => Poll::Ready(Some(Err(e))),\n            }\n        } else {\n            debug!(\"read eof\");\n            self.close_write();\n            Poll::Ready(None)\n        }\n    }\n\n    pub(super) fn poll_read_body(\n        &mut self,\n        cx: &mut Context<'_>,\n    ) -> Poll<Option<io::Result<Frame<Bytes>>>> {\n        debug_assert!(self.can_read_body());\n\n        let (reading, ret) = match self.state.reading {\n            Reading::Body(ref mut decoder) => {\n                match ready!(decoder.decode(cx, &mut self.io)) {\n                    Ok(frame) => {\n                        if frame.is_data() {\n                            let slice = frame.data_ref().unwrap_or_else(|| unreachable!());\n                            let (reading, maybe_frame) = if decoder.is_eof() {\n                                debug!(\"incoming body completed\");\n                                (\n                                    Reading::KeepAlive,\n                                    if !slice.is_empty() {\n                                        Some(Ok(frame))\n                                    } else {\n                                        None\n                                    },\n                                )\n                            } else if slice.is_empty() {\n                                error!(\"incoming body 
unexpectedly ended\");\n                                // This should be unreachable, since all 3 decoders\n                                // either set eof=true or return an Err when reading\n                                // an empty slice...\n                                (Reading::Closed, None)\n                            } else {\n                                return Poll::Ready(Some(Ok(frame)));\n                            };\n                            (reading, Poll::Ready(maybe_frame))\n                        } else if frame.is_trailers() {\n                            debug!(\"incoming body completed with trailers\");\n                            (Reading::KeepAlive, Poll::Ready(Some(Ok(frame))))\n                        } else {\n                            trace!(\"discarding unknown frame\");\n                            (Reading::Closed, Poll::Ready(None))\n                        }\n                    }\n                    Err(e) => {\n                        debug!(\"incoming body decode error: {}\", e);\n                        (Reading::Closed, Poll::Ready(Some(Err(e))))\n                    }\n                }\n            }\n            Reading::Continue(ref decoder) => {\n                // Write the 100 Continue if not already responded...\n                if let Writing::Init = self.state.writing {\n                    trace!(\"automatically sending 100 Continue\");\n                    let cont = b\"HTTP/1.1 100 Continue\\r\\n\\r\\n\";\n                    self.io.headers_buf().extend_from_slice(cont);\n                }\n\n                // And now recurse once in the Reading::Body state...\n                self.state.reading = Reading::Body(decoder.clone());\n                return self.poll_read_body(cx);\n            }\n            _ => unreachable!(\"poll_read_body invalid state: {:?}\", self.state.reading),\n        };\n\n        self.state.reading = reading;\n        self.try_keep_alive(cx);\n        ret\n    }\n\n    
#[inline]\n    pub(super) fn wants_read_again(&mut self) -> bool {\n        let ret = self.state.notify_read;\n        self.state.notify_read = false;\n        ret\n    }\n\n    pub(super) fn poll_read_keep_alive(&mut self, cx: &mut Context<'_>) -> Poll<Result<()>> {\n        debug_assert!(!self.can_read_head() && !self.can_read_body());\n\n        if self.is_read_closed() {\n            Poll::Pending\n        } else if self.is_mid_message() {\n            self.mid_message_detect_eof(cx)\n        } else {\n            self.require_empty_read(cx)\n        }\n    }\n\n    #[inline]\n    fn is_mid_message(&self) -> bool {\n        !matches!(\n            (&self.state.reading, &self.state.writing),\n            (&Reading::Init, &Writing::Init)\n        )\n    }\n\n    // This will check to make sure the io object read is empty.\n    //\n    // This should only be called for Clients wanting to enter the idle\n    // state.\n    fn require_empty_read(&mut self, cx: &mut Context<'_>) -> Poll<Result<()>> {\n        debug_assert!(!self.can_read_head() && !self.can_read_body() && !self.is_read_closed());\n        debug_assert!(!self.is_mid_message());\n\n        if !self.io.read_buf().is_empty() {\n            debug!(\"received an unexpected {} bytes\", self.io.read_buf().len());\n            return Poll::Ready(Err(Error::new_unexpected_message()));\n        }\n\n        let num_read = ready!(self.force_io_read(cx)).map_err(Error::new_io)?;\n\n        if num_read == 0 {\n            let ret = if self.should_error_on_eof() {\n                trace!(\"found unexpected EOF on busy connection: {:?}\", self.state);\n                Poll::Ready(Err(Error::new_incomplete()))\n            } else {\n                trace!(\"found EOF on idle connection, closing\");\n                Poll::Ready(Ok(()))\n            };\n\n            // order is important: should_error needs state BEFORE close_read\n            self.state.close_read();\n            return ret;\n        }\n\n        
debug!(\n            \"received unexpected {} bytes on an idle connection\",\n            num_read\n        );\n        Poll::Ready(Err(Error::new_unexpected_message()))\n    }\n\n    fn mid_message_detect_eof(&mut self, cx: &mut Context<'_>) -> Poll<Result<()>> {\n        debug_assert!(!self.can_read_head() && !self.can_read_body() && !self.is_read_closed());\n        debug_assert!(self.is_mid_message());\n\n        if self.state.allow_half_close || !self.io.read_buf().is_empty() {\n            return Poll::Pending;\n        }\n\n        let num_read = ready!(self.force_io_read(cx)).map_err(Error::new_io)?;\n\n        if num_read == 0 {\n            trace!(\"found unexpected EOF on busy connection: {:?}\", self.state);\n            self.state.close_read();\n            Poll::Ready(Err(Error::new_incomplete()))\n        } else {\n            Poll::Ready(Ok(()))\n        }\n    }\n\n    fn force_io_read(&mut self, cx: &mut Context<'_>) -> Poll<io::Result<usize>> {\n        debug_assert!(!self.state.is_read_closed());\n\n        let result = ready!(self.io.poll_read_from_io(cx));\n        #[allow(clippy::manual_inspect)]\n        Poll::Ready(result.map_err(|e| {\n            trace!(error = %e, \"force_io_read; io error\");\n            self.state.close();\n            e\n        }))\n    }\n\n    fn maybe_notify(&mut self, cx: &mut Context<'_>) {\n        // its possible that we returned NotReady from poll() without having\n        // exhausted the underlying Io. We would have done this when we\n        // determined we couldn't keep reading until we knew how writing\n        // would finish.\n\n        match self.state.reading {\n            Reading::Continue(..) | Reading::Body(..) | Reading::KeepAlive | Reading::Closed => {\n                return;\n            }\n            Reading::Init => (),\n        };\n\n        match self.state.writing {\n            Writing::Body(..) 
=> return,\n            Writing::Init | Writing::KeepAlive | Writing::Closed => (),\n        }\n\n        if !self.io.is_read_blocked() {\n            if self.io.read_buf().is_empty() {\n                match self.io.poll_read_from_io(cx) {\n                    Poll::Ready(Ok(n)) => {\n                        if n == 0 {\n                            trace!(\"maybe_notify; read eof\");\n                            if self.state.is_idle() {\n                                self.state.close();\n                            } else {\n                                self.close_read()\n                            }\n                            return;\n                        }\n                    }\n                    Poll::Pending => {\n                        trace!(\"maybe_notify; read_from_io blocked\");\n                        return;\n                    }\n                    Poll::Ready(Err(e)) => {\n                        trace!(\"maybe_notify; read_from_io error: {}\", e);\n                        self.state.close();\n                        self.state.error = Some(Error::new_io(e));\n                    }\n                }\n            }\n            self.state.notify_read = true;\n        }\n    }\n\n    #[inline]\n    fn try_keep_alive(&mut self, cx: &mut Context<'_>) {\n        self.state.try_keep_alive::<T>();\n        self.maybe_notify(cx);\n    }\n\n    #[inline]\n    pub(super) fn can_write_head(&self) -> bool {\n        if matches!(self.state.reading, Reading::Closed) {\n            return false;\n        }\n\n        match self.state.writing {\n            Writing::Init => self.io.can_headers_buf(),\n            _ => false,\n        }\n    }\n\n    #[inline]\n    pub(super) fn can_write_body(&self) -> bool {\n        match self.state.writing {\n            Writing::Body(..) 
=> true,\n            Writing::Init | Writing::KeepAlive | Writing::Closed => false,\n        }\n    }\n\n    #[inline]\n    pub(super) fn can_buffer_body(&self) -> bool {\n        self.io.can_buffer()\n    }\n\n    pub(super) fn write_head(&mut self, head: MessageHead<T::Outgoing>, body: Option<BodyLength>) {\n        if let Some(encoder) = self.encode_head(head, body) {\n            self.state.writing = if !encoder.is_eof() {\n                Writing::Body(encoder)\n            } else if encoder.is_last() {\n                Writing::Closed\n            } else {\n                Writing::KeepAlive\n            };\n        }\n    }\n\n    fn encode_head(\n        &mut self,\n        mut head: MessageHead<T::Outgoing>,\n        body: Option<BodyLength>,\n    ) -> Option<Encoder> {\n        debug_assert!(self.can_write_head());\n\n        self.state.busy();\n\n        self.enforce_version(&mut head);\n        let buf = self.io.headers_buf();\n\n        trace_span!(\"encode_headers\");\n\n        match T::encode(\n            Encode {\n                head: &mut head,\n                body,\n                req_method: &mut self.state.method,\n            },\n            buf,\n        ) {\n            Ok(encoder) => {\n                debug_assert!(self.state.cached_headers.is_none());\n                debug_assert!(head.headers.is_empty());\n                self.state.cached_headers = Some(head.headers);\n\n                Some(encoder)\n            }\n            Err(err) => {\n                self.state.error = Some(err);\n                self.state.writing = Writing::Closed;\n                None\n            }\n        }\n    }\n\n    // Fix keep-alive when Connection: keep-alive header is not present\n    fn fix_keep_alive(&mut self, head: &mut MessageHead<T::Outgoing>) {\n        let outgoing_is_keep_alive = head\n            .headers\n            .get(CONNECTION)\n            .is_some_and(headers::connection_keep_alive);\n\n        if !outgoing_is_keep_alive 
{\n            match head.version {\n                // If response is version 1.0 and keep-alive is not present in the response,\n                // disable keep-alive so the server closes the connection\n                Version::HTTP_10 => self.state.disable_keep_alive(),\n                // If response is version 1.1 and keep-alive is wanted, add\n                // Connection: keep-alive header when not present\n                Version::HTTP_11 if self.state.wants_keep_alive() => {\n                    head.headers\n                        .insert(CONNECTION, HeaderValue::from_static(\"keep-alive\"));\n                }\n                _ => (),\n            }\n        }\n    }\n\n    // If we know the remote speaks an older version, we try to fix up any messages\n    // to work with our older peer.\n    fn enforce_version(&mut self, head: &mut MessageHead<T::Outgoing>) {\n        match self.state.version {\n            Version::HTTP_10 => {\n                // Fixes response or connection when keep-alive header is not present\n                self.fix_keep_alive(head);\n                // If the remote only knows HTTP/1.0, we should force ourselves\n                // to do only speak HTTP/1.0 as well.\n                head.version = Version::HTTP_10;\n            }\n            Version::HTTP_11 => {\n                if let KA::Disabled = self.state.keep_alive.status() {\n                    head.headers\n                        .insert(CONNECTION, HeaderValue::from_static(\"close\"));\n                }\n            }\n            _ => (),\n        }\n        // If the remote speaks HTTP/1.1, then it *should* be fine with\n        // both HTTP/1.0 and HTTP/1.1 from us. 
So again, we just let\n        // the user's headers be.\n    }\n\n    pub(super) fn write_body(&mut self, chunk: B) {\n        debug_assert!(self.can_write_body() && self.can_buffer_body());\n        // empty chunks should be discarded at Dispatcher level\n        debug_assert!(chunk.remaining() != 0);\n\n        let state = match self.state.writing {\n            Writing::Body(ref mut encoder) => {\n                self.io.buffer(encoder.encode(chunk));\n\n                if !encoder.is_eof() {\n                    return;\n                }\n\n                if encoder.is_last() {\n                    Writing::Closed\n                } else {\n                    Writing::KeepAlive\n                }\n            }\n            _ => unreachable!(\"write_body invalid state: {:?}\", self.state.writing),\n        };\n\n        self.state.writing = state;\n    }\n\n    pub(super) fn write_trailers(&mut self, trailers: HeaderMap) {\n        debug_assert!(self.can_write_body() && self.can_buffer_body());\n\n        match self.state.writing {\n            Writing::Body(ref encoder) => {\n                if let Some(enc_buf) = encoder.encode_trailers(trailers) {\n                    self.io.buffer(enc_buf);\n\n                    self.state.writing = if encoder.is_last() || encoder.is_close_delimited() {\n                        Writing::Closed\n                    } else {\n                        Writing::KeepAlive\n                    };\n                }\n            }\n            _ => unreachable!(\"write_trailers invalid state: {:?}\", self.state.writing),\n        }\n    }\n\n    pub(super) fn write_body_and_end(&mut self, chunk: B) {\n        debug_assert!(self.can_write_body() && self.can_buffer_body());\n        // empty chunks should be discarded at Dispatcher level\n        debug_assert!(chunk.remaining() != 0);\n\n        let state = match self.state.writing {\n            Writing::Body(ref encoder) => {\n                let can_keep_alive = 
encoder.encode_and_end(chunk, self.io.write_buf());\n                if can_keep_alive {\n                    Writing::KeepAlive\n                } else {\n                    Writing::Closed\n                }\n            }\n            _ => unreachable!(\"write_body invalid state: {:?}\", self.state.writing),\n        };\n\n        self.state.writing = state;\n    }\n\n    pub(super) fn end_body(&mut self) -> Result<()> {\n        debug_assert!(self.can_write_body());\n\n        let encoder = match self.state.writing {\n            Writing::Body(ref mut enc) => enc,\n            _ => return Ok(()),\n        };\n\n        // end of stream, that means we should try to eof\n        match encoder.end() {\n            Ok(end) => {\n                if let Some(end) = end {\n                    self.io.buffer(end);\n                }\n\n                self.state.writing = if encoder.is_last() || encoder.is_close_delimited() {\n                    Writing::Closed\n                } else {\n                    Writing::KeepAlive\n                };\n\n                Ok(())\n            }\n            Err(not_eof) => {\n                self.state.writing = Writing::Closed;\n                Err(Error::new_body_write_aborted().with(not_eof))\n            }\n        }\n    }\n\n    // When we get a parse error, depending on what side we are, we might be able\n    // to write a response before closing the connection.\n    //\n    // - Client: there is nothing we can do\n    // - Server: if Response hasn't been written yet, we can send a 4xx response\n    fn on_parse_error(&mut self, err: Error) -> Result<()> {\n        if let Writing::Init = self.state.writing {\n            if self.has_h2_prefix() {\n                return Err(Error::new_version_h2());\n            }\n            if let Some(msg) = T::on_error(&err) {\n                // Drop the cached headers so as to not trigger a debug\n                // assert in `write_head`...\n                
self.state.cached_headers.take();\n                self.write_head(msg, None);\n                self.state.error = Some(err);\n                return Ok(());\n            }\n        }\n\n        // fallback is pass the error back up\n        Err(err)\n    }\n\n    pub(super) fn poll_flush(&mut self, cx: &mut Context<'_>) -> Poll<io::Result<()>> {\n        ready!(Pin::new(&mut self.io).poll_flush(cx))?;\n        self.try_keep_alive(cx);\n        trace!(\"flushed({}): {:?}\", T::LOG, self.state);\n        Poll::Ready(Ok(()))\n    }\n\n    pub(super) fn poll_shutdown(&mut self, cx: &mut Context<'_>) -> Poll<io::Result<()>> {\n        match ready!(Pin::new(self.io.io_mut()).poll_shutdown(cx)) {\n            Ok(()) => {\n                trace!(\"shut down IO complete\");\n                Poll::Ready(Ok(()))\n            }\n            Err(e) => {\n                debug!(\"error shutting down IO: {}\", e);\n                Poll::Ready(Err(e))\n            }\n        }\n    }\n\n    /// If the read side can be cheaply drained, do so. 
Otherwise, close.\n    pub(super) fn poll_drain_or_close_read(&mut self, cx: &mut Context<'_>) {\n        if let Reading::Continue(ref decoder) = self.state.reading {\n            // skip sending the 100-continue\n            // just move forward to a read, in case a tiny body was included\n            self.state.reading = Reading::Body(decoder.clone());\n        }\n\n        let _ = self.poll_read_body(cx);\n\n        // If still in Reading::Body, just give up\n        match self.state.reading {\n            Reading::Init | Reading::KeepAlive => {\n                trace!(\"body drained\")\n            }\n            _ => self.close_read(),\n        }\n    }\n\n    #[inline]\n    pub(super) fn close_read(&mut self) {\n        self.state.close_read();\n    }\n\n    #[inline]\n    pub(super) fn close_write(&mut self) {\n        self.state.close_write();\n    }\n\n    pub(super) fn take_error(&mut self) -> Result<()> {\n        if let Some(err) = self.state.error.take() {\n            Err(err)\n        } else {\n            Ok(())\n        }\n    }\n\n    #[inline]\n    pub(super) fn on_upgrade(&mut self) -> upgrade::OnUpgrade {\n        trace!(\"{}: prepare possible HTTP upgrade\", T::LOG);\n        self.state.prepare_upgrade()\n    }\n}\n\nimpl<I, B: Buf, T> fmt::Debug for Conn<I, B, T> {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        f.debug_struct(\"Conn\")\n            .field(\"state\", &self.state)\n            .field(\"io\", &self.io)\n            .finish()\n    }\n}\n\n// B and T are never pinned\nimpl<I: Unpin, B, T> Unpin for Conn<I, B, T> {}\n\nstruct State {\n    allow_half_close: bool,\n    /// Re-usable HeaderMap to reduce allocating new ones.\n    cached_headers: Option<HeaderMap>,\n    /// If an error occurs when there wasn't a direct way to return it\n    /// back to the user, this is set.\n    error: Option<Error>,\n    /// Current keep-alive status.\n    keep_alive: KA,\n    /// If mid-message, the HTTP Method that started 
it.\n    ///\n    /// This is used to know things such as if the message can include\n    /// a body or not.\n    method: Option<Method>,\n    h1_parser_config: ParserConfig,\n    h1_max_headers: Option<usize>,\n    h09_responses: bool,\n    /// Set to true when the Dispatcher should poll read operations\n    /// again. See the `maybe_notify` method for more.\n    notify_read: bool,\n    /// State of allowed reads\n    reading: Reading,\n    /// State of allowed writes\n    writing: Writing,\n    /// An expected pending HTTP upgrade.\n    upgrade: Option<upgrade::Pending>,\n    /// Either HTTP/1.0 or 1.1 connection\n    version: Version,\n    /// Flag to track if trailer fields are allowed to be sent\n    allow_trailer_fields: bool,\n}\n\n#[derive(Debug)]\nenum Reading {\n    Init,\n    Continue(Decoder),\n    Body(Decoder),\n    KeepAlive,\n    Closed,\n}\n\nenum Writing {\n    Init,\n    Body(Encoder),\n    KeepAlive,\n    Closed,\n}\n\nimpl fmt::Debug for State {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        let mut builder = f.debug_struct(\"State\");\n        builder\n            .field(\"reading\", &self.reading)\n            .field(\"writing\", &self.writing)\n            .field(\"keep_alive\", &self.keep_alive);\n\n        // Only show error field if it's interesting...\n        if let Some(ref error) = self.error {\n            builder.field(\"error\", error);\n        }\n\n        if self.allow_half_close {\n            builder.field(\"allow_half_close\", &true);\n        }\n\n        // Purposefully leaving off other fields...\n        builder.finish()\n    }\n}\n\nimpl fmt::Debug for Writing {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        match *self {\n            Writing::Init => f.write_str(\"Init\"),\n            Writing::Body(ref enc) => f.debug_tuple(\"Body\").field(enc).finish(),\n            Writing::KeepAlive => f.write_str(\"KeepAlive\"),\n            Writing::Closed => 
f.write_str(\"Closed\"),\n        }\n    }\n}\n\nimpl std::ops::BitAndAssign<bool> for KA {\n    fn bitand_assign(&mut self, enabled: bool) {\n        if !enabled {\n            trace!(\"remote disabling keep-alive\");\n            *self = KA::Disabled;\n        }\n    }\n}\n\n#[derive(Clone, Copy, Debug, Default)]\nenum KA {\n    Idle,\n    #[default]\n    Busy,\n    Disabled,\n}\n\nimpl KA {\n    #[inline]\n    fn idle(&mut self) {\n        *self = KA::Idle;\n    }\n\n    #[inline]\n    fn busy(&mut self) {\n        *self = KA::Busy;\n    }\n\n    #[inline]\n    fn disable(&mut self) {\n        *self = KA::Disabled;\n    }\n\n    #[inline]\n    fn status(&self) -> KA {\n        *self\n    }\n}\n\nimpl State {\n    fn close(&mut self) {\n        trace!(\"State::close()\");\n        self.reading = Reading::Closed;\n        self.writing = Writing::Closed;\n        self.keep_alive.disable();\n    }\n\n    fn close_read(&mut self) {\n        trace!(\"State::close_read()\");\n        self.reading = Reading::Closed;\n        self.keep_alive.disable();\n    }\n\n    fn close_write(&mut self) {\n        trace!(\"State::close_write()\");\n        self.writing = Writing::Closed;\n        self.keep_alive.disable();\n    }\n\n    #[inline]\n    fn wants_keep_alive(&self) -> bool {\n        !matches!(self.keep_alive.status(), KA::Disabled)\n    }\n\n    fn try_keep_alive<T: Http1Transaction>(&mut self) {\n        match (&self.reading, &self.writing) {\n            (&Reading::KeepAlive, &Writing::KeepAlive) => {\n                if let KA::Busy = self.keep_alive.status() {\n                    self.idle::<T>();\n                } else {\n                    trace!(\n                        \"try_keep_alive({}): could keep-alive, but status = {:?}\",\n                        T::LOG,\n                        self.keep_alive\n                    );\n                    self.close();\n                }\n            }\n            (&Reading::Closed, &Writing::KeepAlive) | 
(&Reading::KeepAlive, &Writing::Closed) => {\n                self.close()\n            }\n            _ => (),\n        }\n    }\n\n    #[inline]\n    fn disable_keep_alive(&mut self) {\n        self.keep_alive.disable()\n    }\n\n    fn busy(&mut self) {\n        if let KA::Disabled = self.keep_alive.status() {\n            return;\n        }\n        self.keep_alive.busy();\n    }\n\n    fn idle<T: Http1Transaction>(&mut self) {\n        debug_assert!(!self.is_idle(), \"State::idle() called while idle\");\n\n        self.method = None;\n        self.keep_alive.idle();\n\n        if !self.is_idle() {\n            self.close();\n            return;\n        }\n\n        self.reading = Reading::Init;\n        self.writing = Writing::Init;\n\n        // If Client connection has just gone idle, the Dispatcher\n        // should try the poll loop one more time, so as to poll the\n        // pending requests stream.\n        self.notify_read = true;\n    }\n\n    #[inline]\n    fn is_idle(&self) -> bool {\n        matches!(self.keep_alive.status(), KA::Idle)\n    }\n\n    #[inline]\n    fn is_read_closed(&self) -> bool {\n        matches!(self.reading, Reading::Closed)\n    }\n\n    #[inline]\n    fn is_write_closed(&self) -> bool {\n        matches!(self.writing, Writing::Closed)\n    }\n\n    #[inline]\n    fn prepare_upgrade(&mut self) -> upgrade::OnUpgrade {\n        let (tx, rx) = upgrade::pending();\n        self.upgrade = Some(tx);\n        rx\n    }\n}\n"
  },
  {
    "path": "src/client/core/proto/http1/decode.rs",
    "content": "use std::{\n    error::Error as StdError,\n    fmt, io,\n    task::{Context, Poll, ready},\n};\n\nuse bytes::{BufMut, Bytes, BytesMut};\nuse http::{HeaderMap, HeaderName, HeaderValue};\nuse http_body::Frame;\n\nuse self::Kind::{Chunked, Eof, Length};\nuse super::{DecodedLength, io::MemRead, role::DEFAULT_MAX_HEADERS};\n\n/// Maximum amount of bytes allowed in chunked extensions.\n///\n/// This limit is currentlty applied for the entire body, not per chunk.\nconst CHUNKED_EXTENSIONS_LIMIT: u64 = 1024 * 16;\n\n/// Maximum number of bytes allowed for all trailer fields.\n///\n/// TODO: remove this when we land h1_max_header_size support\nconst TRAILER_LIMIT: usize = 1024 * 16;\n\n/// Decoders to handle different Transfer-Encodings.\n///\n/// If a message body does not include a Transfer-Encoding, it *should*\n/// include a Content-Length header.\n#[derive(Clone, PartialEq)]\npub(crate) struct Decoder {\n    kind: Kind,\n}\n\n#[derive(Debug, Clone, PartialEq)]\nenum Kind {\n    /// A Reader used when a Content-Length header is passed with a positive integer.\n    Length(u64),\n    /// A Reader used when Transfer-Encoding is `chunked`.\n    Chunked {\n        state: ChunkedState,\n        chunk_len: u64,\n        extensions_cnt: u64,\n        trailers_buf: Option<BytesMut>,\n        trailers_cnt: usize,\n        h1_max_headers: Option<usize>,\n        h1_max_header_size: Option<usize>,\n    },\n    /// A Reader used for responses that don't indicate a length or chunked.\n    ///\n    /// The bool tracks when EOF is seen on the transport.\n    ///\n    /// Note: This should only used for `Response`s. 
It is illegal for a\n    /// `Request` to be made with both `Content-Length` and\n    /// `Transfer-Encoding: chunked` missing, as explained from the spec:\n    ///\n    /// > If a Transfer-Encoding header field is present in a response and\n    /// > the chunked transfer coding is not the final encoding, the\n    /// > message body length is determined by reading the connection until\n    /// > it is closed by the server.  If a Transfer-Encoding header field\n    /// > is present in a request and the chunked transfer coding is not\n    /// > the final encoding, the message body length cannot be determined\n    /// > reliably; the server MUST respond with the 400 (Bad Request)\n    /// > status code and then close the connection.\n    Eof(bool),\n}\n\n#[derive(Debug, PartialEq, Clone, Copy)]\nenum ChunkedState {\n    Start,\n    Size,\n    SizeLws,\n    Extension,\n    SizeLf,\n    Body,\n    BodyCr,\n    BodyLf,\n    Trailer,\n    TrailerLf,\n    EndCr,\n    EndLf,\n    End,\n}\n\nstruct StepArgs<'a> {\n    chunk_size: &'a mut u64,\n    chunk_buf: &'a mut Option<Bytes>,\n    extensions_cnt: &'a mut u64,\n    trailers_buf: &'a mut Option<BytesMut>,\n    trailers_cnt: &'a mut usize,\n    max_headers_cnt: usize,\n    max_headers_bytes: usize,\n}\n\n// ===== impl Decoder =====\n\nimpl Decoder {\n    #[inline]\n    fn length(x: u64) -> Decoder {\n        Decoder {\n            kind: Kind::Length(x),\n        }\n    }\n\n    #[inline]\n    fn eof() -> Decoder {\n        Decoder {\n            kind: Kind::Eof(false),\n        }\n    }\n\n    #[inline]\n    fn chunked(h1_max_headers: Option<usize>, h1_max_header_size: Option<usize>) -> Decoder {\n        Decoder {\n            kind: Kind::Chunked {\n                state: ChunkedState::new(),\n                chunk_len: 0,\n                extensions_cnt: 0,\n                trailers_buf: None,\n                trailers_cnt: 0,\n                h1_max_headers,\n                h1_max_header_size,\n            },\n        
}\n    }\n\n    pub(super) fn new(\n        len: DecodedLength,\n        h1_max_headers: Option<usize>,\n        h1_max_header_size: Option<usize>,\n    ) -> Self {\n        match len {\n            DecodedLength::CHUNKED => Decoder::chunked(h1_max_headers, h1_max_header_size),\n            DecodedLength::CLOSE_DELIMITED => Decoder::eof(),\n            length => Decoder::length(length.danger_len()),\n        }\n    }\n\n    #[inline]\n    pub(crate) fn is_eof(&self) -> bool {\n        matches!(\n            self.kind,\n            Length(0)\n                | Chunked {\n                    state: ChunkedState::End,\n                    ..\n                }\n                | Eof(true)\n        )\n    }\n\n    pub(crate) fn decode<R: MemRead>(\n        &mut self,\n        cx: &mut Context<'_>,\n        body: &mut R,\n    ) -> Poll<Result<Frame<Bytes>, io::Error>> {\n        trace!(\"decode; state={:?}\", self.kind);\n        match self.kind {\n            Length(ref mut remaining) => {\n                if *remaining == 0 {\n                    Poll::Ready(Ok(Frame::data(Bytes::new())))\n                } else {\n                    let to_read = *remaining as usize;\n                    let buf = ready!(body.read_mem(cx, to_read))?;\n                    let num = buf.as_ref().len() as u64;\n                    if num > *remaining {\n                        *remaining = 0;\n                    } else if num == 0 {\n                        return Poll::Ready(Err(io::Error::new(\n                            io::ErrorKind::UnexpectedEof,\n                            IncompleteBody,\n                        )));\n                    } else {\n                        *remaining -= num;\n                    }\n                    Poll::Ready(Ok(Frame::data(buf)))\n                }\n            }\n            Chunked {\n                ref mut state,\n                ref mut chunk_len,\n                ref mut extensions_cnt,\n                ref mut trailers_buf,\n       
         ref mut trailers_cnt,\n                ref h1_max_headers,\n                ref h1_max_header_size,\n            } => {\n                let h1_max_headers = h1_max_headers.unwrap_or(DEFAULT_MAX_HEADERS);\n                let h1_max_header_size = h1_max_header_size.unwrap_or(TRAILER_LIMIT);\n                loop {\n                    let mut buf = None;\n                    // advances the chunked state\n                    *state = ready!(state.step(\n                        cx,\n                        body,\n                        StepArgs {\n                            chunk_size: chunk_len,\n                            extensions_cnt,\n                            chunk_buf: &mut buf,\n                            trailers_buf,\n                            trailers_cnt,\n                            max_headers_cnt: h1_max_headers,\n                            max_headers_bytes: h1_max_header_size,\n                        }\n                    ))?;\n                    if *state == ChunkedState::End {\n                        trace!(\"end of chunked\");\n\n                        if trailers_buf.is_some() {\n                            trace!(\"found possible trailers\");\n\n                            // decoder enforces that trailers count will not exceed h1_max_headers\n                            if *trailers_cnt >= h1_max_headers {\n                                return Poll::Ready(Err(io::Error::new(\n                                    io::ErrorKind::InvalidData,\n                                    \"chunk trailers count overflow\",\n                                )));\n                            }\n                            match decode_trailers(\n                                &mut trailers_buf.take().expect(\"Trailer is None\"),\n                                *trailers_cnt,\n                            ) {\n                                Ok(headers) => {\n                                    return 
Poll::Ready(Ok(Frame::trailers(headers)));\n                                }\n                                Err(e) => {\n                                    return Poll::Ready(Err(e));\n                                }\n                            }\n                        }\n\n                        return Poll::Ready(Ok(Frame::data(Bytes::new())));\n                    }\n                    if let Some(buf) = buf {\n                        return Poll::Ready(Ok(Frame::data(buf)));\n                    }\n                }\n            }\n            Eof(ref mut is_eof) => {\n                if *is_eof {\n                    Poll::Ready(Ok(Frame::data(Bytes::new())))\n                } else {\n                    // 8192 chosen because its about 2 packets, there probably\n                    // won't be that much available, so don't have MemReaders\n                    // allocate buffers to big\n                    body.read_mem(cx, 8192).map_ok(|slice| {\n                        *is_eof = slice.is_empty();\n                        Frame::data(slice)\n                    })\n                }\n            }\n        }\n    }\n\n    #[cfg(test)]\n    async fn decode_fut<R: MemRead>(&mut self, body: &mut R) -> Result<Frame<Bytes>, io::Error> {\n        std::future::poll_fn(move |cx| self.decode(cx, body)).await\n    }\n}\n\nimpl fmt::Debug for Decoder {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        fmt::Debug::fmt(&self.kind, f)\n    }\n}\n\nmacro_rules! byte (\n    ($rdr:ident, $cx:expr) => ({\n        let buf = ready!($rdr.read_mem($cx, 1))?;\n        if !buf.is_empty() {\n            buf[0]\n        } else {\n            return Poll::Ready(Err(io::Error::new(io::ErrorKind::UnexpectedEof,\n                                      \"unexpected EOF during chunk size line\")));\n        }\n    })\n);\n\nmacro_rules! 
or_overflow {\n    ($e:expr) => (\n        match $e {\n            Some(val) => val,\n            None => return Poll::Ready(Err(io::Error::new(\n                io::ErrorKind::InvalidData,\n                \"invalid chunk size: overflow\",\n            ))),\n        }\n    )\n}\n\nmacro_rules! put_u8 {\n    ($trailers_buf:expr, $byte:expr, $limit:expr) => {\n        $trailers_buf.put_u8($byte);\n\n        if $trailers_buf.len() >= $limit {\n            return Poll::Ready(Err(io::Error::new(\n                io::ErrorKind::InvalidData,\n                \"chunk trailers bytes over limit\",\n            )));\n        }\n    };\n}\n\n// ===== impl ChunkedState =====\n\nimpl ChunkedState {\n    fn new() -> ChunkedState {\n        ChunkedState::Start\n    }\n\n    #[allow(clippy::too_many_arguments)]\n    fn step<R: MemRead>(\n        &self,\n        cx: &mut Context<'_>,\n        body: &mut R,\n        step: StepArgs<'_>,\n    ) -> Poll<Result<ChunkedState, io::Error>> {\n        use self::ChunkedState::*;\n        match *self {\n            Start => ChunkedState::read_start(cx, body, step.chunk_size),\n            Size => ChunkedState::read_size(cx, body, step.chunk_size),\n            SizeLws => ChunkedState::read_size_lws(cx, body),\n            Extension => ChunkedState::read_extension(cx, body, step.extensions_cnt),\n            SizeLf => ChunkedState::read_size_lf(cx, body, *step.chunk_size),\n            Body => ChunkedState::read_body(cx, body, step.chunk_size, step.chunk_buf),\n            BodyCr => ChunkedState::read_body_cr(cx, body),\n            BodyLf => ChunkedState::read_body_lf(cx, body),\n            Trailer => {\n                ChunkedState::read_trailer(cx, body, step.trailers_buf, step.max_headers_bytes)\n            }\n            TrailerLf => ChunkedState::read_trailer_lf(\n                cx,\n                body,\n                step.trailers_buf,\n                step.trailers_cnt,\n                step.max_headers_cnt,\n                
step.max_headers_bytes,\n            ),\n            EndCr => ChunkedState::read_end_cr(cx, body, step.trailers_buf, step.max_headers_bytes),\n            EndLf => ChunkedState::read_end_lf(cx, body, step.trailers_buf, step.max_headers_bytes),\n            End => Poll::Ready(Ok(ChunkedState::End)),\n        }\n    }\n\n    fn read_start<R: MemRead>(\n        cx: &mut Context<'_>,\n        rdr: &mut R,\n        size: &mut u64,\n    ) -> Poll<Result<ChunkedState, io::Error>> {\n        trace!(\"Read chunk start\");\n\n        let radix = 16;\n        match byte!(rdr, cx) {\n            b @ b'0'..=b'9' => {\n                *size = or_overflow!(size.checked_mul(radix));\n                *size = or_overflow!(size.checked_add((b - b'0') as u64));\n            }\n            b @ b'a'..=b'f' => {\n                *size = or_overflow!(size.checked_mul(radix));\n                *size = or_overflow!(size.checked_add((b + 10 - b'a') as u64));\n            }\n            b @ b'A'..=b'F' => {\n                *size = or_overflow!(size.checked_mul(radix));\n                *size = or_overflow!(size.checked_add((b + 10 - b'A') as u64));\n            }\n            _ => {\n                return Poll::Ready(Err(io::Error::new(\n                    io::ErrorKind::InvalidInput,\n                    \"Invalid chunk size line: missing size digit\",\n                )));\n            }\n        }\n\n        Poll::Ready(Ok(ChunkedState::Size))\n    }\n\n    fn read_size<R: MemRead>(\n        cx: &mut Context<'_>,\n        rdr: &mut R,\n        size: &mut u64,\n    ) -> Poll<Result<ChunkedState, io::Error>> {\n        trace!(\"Read chunk hex size\");\n\n        let radix = 16;\n        match byte!(rdr, cx) {\n            b @ b'0'..=b'9' => {\n                *size = or_overflow!(size.checked_mul(radix));\n                *size = or_overflow!(size.checked_add((b - b'0') as u64));\n            }\n            b @ b'a'..=b'f' => {\n                *size = 
or_overflow!(size.checked_mul(radix));\n                *size = or_overflow!(size.checked_add((b + 10 - b'a') as u64));\n            }\n            b @ b'A'..=b'F' => {\n                *size = or_overflow!(size.checked_mul(radix));\n                *size = or_overflow!(size.checked_add((b + 10 - b'A') as u64));\n            }\n            b'\\t' | b' ' => return Poll::Ready(Ok(ChunkedState::SizeLws)),\n            b';' => return Poll::Ready(Ok(ChunkedState::Extension)),\n            b'\\r' => return Poll::Ready(Ok(ChunkedState::SizeLf)),\n            _ => {\n                return Poll::Ready(Err(io::Error::new(\n                    io::ErrorKind::InvalidInput,\n                    \"Invalid chunk size line: Invalid Size\",\n                )));\n            }\n        }\n        Poll::Ready(Ok(ChunkedState::Size))\n    }\n    fn read_size_lws<R: MemRead>(\n        cx: &mut Context<'_>,\n        rdr: &mut R,\n    ) -> Poll<Result<ChunkedState, io::Error>> {\n        trace!(\"read_size_lws\");\n        match byte!(rdr, cx) {\n            // LWS can follow the chunk size, but no more digits can come\n            b'\\t' | b' ' => Poll::Ready(Ok(ChunkedState::SizeLws)),\n            b';' => Poll::Ready(Ok(ChunkedState::Extension)),\n            b'\\r' => Poll::Ready(Ok(ChunkedState::SizeLf)),\n            _ => Poll::Ready(Err(io::Error::new(\n                io::ErrorKind::InvalidInput,\n                \"Invalid chunk size linear white space\",\n            ))),\n        }\n    }\n    fn read_extension<R: MemRead>(\n        cx: &mut Context<'_>,\n        rdr: &mut R,\n        extensions_cnt: &mut u64,\n    ) -> Poll<Result<ChunkedState, io::Error>> {\n        trace!(\"read_extension\");\n        // We don't care about extensions really at all. 
Just ignore them.\n        // They \"end\" at the next CRLF.\n        //\n        // However, some implementations may not check for the CR, so to save\n        // them from themselves, we reject extensions containing plain LF as\n        // well.\n        match byte!(rdr, cx) {\n            b'\\r' => Poll::Ready(Ok(ChunkedState::SizeLf)),\n            b'\\n' => Poll::Ready(Err(io::Error::new(\n                io::ErrorKind::InvalidData,\n                \"invalid chunk extension contains newline\",\n            ))),\n            _ => {\n                *extensions_cnt += 1;\n                if *extensions_cnt >= CHUNKED_EXTENSIONS_LIMIT {\n                    Poll::Ready(Err(io::Error::new(\n                        io::ErrorKind::InvalidData,\n                        \"chunk extensions over limit\",\n                    )))\n                } else {\n                    Poll::Ready(Ok(ChunkedState::Extension))\n                }\n            } // no supported extensions\n        }\n    }\n    fn read_size_lf<R: MemRead>(\n        cx: &mut Context<'_>,\n        rdr: &mut R,\n        size: u64,\n    ) -> Poll<Result<ChunkedState, io::Error>> {\n        trace!(\"Chunk size is {:?}\", size);\n        match byte!(rdr, cx) {\n            b'\\n' => {\n                if size == 0 {\n                    Poll::Ready(Ok(ChunkedState::EndCr))\n                } else {\n                    debug!(\"incoming chunked header: {0:#X} ({0} bytes)\", size);\n                    Poll::Ready(Ok(ChunkedState::Body))\n                }\n            }\n            _ => Poll::Ready(Err(io::Error::new(\n                io::ErrorKind::InvalidInput,\n                \"Invalid chunk size LF\",\n            ))),\n        }\n    }\n\n    fn read_body<R: MemRead>(\n        cx: &mut Context<'_>,\n        rdr: &mut R,\n        rem: &mut u64,\n        buf: &mut Option<Bytes>,\n    ) -> Poll<Result<ChunkedState, io::Error>> {\n        trace!(\"Chunked read, remaining={:?}\", rem);\n\n        // cap 
remaining bytes at the max capacity of usize\n        let rem_cap = match *rem {\n            r if r > usize::MAX as u64 => usize::MAX,\n            r => r as usize,\n        };\n\n        let to_read = rem_cap;\n        let slice = ready!(rdr.read_mem(cx, to_read))?;\n        let count = slice.len();\n\n        if count == 0 {\n            *rem = 0;\n            return Poll::Ready(Err(io::Error::new(\n                io::ErrorKind::UnexpectedEof,\n                IncompleteBody,\n            )));\n        }\n        *buf = Some(slice);\n        *rem -= count as u64;\n\n        if *rem > 0 {\n            Poll::Ready(Ok(ChunkedState::Body))\n        } else {\n            Poll::Ready(Ok(ChunkedState::BodyCr))\n        }\n    }\n    fn read_body_cr<R: MemRead>(\n        cx: &mut Context<'_>,\n        rdr: &mut R,\n    ) -> Poll<Result<ChunkedState, io::Error>> {\n        match byte!(rdr, cx) {\n            b'\\r' => Poll::Ready(Ok(ChunkedState::BodyLf)),\n            _ => Poll::Ready(Err(io::Error::new(\n                io::ErrorKind::InvalidInput,\n                \"Invalid chunk body CR\",\n            ))),\n        }\n    }\n    fn read_body_lf<R: MemRead>(\n        cx: &mut Context<'_>,\n        rdr: &mut R,\n    ) -> Poll<Result<ChunkedState, io::Error>> {\n        match byte!(rdr, cx) {\n            b'\\n' => Poll::Ready(Ok(ChunkedState::Start)),\n            _ => Poll::Ready(Err(io::Error::new(\n                io::ErrorKind::InvalidInput,\n                \"Invalid chunk body LF\",\n            ))),\n        }\n    }\n\n    fn read_trailer<R: MemRead>(\n        cx: &mut Context<'_>,\n        rdr: &mut R,\n        trailers_buf: &mut Option<BytesMut>,\n        h1_max_header_size: usize,\n    ) -> Poll<Result<ChunkedState, io::Error>> {\n        trace!(\"read_trailer\");\n        let byte = byte!(rdr, cx);\n\n        put_u8!(\n            trailers_buf.as_mut().expect(\"trailers_buf is None\"),\n            byte,\n            h1_max_header_size\n        );\n\n     
   match byte {\n            b'\\r' => Poll::Ready(Ok(ChunkedState::TrailerLf)),\n            _ => Poll::Ready(Ok(ChunkedState::Trailer)),\n        }\n    }\n\n    fn read_trailer_lf<R: MemRead>(\n        cx: &mut Context<'_>,\n        rdr: &mut R,\n        trailers_buf: &mut Option<BytesMut>,\n        trailers_cnt: &mut usize,\n        h1_max_headers: usize,\n        h1_max_header_size: usize,\n    ) -> Poll<Result<ChunkedState, io::Error>> {\n        let byte = byte!(rdr, cx);\n        match byte {\n            b'\\n' => {\n                if *trailers_cnt >= h1_max_headers {\n                    return Poll::Ready(Err(io::Error::new(\n                        io::ErrorKind::InvalidData,\n                        \"chunk trailers count overflow\",\n                    )));\n                }\n                *trailers_cnt += 1;\n\n                put_u8!(\n                    trailers_buf.as_mut().expect(\"trailers_buf is None\"),\n                    byte,\n                    h1_max_header_size\n                );\n\n                Poll::Ready(Ok(ChunkedState::EndCr))\n            }\n            _ => Poll::Ready(Err(io::Error::new(\n                io::ErrorKind::InvalidInput,\n                \"Invalid trailer end LF\",\n            ))),\n        }\n    }\n\n    fn read_end_cr<R: MemRead>(\n        cx: &mut Context<'_>,\n        rdr: &mut R,\n        trailers_buf: &mut Option<BytesMut>,\n        h1_max_header_size: usize,\n    ) -> Poll<Result<ChunkedState, io::Error>> {\n        let byte = byte!(rdr, cx);\n        match byte {\n            b'\\r' => {\n                if let Some(trailers_buf) = trailers_buf {\n                    put_u8!(trailers_buf, byte, h1_max_header_size);\n                }\n                Poll::Ready(Ok(ChunkedState::EndLf))\n            }\n            byte => {\n                match trailers_buf {\n                    None => {\n                        // 64 will fit a single Expires header without reallocating\n                     
   let mut buf = BytesMut::with_capacity(64);\n                        buf.put_u8(byte);\n                        *trailers_buf = Some(buf);\n                    }\n                    Some(trailers_buf) => {\n                        put_u8!(trailers_buf, byte, h1_max_header_size);\n                    }\n                }\n\n                Poll::Ready(Ok(ChunkedState::Trailer))\n            }\n        }\n    }\n    fn read_end_lf<R: MemRead>(\n        cx: &mut Context<'_>,\n        rdr: &mut R,\n        trailers_buf: &mut Option<BytesMut>,\n        h1_max_header_size: usize,\n    ) -> Poll<Result<ChunkedState, io::Error>> {\n        let byte = byte!(rdr, cx);\n        match byte {\n            b'\\n' => {\n                if let Some(trailers_buf) = trailers_buf {\n                    put_u8!(trailers_buf, byte, h1_max_header_size);\n                }\n                Poll::Ready(Ok(ChunkedState::End))\n            }\n            _ => Poll::Ready(Err(io::Error::new(\n                io::ErrorKind::InvalidInput,\n                \"Invalid chunk end LF\",\n            ))),\n        }\n    }\n}\n\n// TODO: disallow Transfer-Encoding, Content-Length, Trailer, etc in trailers ??\nfn decode_trailers(buf: &mut BytesMut, count: usize) -> Result<HeaderMap, io::Error> {\n    let mut trailers = HeaderMap::new();\n    let mut headers = vec![httparse::EMPTY_HEADER; count];\n    let res = httparse::parse_headers(buf, &mut headers);\n    match res {\n        Ok(httparse::Status::Complete((_, headers))) => {\n            for header in headers.iter() {\n                use std::convert::TryFrom;\n                let name = match HeaderName::try_from(header.name) {\n                    Ok(name) => name,\n                    Err(_) => {\n                        return Err(io::Error::new(\n                            io::ErrorKind::InvalidInput,\n                            format!(\"Invalid header name: {:?}\", &header),\n                        ));\n                    }\n         
       };\n\n                let value = match HeaderValue::from_bytes(header.value) {\n                    Ok(value) => value,\n                    Err(_) => {\n                        return Err(io::Error::new(\n                            io::ErrorKind::InvalidInput,\n                            format!(\"Invalid header value: {:?}\", &header),\n                        ));\n                    }\n                };\n\n                trailers.insert(name, value);\n            }\n\n            Ok(trailers)\n        }\n        Ok(httparse::Status::Partial) => Err(io::Error::new(\n            io::ErrorKind::InvalidInput,\n            \"Partial header\",\n        )),\n        Err(e) => Err(io::Error::new(io::ErrorKind::InvalidInput, e)),\n    }\n}\n\n#[derive(Debug)]\nstruct IncompleteBody;\n\n// === impl IncompleteBody ===\n\nimpl fmt::Display for IncompleteBody {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        write!(f, \"end of file before message length reached\")\n    }\n}\n\nimpl StdError for IncompleteBody {}\n\n#[cfg(test)]\nmod tests {\n    use std::{pin::Pin, time::Duration};\n\n    use tokio::io::{AsyncRead, ReadBuf};\n\n    use super::*;\n\n    impl MemRead for &[u8] {\n        fn read_mem(&mut self, _: &mut Context<'_>, len: usize) -> Poll<io::Result<Bytes>> {\n            let n = std::cmp::min(len, self.len());\n            if n > 0 {\n                let (a, b) = self.split_at(n);\n                let buf = Bytes::copy_from_slice(a);\n                *self = b;\n                Poll::Ready(Ok(buf))\n            } else {\n                Poll::Ready(Ok(Bytes::new()))\n            }\n        }\n    }\n\n    impl MemRead for &mut (dyn AsyncRead + Unpin) {\n        fn read_mem(&mut self, cx: &mut Context<'_>, len: usize) -> Poll<io::Result<Bytes>> {\n            let mut v = vec![0; len];\n            let mut buf = ReadBuf::new(&mut v);\n            ready!(Pin::new(self).poll_read(cx, &mut buf)?);\n            
Poll::Ready(Ok(Bytes::copy_from_slice(buf.filled())))\n        }\n    }\n\n    impl MemRead for Bytes {\n        fn read_mem(&mut self, _: &mut Context<'_>, len: usize) -> Poll<io::Result<Bytes>> {\n            let n = std::cmp::min(len, self.len());\n            let ret = self.split_to(n);\n            Poll::Ready(Ok(ret))\n        }\n    }\n\n    #[tokio::test]\n    async fn test_read_chunk_size() {\n        use std::io::ErrorKind::{InvalidData, InvalidInput, UnexpectedEof};\n\n        async fn read(s: &str) -> u64 {\n            let mut state = ChunkedState::new();\n            let rdr = &mut s.as_bytes();\n            let mut size = 0;\n            let mut ext_cnt = 0;\n            let mut trailers_cnt = 0;\n            loop {\n                let result = std::future::poll_fn(|cx| {\n                    state.step(\n                        cx,\n                        rdr,\n                        StepArgs {\n                            chunk_size: &mut size,\n                            extensions_cnt: &mut ext_cnt,\n                            chunk_buf: &mut None,\n                            trailers_buf: &mut None,\n                            trailers_cnt: &mut trailers_cnt,\n                            max_headers_cnt: DEFAULT_MAX_HEADERS,\n                            max_headers_bytes: TRAILER_LIMIT,\n                        },\n                    )\n                })\n                .await;\n                let desc = format!(\"read_size failed for {s:?}\");\n                state = result.expect(&desc);\n                if state == ChunkedState::Body || state == ChunkedState::EndCr {\n                    break;\n                }\n            }\n            size\n        }\n\n        async fn read_err(s: &str, expected_err: io::ErrorKind) {\n            let mut state = ChunkedState::new();\n            let rdr = &mut s.as_bytes();\n            let mut size = 0;\n            let mut ext_cnt = 0;\n            let mut trailers_cnt = 0;\n            
loop {\n                let result = std::future::poll_fn(|cx| {\n                    state.step(\n                        cx,\n                        rdr,\n                        StepArgs {\n                            chunk_size: &mut size,\n                            extensions_cnt: &mut ext_cnt,\n                            chunk_buf: &mut None,\n                            trailers_buf: &mut None,\n                            trailers_cnt: &mut trailers_cnt,\n                            max_headers_cnt: DEFAULT_MAX_HEADERS,\n                            max_headers_bytes: TRAILER_LIMIT,\n                        },\n                    )\n                })\n                .await;\n                state = match result {\n                    Ok(s) => s,\n                    Err(e) => {\n                        assert!(\n                            expected_err == e.kind(),\n                            \"Reading {:?}, expected {:?}, but got {:?}\",\n                            s,\n                            expected_err,\n                            e.kind()\n                        );\n                        return;\n                    }\n                };\n                if state == ChunkedState::Body || state == ChunkedState::End {\n                    panic!(\"Was Ok. 
Expected Err for {s:?}\");\n                }\n            }\n        }\n\n        assert_eq!(1, read(\"1\\r\\n\").await);\n        assert_eq!(1, read(\"01\\r\\n\").await);\n        assert_eq!(0, read(\"0\\r\\n\").await);\n        assert_eq!(0, read(\"00\\r\\n\").await);\n        assert_eq!(10, read(\"A\\r\\n\").await);\n        assert_eq!(10, read(\"a\\r\\n\").await);\n        assert_eq!(255, read(\"Ff\\r\\n\").await);\n        assert_eq!(255, read(\"Ff   \\r\\n\").await);\n        // Missing LF or CRLF\n        read_err(\"F\\rF\", InvalidInput).await;\n        read_err(\"F\", UnexpectedEof).await;\n        // Missing digit\n        read_err(\"\\r\\n\\r\\n\", InvalidInput).await;\n        read_err(\"\\r\\n\", InvalidInput).await;\n        // Invalid hex digit\n        read_err(\"X\\r\\n\", InvalidInput).await;\n        read_err(\"1X\\r\\n\", InvalidInput).await;\n        read_err(\"-\\r\\n\", InvalidInput).await;\n        read_err(\"-1\\r\\n\", InvalidInput).await;\n        // Acceptable (if not fully valid) extensions do not influence the size\n        assert_eq!(1, read(\"1;extension\\r\\n\").await);\n        assert_eq!(10, read(\"a;ext name=value\\r\\n\").await);\n        assert_eq!(1, read(\"1;extension;extension2\\r\\n\").await);\n        assert_eq!(1, read(\"1;;;  ;\\r\\n\").await);\n        assert_eq!(2, read(\"2; extension...\\r\\n\").await);\n        assert_eq!(3, read(\"3   ; extension=123\\r\\n\").await);\n        assert_eq!(3, read(\"3   ;\\r\\n\").await);\n        assert_eq!(3, read(\"3   ;   \\r\\n\").await);\n        // Invalid extensions cause an error\n        read_err(\"1 invalid extension\\r\\n\", InvalidInput).await;\n        read_err(\"1 A\\r\\n\", InvalidInput).await;\n        read_err(\"1;no CRLF\", UnexpectedEof).await;\n        read_err(\"1;reject\\nnewlines\\r\\n\", InvalidData).await;\n        // Overflow\n        read_err(\"f0000000000000003\\r\\n\", InvalidData).await;\n    }\n\n    #[tokio::test]\n    async fn 
test_read_sized_early_eof() {\n        let mut bytes = &b\"foo bar\"[..];\n        let mut decoder = Decoder::length(10);\n        assert_eq!(\n            decoder\n                .decode_fut(&mut bytes)\n                .await\n                .unwrap()\n                .data_ref()\n                .unwrap()\n                .len(),\n            7\n        );\n        let e = decoder.decode_fut(&mut bytes).await.unwrap_err();\n        assert_eq!(e.kind(), io::ErrorKind::UnexpectedEof);\n    }\n\n    #[tokio::test]\n    async fn test_read_chunked_early_eof() {\n        let mut bytes = &b\"\\\n            9\\r\\n\\\n            foo bar\\\n        \"[..];\n        let mut decoder = Decoder::chunked(None, None);\n        assert_eq!(\n            decoder\n                .decode_fut(&mut bytes)\n                .await\n                .unwrap()\n                .data_ref()\n                .unwrap()\n                .len(),\n            7\n        );\n        let e = decoder.decode_fut(&mut bytes).await.unwrap_err();\n        assert_eq!(e.kind(), io::ErrorKind::UnexpectedEof);\n    }\n\n    #[tokio::test]\n    async fn test_read_chunked_single_read() {\n        let mut mock_buf = &b\"10\\r\\n1234567890abcdef\\r\\n0\\r\\n\"[..];\n        let buf = Decoder::chunked(None, None)\n            .decode_fut(&mut mock_buf)\n            .await\n            .expect(\"decode\")\n            .into_data()\n            .expect(\"unknown frame type\");\n        assert_eq!(16, buf.len());\n        let result = String::from_utf8(buf.as_ref().to_vec()).expect(\"decode String\");\n        assert_eq!(\"1234567890abcdef\", &result);\n    }\n\n    #[tokio::test]\n    async fn test_read_chunked_with_missing_zero_digit() {\n        // After reading a valid chunk, the ending is missing a zero.\n        let mut mock_buf = &b\"1\\r\\nZ\\r\\n\\r\\n\\r\\n\"[..];\n        let mut decoder = Decoder::chunked(None, None);\n        let buf = decoder\n            .decode_fut(&mut mock_buf)\n            
.await\n            .expect(\"decode\")\n            .into_data()\n            .expect(\"unknown frame type\");\n        assert_eq!(\"Z\", buf);\n\n        let err = decoder\n            .decode_fut(&mut mock_buf)\n            .await\n            .expect_err(\"decode 2\");\n        assert_eq!(err.kind(), io::ErrorKind::InvalidInput);\n    }\n\n    #[tokio::test]\n    async fn test_read_chunked_extensions_over_limit() {\n        // construct a chunked body where each individual chunked extension\n        // is totally fine, but combined is over the limit.\n        let per_chunk = super::CHUNKED_EXTENSIONS_LIMIT * 2 / 3;\n        let mut scratch = vec![];\n        for _ in 0..2 {\n            scratch.extend(b\"1;\");\n            scratch.extend(b\"x\".repeat(per_chunk as usize));\n            scratch.extend(b\"\\r\\nA\\r\\n\");\n        }\n        scratch.extend(b\"0\\r\\n\\r\\n\");\n        let mut mock_buf = Bytes::from(scratch);\n\n        let mut decoder = Decoder::chunked(None, None);\n        let buf1 = decoder\n            .decode_fut(&mut mock_buf)\n            .await\n            .expect(\"decode1\")\n            .into_data()\n            .expect(\"unknown frame type\");\n        assert_eq!(&buf1[..], b\"A\");\n\n        let err = decoder\n            .decode_fut(&mut mock_buf)\n            .await\n            .expect_err(\"decode2\");\n        assert_eq!(err.kind(), io::ErrorKind::InvalidData);\n        assert_eq!(err.to_string(), \"chunk extensions over limit\");\n    }\n\n    #[tokio::test]\n    async fn test_read_chunked_trailer_with_missing_lf() {\n        let mut mock_buf = &b\"10\\r\\n1234567890abcdef\\r\\n0\\r\\nbad\\r\\r\\n\"[..];\n        let mut decoder = Decoder::chunked(None, None);\n        decoder.decode_fut(&mut mock_buf).await.expect(\"decode\");\n        let e = decoder.decode_fut(&mut mock_buf).await.unwrap_err();\n        assert_eq!(e.kind(), io::ErrorKind::InvalidInput);\n    }\n\n    #[tokio::test]\n    async fn 
test_read_chunked_after_eof() {\n        let mut mock_buf = &b\"10\\r\\n1234567890abcdef\\r\\n0\\r\\n\\r\\n\"[..];\n        let mut decoder = Decoder::chunked(None, None);\n\n        // normal read\n        let buf = decoder\n            .decode_fut(&mut mock_buf)\n            .await\n            .unwrap()\n            .into_data()\n            .expect(\"unknown frame type\");\n        assert_eq!(16, buf.len());\n        let result = String::from_utf8(buf.as_ref().to_vec()).expect(\"decode String\");\n        assert_eq!(\"1234567890abcdef\", &result);\n\n        // eof read\n        let buf = decoder\n            .decode_fut(&mut mock_buf)\n            .await\n            .expect(\"decode\")\n            .into_data()\n            .expect(\"unknown frame type\");\n        assert_eq!(0, buf.len());\n\n        // ensure read after eof also returns eof\n        let buf = decoder\n            .decode_fut(&mut mock_buf)\n            .await\n            .expect(\"decode\")\n            .into_data()\n            .expect(\"unknown frame type\");\n        assert_eq!(0, buf.len());\n    }\n\n    // perform an async read using a custom buffer size and causing a blocking\n    // read at the specified byte\n    async fn read_async(mut decoder: Decoder, content: &[u8], block_at: usize) -> String {\n        let mut outs = Vec::new();\n\n        let mut ins = if block_at == 0 {\n            tokio_test::io::Builder::new()\n                .wait(Duration::from_millis(10))\n                .read(content)\n                .build()\n        } else {\n            tokio_test::io::Builder::new()\n                .read(&content[..block_at])\n                .wait(Duration::from_millis(10))\n                .read(&content[block_at..])\n                .build()\n        };\n\n        let mut ins = &mut ins as &mut (dyn AsyncRead + Unpin);\n\n        loop {\n            let buf = decoder\n                .decode_fut(&mut ins)\n                .await\n                .expect(\"unexpected decode 
error\")\n                .into_data()\n                .expect(\"unexpected frame type\");\n            if buf.is_empty() {\n                break; // eof\n            }\n            outs.extend(buf.as_ref());\n        }\n\n        String::from_utf8(outs).expect(\"decode String\")\n    }\n\n    // iterate over the different ways that this async read could go.\n    // tests blocking a read at each byte along the content - The shotgun approach\n    async fn all_async_cases(content: &str, expected: &str, decoder: Decoder) {\n        let content_len = content.len();\n        for block_at in 0..content_len {\n            let actual = read_async(decoder.clone(), content.as_bytes(), block_at).await;\n            assert_eq!(expected, &actual) //, \"Failed async. Blocking at {}\", block_at);\n        }\n    }\n\n    #[tokio::test]\n    async fn test_read_length_async() {\n        let content = \"foobar\";\n        all_async_cases(content, content, Decoder::length(content.len() as u64)).await;\n    }\n\n    #[tokio::test]\n    async fn test_read_chunked_async() {\n        let content = \"3\\r\\nfoo\\r\\n3\\r\\nbar\\r\\n0\\r\\n\\r\\n\";\n        let expected = \"foobar\";\n        all_async_cases(content, expected, Decoder::chunked(None, None)).await;\n    }\n\n    #[tokio::test]\n    async fn test_read_eof_async() {\n        let content = \"foobar\";\n        all_async_cases(content, content, Decoder::eof()).await;\n    }\n\n    #[test]\n    fn test_decode_trailers() {\n        let mut buf = BytesMut::new();\n        buf.extend_from_slice(\n            b\"Expires: Wed, 21 Oct 2015 07:28:00 GMT\\r\\nX-Stream-Error: failed to decode\\r\\n\\r\\n\",\n        );\n        let headers = decode_trailers(&mut buf, 2).expect(\"decode_trailers\");\n        assert_eq!(headers.len(), 2);\n        assert_eq!(\n            headers.get(\"Expires\").unwrap(),\n            \"Wed, 21 Oct 2015 07:28:00 GMT\"\n        );\n        assert_eq!(headers.get(\"X-Stream-Error\").unwrap(), \"failed to 
decode\");\n    }\n\n    #[tokio::test]\n    async fn test_trailer_max_headers_enforced() {\n        let h1_max_headers = 10;\n        let mut scratch = vec![];\n        scratch.extend(b\"10\\r\\n1234567890abcdef\\r\\n0\\r\\n\");\n        for i in 0..h1_max_headers {\n            scratch.extend(format!(\"trailer{i}: {i}\\r\\n\").as_bytes());\n        }\n        scratch.extend(b\"\\r\\n\");\n        let mut mock_buf = Bytes::from(scratch);\n\n        let mut decoder = Decoder::chunked(Some(h1_max_headers), None);\n\n        // ready chunked body\n        let buf = decoder\n            .decode_fut(&mut mock_buf)\n            .await\n            .unwrap()\n            .into_data()\n            .expect(\"unknown frame type\");\n        assert_eq!(16, buf.len());\n\n        // eof read\n        let err = decoder\n            .decode_fut(&mut mock_buf)\n            .await\n            .expect_err(\"trailer fields over limit\");\n        assert_eq!(err.kind(), io::ErrorKind::InvalidData);\n    }\n\n    #[tokio::test]\n    async fn test_trailer_max_header_size_huge_trailer() {\n        let max_header_size = 1024;\n        let mut scratch = vec![];\n        scratch.extend(b\"10\\r\\n1234567890abcdef\\r\\n0\\r\\n\");\n        scratch.extend(format!(\"huge_trailer: {}\\r\\n\", \"x\".repeat(max_header_size)).as_bytes());\n        scratch.extend(b\"\\r\\n\");\n        let mut mock_buf = Bytes::from(scratch);\n\n        let mut decoder = Decoder::chunked(None, Some(max_header_size));\n\n        // ready chunked body\n        let buf = decoder\n            .decode_fut(&mut mock_buf)\n            .await\n            .unwrap()\n            .into_data()\n            .expect(\"unknown frame type\");\n        assert_eq!(16, buf.len());\n\n        // eof read\n        let err = decoder\n            .decode_fut(&mut mock_buf)\n            .await\n            .expect_err(\"trailers over limit\");\n        assert_eq!(err.kind(), io::ErrorKind::InvalidData);\n    }\n\n    #[tokio::test]\n  
  async fn test_trailer_max_header_size_many_small_trailers() {\n        let max_headers = 10;\n        let header_size = 64;\n        let mut scratch = vec![];\n        scratch.extend(b\"10\\r\\n1234567890abcdef\\r\\n0\\r\\n\");\n\n        for i in 0..max_headers {\n            scratch.extend(format!(\"trailer{}: {}\\r\\n\", i, \"x\".repeat(header_size)).as_bytes());\n        }\n\n        scratch.extend(b\"\\r\\n\");\n        let mut mock_buf = Bytes::from(scratch);\n\n        let mut decoder = Decoder::chunked(None, Some(max_headers * header_size));\n\n        // ready chunked body\n        let buf = decoder\n            .decode_fut(&mut mock_buf)\n            .await\n            .unwrap()\n            .into_data()\n            .expect(\"unknown frame type\");\n        assert_eq!(16, buf.len());\n\n        // eof read\n        let err = decoder\n            .decode_fut(&mut mock_buf)\n            .await\n            .expect_err(\"trailers over limit\");\n        assert_eq!(err.kind(), io::ErrorKind::InvalidData);\n    }\n}\n"
  },
  {
    "path": "src/client/core/proto/http1/dispatch.rs",
    "content": "use std::{\n    convert::Infallible,\n    future::Future,\n    marker::Unpin,\n    pin::Pin,\n    task::{Context, Poll, ready},\n};\n\nuse bytes::{Buf, Bytes};\nuse http::Request;\nuse http_body::Body;\nuse tokio::io::{AsyncRead, AsyncWrite};\n\nuse super::{BodyLength, Conn, Http1Transaction, MessageHead, Wants};\nuse crate::client::core::{\n    Error, Result,\n    body::{self, DecodedLength, Incoming},\n    dispatch::{self, TrySendError},\n    error::BoxError,\n    proto::{self, Dispatched, RequestHead},\n    upgrade::OnUpgrade,\n};\n\npub(crate) struct Dispatcher<D, Bs: Body, I, T> {\n    conn: Conn<I, Bs::Data, T>,\n    dispatch: D,\n    body_tx: SenderGuard,\n    body_rx: Pin<Box<Option<Bs>>>,\n    is_closing: bool,\n}\n\npub(crate) trait Dispatch {\n    type PollItem;\n    type PollBody;\n    type PollError;\n    type RecvItem;\n\n    #[allow(clippy::type_complexity)]\n    fn poll_msg(\n        self: Pin<&mut Self>,\n        cx: &mut Context<'_>,\n    ) -> Poll<Option<Result<(Self::PollItem, Self::PollBody), Self::PollError>>>;\n\n    fn recv_msg(&mut self, msg: Result<(Self::RecvItem, Incoming)>) -> Result<()>;\n\n    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), ()>>;\n\n    fn should_poll(&self) -> bool;\n}\n\npin_project_lite::pin_project! 
{\n    pub(crate) struct Client<B> {\n        callback: Option<dispatch::Callback<Request<B>, http::Response<Incoming>>>,\n        #[pin]\n        rx: ClientRx<B>,\n        rx_closed: bool,\n    }\n}\n\ntype ClientRx<B> = dispatch::Receiver<Request<B>, http::Response<Incoming>>;\n\nimpl<D, Bs, I, T> Dispatcher<D, Bs, I, T>\nwhere\n    D: Dispatch<\n            PollItem = MessageHead<T::Outgoing>,\n            PollBody = Bs,\n            RecvItem = MessageHead<T::Incoming>,\n        > + Unpin,\n    D::PollError: Into<BoxError>,\n    I: AsyncRead + AsyncWrite + Unpin,\n    T: Http1Transaction + Unpin,\n    Bs: Body + 'static,\n    Bs::Error: Into<BoxError>,\n{\n    #[inline]\n    pub(crate) fn new(dispatch: D, conn: Conn<I, Bs::Data, T>) -> Self {\n        Dispatcher {\n            conn,\n            dispatch,\n            body_tx: SenderGuard(None),\n            body_rx: Box::pin(None),\n            is_closing: false,\n        }\n    }\n\n    #[inline]\n    pub(crate) fn into_inner(self) -> (I, Bytes, D) {\n        let (io, buf) = self.conn.into_inner();\n        (io, buf, self.dispatch)\n    }\n\n    fn poll_catch(\n        &mut self,\n        cx: &mut Context<'_>,\n        should_shutdown: bool,\n    ) -> Poll<Result<Dispatched>> {\n        Poll::Ready(ready!(self.poll_inner(cx, should_shutdown)).or_else(|e| {\n            // Be sure to alert a streaming body of the failure.\n            if let Some(mut body) = self.body_tx.take() {\n                body.send_error(Error::new_body(\"connection error\"))\n            }\n            // An error means we're shutting down either way.\n            // We just try to give the error to the user,\n            // and close the connection with an Ok. 
If we\n            // cannot give it to the user, then return the Err.\n            self.dispatch.recv_msg(Err(e))?;\n            Ok(Dispatched::Shutdown)\n        }))\n    }\n\n    fn poll_inner(\n        &mut self,\n        cx: &mut Context<'_>,\n        should_shutdown: bool,\n    ) -> Poll<Result<Dispatched>> {\n        T::update_date();\n\n        ready!(self.poll_loop(cx))?;\n\n        if self.is_done() {\n            if let Some(pending) = self.conn.pending_upgrade() {\n                self.conn.take_error()?;\n                return Poll::Ready(Ok(Dispatched::Upgrade(pending)));\n            } else if should_shutdown {\n                ready!(self.conn.poll_shutdown(cx)).map_err(Error::new_shutdown)?;\n            }\n            self.conn.take_error()?;\n            Poll::Ready(Ok(Dispatched::Shutdown))\n        } else {\n            Poll::Pending\n        }\n    }\n\n    fn poll_loop(&mut self, cx: &mut Context<'_>) -> Poll<Result<()>> {\n        // Limit the looping on this connection, in case it is ready far too\n        // often, so that other futures don't starve.\n        //\n        // 16 was chosen arbitrarily, as that is number of pipelined requests\n        // benchmarks often use. Perhaps it should be a config option instead.\n        for _ in 0..16 {\n            let _ = self.poll_read(cx)?;\n            let write_ready = self.poll_write(cx)?.is_ready();\n            let flush_ready = self.poll_flush(cx)?.is_ready();\n\n            // If we can write more body and the connection is ready, we should\n            // write again. 
If we return `Ready(Ok(())` here, we will yield\n            // without a guaranteed wake-up from the write side of the connection.\n            // This would lead to a deadlock if we also don't expect reads.\n            let wants_write_again = self.can_write_again() && (write_ready || flush_ready);\n\n            // This could happen if reading paused before blocking on IO,\n            // such as getting to the end of a framed message, but then\n            // writing/flushing set the state back to Init. In that case,\n            // if the read buffer still had bytes, we'd want to try poll_read\n            // again, or else we wouldn't ever be woken up again.\n            //\n            // Using this instead of task::current() and notify() inside\n            // the Conn is noticeably faster in pipelined benchmarks.\n            let wants_read_again = self.conn.wants_read_again();\n\n            // If we cannot write or read again, we yield and rely on the\n            // wake-up from the connection futures.\n            if !(wants_write_again || wants_read_again) {\n                return Poll::Ready(Ok(()));\n            }\n\n            // If we are continuing only because \"wants_write_again\", check if write is ready.\n            if !wants_read_again && wants_write_again {\n                // If write was ready, just proceed with the loop\n                if write_ready {\n                    continue;\n                }\n                // Write was previously pending, but may have become ready since polling flush, so\n                // we need to check it again. 
If we simply proceeded, the case of an unbuffered\n                // writer where flush is always ready would cause us to hot loop.\n                if self.poll_write(cx)?.is_pending() {\n                    // write is pending, so it is safe to yield and rely on wake-up from connection\n                    // futures.\n                    return Poll::Ready(Ok(()));\n                }\n            }\n        }\n\n        trace!(\"poll_loop yielding (self = {:p})\", self);\n\n        // Now we yield to allow other tasks to run.\n        cx.waker().wake_by_ref();\n        Poll::Pending\n    }\n\n    fn poll_read(&mut self, cx: &mut Context<'_>) -> Poll<Result<()>> {\n        loop {\n            if self.is_closing {\n                return Poll::Ready(Ok(()));\n            } else if self.conn.can_read_head() {\n                ready!(self.poll_read_head(cx))?;\n            } else if let Some(mut body) = self.body_tx.take() {\n                if self.conn.can_read_body() {\n                    match body.poll_ready(cx) {\n                        Poll::Ready(Ok(())) => (),\n                        Poll::Pending => {\n                            self.body_tx.set(body);\n                            return Poll::Pending;\n                        }\n                        Poll::Ready(Err(_canceled)) => {\n                            // user doesn't care about the body\n                            // so we should stop reading\n                            trace!(\"body receiver dropped before eof, draining or closing\");\n                            self.conn.poll_drain_or_close_read(cx);\n                            continue;\n                        }\n                    }\n                    match self.conn.poll_read_body(cx) {\n                        Poll::Ready(Some(Ok(frame))) => {\n                            if frame.is_data() {\n                                let chunk = frame.into_data().unwrap_or_else(|_| unreachable!());\n                                
match body.send_data(chunk) {\n                                    Ok(()) => {\n                                        self.body_tx.set(body);\n                                    }\n                                    Err(_canceled) => {\n                                        if self.conn.can_read_body() {\n                                            trace!(\"body receiver dropped before eof, closing\");\n                                            self.conn.close_read();\n                                        }\n                                    }\n                                }\n                            } else if frame.is_trailers() {\n                                let trailers =\n                                    frame.into_trailers().unwrap_or_else(|_| unreachable!());\n                                match body.send_trailers(trailers) {\n                                    Ok(()) => {\n                                        self.body_tx.set(body);\n                                    }\n                                    Err(_canceled) => {\n                                        if self.conn.can_read_body() {\n                                            trace!(\"body receiver dropped before eof, closing\");\n                                            self.conn.close_read();\n                                        }\n                                    }\n                                }\n                            } else {\n                                // we should have dropped all unknown frames in poll_read_body\n                                error!(\"unexpected frame\");\n                            }\n                        }\n                        Poll::Ready(None) => {\n                            // just drop, the body will close automatically\n                        }\n                        Poll::Pending => {\n                            self.body_tx.set(body);\n                            return Poll::Pending;\n    
                    }\n                        Poll::Ready(Some(Err(e))) => {\n                            body.send_error(Error::new_body(e));\n                        }\n                    }\n                } else {\n                    // just drop, the body will close automatically\n                }\n            } else {\n                return self.conn.poll_read_keep_alive(cx);\n            }\n        }\n    }\n\n    fn poll_read_head(&mut self, cx: &mut Context<'_>) -> Poll<Result<()>> {\n        // can dispatch receive, or does it still care about other incoming message?\n        match ready!(self.dispatch.poll_ready(cx)) {\n            Ok(()) => (),\n            Err(()) => {\n                trace!(\"dispatch no longer receiving messages\");\n                self.close();\n                return Poll::Ready(Ok(()));\n            }\n        }\n\n        // dispatch is ready for a message, try to read one\n        match ready!(self.conn.poll_read_head(cx)) {\n            Some(Ok((mut head, body_len, wants))) => {\n                let body = match body_len {\n                    DecodedLength::ZERO => Incoming::empty(),\n                    other => {\n                        let (tx, rx) = Incoming::h1(other, wants.contains(Wants::EXPECT));\n                        self.body_tx.set(tx);\n                        rx\n                    }\n                };\n                if wants.contains(Wants::UPGRADE) {\n                    let upgrade = self.conn.on_upgrade();\n                    debug_assert!(!upgrade.is_none(), \"empty upgrade\");\n                    debug_assert!(\n                        head.extensions.get::<OnUpgrade>().is_none(),\n                        \"OnUpgrade already set\"\n                    );\n                    head.extensions.insert(upgrade);\n                }\n                self.dispatch.recv_msg(Ok((head, body)))?;\n                Poll::Ready(Ok(()))\n            }\n            Some(Err(err)) => {\n                
debug!(\"read_head error: {}\", err);\n                self.dispatch.recv_msg(Err(err))?;\n                // if here, the dispatcher gave the user the error\n                // somewhere else. we still need to shutdown, but\n                // not as a second error.\n                self.close();\n                Poll::Ready(Ok(()))\n            }\n            None => {\n                // read eof, the write side will have been closed too unless\n                // allow_read_close was set to true, in which case just do\n                // nothing...\n                debug_assert!(self.conn.is_read_closed());\n                if self.conn.is_write_closed() {\n                    self.close();\n                }\n                Poll::Ready(Ok(()))\n            }\n        }\n    }\n\n    fn poll_write(&mut self, cx: &mut Context<'_>) -> Poll<Result<()>> {\n        loop {\n            if self.is_closing {\n                return Poll::Ready(Ok(()));\n            } else if self.body_rx.is_none()\n                && self.conn.can_write_head()\n                && self.dispatch.should_poll()\n            {\n                if let Some(msg) = ready!(Pin::new(&mut self.dispatch).poll_msg(cx)) {\n                    let (head, body) = msg.map_err(Error::new_user_service)?;\n\n                    let body_type = if body.is_end_stream() {\n                        self.body_rx.set(None);\n                        None\n                    } else {\n                        let btype = body\n                            .size_hint()\n                            .exact()\n                            .map(BodyLength::Known)\n                            .or(Some(BodyLength::Unknown));\n                        self.body_rx.set(Some(body));\n                        btype\n                    };\n                    self.conn.write_head(head, body_type);\n                } else {\n                    self.close();\n                    return Poll::Ready(Ok(()));\n                }\n   
         } else if !self.conn.can_buffer_body() {\n                ready!(self.poll_flush(cx))?;\n            } else {\n                // A new scope is needed :(\n                if let (Some(mut body), clear_body) =\n                    OptGuard::new(self.body_rx.as_mut()).guard_mut()\n                {\n                    debug_assert!(!*clear_body, \"opt guard defaults to keeping body\");\n                    if !self.conn.can_write_body() {\n                        trace!(\n                            \"no more write body allowed, user body is_end_stream = {}\",\n                            body.is_end_stream(),\n                        );\n                        *clear_body = true;\n                        continue;\n                    }\n\n                    let item = ready!(body.as_mut().poll_frame(cx));\n                    if let Some(item) = item {\n                        let frame = item.map_err(|e| {\n                            *clear_body = true;\n                            Error::new_user_body(e)\n                        })?;\n\n                        if frame.is_data() {\n                            let chunk = frame.into_data().unwrap_or_else(|_| unreachable!());\n                            let eos = body.is_end_stream();\n                            if eos {\n                                *clear_body = true;\n                                if chunk.remaining() == 0 {\n                                    trace!(\"discarding empty chunk\");\n                                    self.conn.end_body()?;\n                                } else {\n                                    self.conn.write_body_and_end(chunk);\n                                }\n                            } else {\n                                if chunk.remaining() == 0 {\n                                    trace!(\"discarding empty chunk\");\n                                    continue;\n                                }\n                                
self.conn.write_body(chunk);\n                            }\n                        } else if frame.is_trailers() {\n                            *clear_body = true;\n                            self.conn.write_trailers(\n                                frame.into_trailers().unwrap_or_else(|_| unreachable!()),\n                            );\n                        } else {\n                            trace!(\"discarding unknown frame\");\n                            continue;\n                        }\n                    } else {\n                        *clear_body = true;\n                        self.conn.end_body()?;\n                    }\n                } else {\n                    // If there's no body_rx, end the body\n                    if self.conn.can_write_body() {\n                        self.conn.end_body()?;\n                    } else {\n                        return Poll::Pending;\n                    }\n                }\n            }\n        }\n    }\n\n    #[inline]\n    fn poll_flush(&mut self, cx: &mut Context<'_>) -> Poll<Result<()>> {\n        self.conn.poll_flush(cx).map_err(|err| {\n            debug!(\"error writing: {}\", err);\n            Error::new_body_write(err)\n        })\n    }\n\n    #[inline]\n    fn close(&mut self) {\n        self.is_closing = true;\n        self.conn.close_read();\n        self.conn.close_write();\n    }\n\n    #[inline]\n    fn can_write_again(&mut self) -> bool {\n        self.body_rx.is_some()\n    }\n\n    fn is_done(&self) -> bool {\n        if self.is_closing {\n            return true;\n        }\n\n        let read_done = self.conn.is_read_closed();\n\n        if read_done {\n            // a client that cannot read may was well be done.\n            true\n        } else {\n            let write_done = self.conn.is_write_closed()\n                || (!self.dispatch.should_poll() && self.body_rx.is_none());\n            read_done && write_done\n        }\n    }\n}\n\nimpl<D, Bs, I, T> 
Future for Dispatcher<D, Bs, I, T>\nwhere\n    D: Dispatch<\n            PollItem = MessageHead<T::Outgoing>,\n            PollBody = Bs,\n            RecvItem = MessageHead<T::Incoming>,\n        > + Unpin,\n    D::PollError: Into<BoxError>,\n    I: AsyncRead + AsyncWrite + Unpin,\n    T: Http1Transaction + Unpin,\n    Bs: Body + 'static,\n    Bs::Error: Into<BoxError>,\n{\n    type Output = Result<Dispatched>;\n\n    #[inline]\n    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {\n        self.poll_catch(cx, true)\n    }\n}\n\n// ===== impl OptGuard =====\n\n/// A drop guard to allow a mutable borrow of an Option while being able to\n/// set whether the `Option` should be cleared on drop.\nstruct OptGuard<'a, T>(Pin<&'a mut Option<T>>, bool);\n\nimpl<'a, T> OptGuard<'a, T> {\n    #[inline]\n    fn new(pin: Pin<&'a mut Option<T>>) -> Self {\n        OptGuard(pin, false)\n    }\n\n    #[inline]\n    fn guard_mut(&mut self) -> (Option<Pin<&mut T>>, &mut bool) {\n        (self.0.as_mut().as_pin_mut(), &mut self.1)\n    }\n}\n\nimpl<T> Drop for OptGuard<'_, T> {\n    #[inline]\n    fn drop(&mut self) {\n        if self.1 {\n            self.0.set(None);\n        }\n    }\n}\n\n// ===== impl SenderGuard =====\n\n/// A guard for the body `Sender`.\n///\n/// If the `Dispatcher` future is dropped (e.g. 
the runtime driving the\n/// connection is shut down) while it still owns a body `Sender`, the guard\n/// sends an incomplete-message error so the receiver sees an error instead\n/// of a silent, clean end-of-stream.\nstruct SenderGuard(Option<body::Sender>);\n\nimpl SenderGuard {\n    #[inline]\n    fn set(&mut self, sender: body::Sender) {\n        self.0 = Some(sender);\n    }\n\n    #[inline]\n    fn take(&mut self) -> Option<body::Sender> {\n        self.0.take()\n    }\n}\n\nimpl Drop for SenderGuard {\n    #[inline]\n    fn drop(&mut self) {\n        if let Some(mut sender) = self.0.take() {\n            sender.send_error(Error::new_incomplete());\n        }\n    }\n}\n\n// ===== impl Client =====\n\nimpl<B> Client<B> {\n    #[inline]\n    pub(crate) fn new(rx: ClientRx<B>) -> Client<B> {\n        Client {\n            callback: None,\n            rx,\n            rx_closed: false,\n        }\n    }\n}\n\nimpl<B> Dispatch for Client<B>\nwhere\n    B: Body,\n{\n    type PollItem = RequestHead;\n    type PollBody = B;\n    type PollError = Infallible;\n    type RecvItem = proto::ResponseHead;\n\n    fn poll_msg(\n        mut self: Pin<&mut Self>,\n        cx: &mut Context<'_>,\n    ) -> Poll<Option<Result<(Self::PollItem, Self::PollBody), Infallible>>> {\n        let mut this = self.as_mut();\n        debug_assert!(!this.rx_closed);\n        match this.rx.poll_recv(cx) {\n            Poll::Ready(Some((req, mut cb))) => {\n                // check that future hasn't been canceled already\n                match cb.poll_canceled(cx) {\n                    Poll::Ready(()) => {\n                        trace!(\"request canceled\");\n                        Poll::Ready(None)\n                    }\n                    Poll::Pending => {\n                        let (parts, body) = req.into_parts();\n                        let head = RequestHead {\n                            version: parts.version,\n                            subject: 
proto::RequestLine(parts.method, parts.uri),\n                            headers: parts.headers,\n                            extensions: parts.extensions,\n                        };\n                        this.callback = Some(cb);\n                        Poll::Ready(Some(Ok((head, body))))\n                    }\n                }\n            }\n            Poll::Ready(None) => {\n                // user has dropped sender handle\n                trace!(\"client tx closed\");\n                this.rx_closed = true;\n                Poll::Ready(None)\n            }\n            Poll::Pending => Poll::Pending,\n        }\n    }\n\n    fn recv_msg(&mut self, msg: Result<(Self::RecvItem, Incoming)>) -> Result<()> {\n        match msg {\n            Ok((msg, body)) => {\n                if let Some(cb) = self.callback.take() {\n                    let res = msg.into_response(body);\n                    cb.send(Ok(res));\n                    Ok(())\n                } else {\n                    // Getting here is likely a bug! 
An error should have happened\n                    // in Conn::require_empty_read() before ever parsing a\n                    // full message!\n                    Err(Error::new_unexpected_message())\n                }\n            }\n            Err(err) => {\n                if let Some(cb) = self.callback.take() {\n                    cb.send(Err(TrySendError {\n                        error: err,\n                        message: None,\n                    }));\n                    Ok(())\n                } else if !self.rx_closed {\n                    self.rx.close();\n                    if let Some((req, cb)) = self.rx.try_recv() {\n                        trace!(\"canceling queued request with connection error: {}\", err);\n                        // in this case, the message was never even started, so it's safe to tell\n                        // the user that the request was completely canceled\n                        cb.send(Err(TrySendError {\n                            error: Error::new_canceled().with(err),\n                            message: Some(req),\n                        }));\n                        Ok(())\n                    } else {\n                        Err(err)\n                    }\n                } else {\n                    Err(err)\n                }\n            }\n        }\n    }\n\n    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), ()>> {\n        match self.callback {\n            Some(ref mut cb) => match cb.poll_canceled(cx) {\n                Poll::Ready(()) => {\n                    trace!(\"callback receiver has dropped\");\n                    Poll::Ready(Err(()))\n                }\n                Poll::Pending => Poll::Ready(Ok(())),\n            },\n            None => Poll::Ready(Err(())),\n        }\n    }\n\n    #[inline]\n    fn should_poll(&self) -> bool {\n        self.callback.is_none()\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::time::Duration;\n\n    use 
super::{proto::http1, *};\n\n    #[test]\n    fn client_read_bytes_before_writing_request() {\n        let _ = pretty_env_logger::try_init();\n\n        tokio_test::task::spawn(()).enter(|cx, _| {\n            let (io, mut handle) = tokio_test::io::Builder::new().build_with_handle();\n\n            // Block at 0 for now, but we will release this response before\n            // the request is ready to write later...\n            let (mut tx, rx) = dispatch::channel();\n            let conn = Conn::<_, bytes::Bytes, http1::role::Client>::new(io);\n            let mut dispatcher = Dispatcher::new(Client::new(rx), conn);\n\n            // First poll is needed to allow tx to send...\n            assert!(Pin::new(&mut dispatcher).poll(cx).is_pending());\n\n            // Unblock our IO, which has a response before we've sent request!\n            //\n            handle.read(b\"HTTP/1.1 200 OK\\r\\n\\r\\n\");\n\n            let mut res_rx = tx.try_send(http::Request::new(Incoming::empty())).unwrap();\n\n            tokio_test::assert_ready_ok!(Pin::new(&mut dispatcher).poll(cx));\n            let err = tokio_test::assert_ready_ok!(Pin::new(&mut res_rx).poll(cx))\n                .expect_err(\"callback should send error\");\n\n            match (err.error.is_canceled(), err.message.as_ref()) {\n                (true, Some(_)) => (),\n                _ => panic!(\"expected Canceled, got {err:?}\"),\n            }\n        });\n    }\n\n    #[tokio::test]\n    async fn client_flushing_is_not_ready_for_next_request() {\n        let _ = pretty_env_logger::try_init();\n\n        let (io, _handle) = tokio_test::io::Builder::new()\n            .write(b\"POST / HTTP/1.1\\r\\ncontent-length: 4\\r\\n\\r\\n\")\n            .read(b\"HTTP/1.1 200 OK\\r\\ncontent-length: 0\\r\\n\\r\\n\")\n            .wait(std::time::Duration::from_secs(2))\n            .build_with_handle();\n\n        let (mut tx, rx) = dispatch::channel();\n        let mut conn = Conn::<_, bytes::Bytes, 
http1::role::Client>::new(io);\n        conn.set_write_strategy_queue();\n\n        let dispatcher = Dispatcher::new(Client::new(rx), conn);\n        let _dispatcher = tokio::spawn(dispatcher);\n\n        let body = {\n            let (mut tx, body) = Incoming::h1(DecodedLength::new(4), false);\n            std::future::poll_fn(|cx| tx.poll_ready(cx))\n                .await\n                .expect(\"ready\");\n            tx.send_data(\"reee\".into()).unwrap();\n            body\n        };\n\n        let req = http::Request::builder().method(\"POST\").body(body).unwrap();\n\n        let res = tx.try_send(req).unwrap().await.expect(\"response\");\n        drop(res);\n\n        assert!(!tx.is_ready());\n    }\n\n    #[tokio::test]\n    async fn body_empty_chunks_ignored() {\n        let _ = pretty_env_logger::try_init();\n\n        let io = tokio_test::io::Builder::new()\n            // no reading or writing, just be blocked for the test...\n            .wait(Duration::from_secs(5))\n            .build();\n\n        let (mut tx, rx) = dispatch::channel();\n        let conn = Conn::<_, bytes::Bytes, http1::role::Client>::new(io);\n        let mut dispatcher = tokio_test::task::spawn(Dispatcher::new(Client::new(rx), conn));\n\n        // First poll is needed to allow tx to send...\n        assert!(dispatcher.poll().is_pending());\n\n        let body = {\n            let (mut tx, body) = Incoming::channel();\n            std::future::poll_fn(|cx| tx.poll_ready(cx))\n                .await\n                .expect(\"ready\");\n            tx.send_data(\"\".into()).unwrap();\n            body\n        };\n\n        let _res_rx = tx.try_send(http::Request::new(body)).unwrap();\n\n        // Ensure conn.write_body wasn't called with the empty chunk.\n        // If it is, it will trigger an assertion.\n        assert!(dispatcher.poll().is_pending());\n    }\n}\n"
  },
  {
    "path": "src/client/core/proto/http1/encode.rs",
    "content": "use std::{collections::HashSet, fmt, io::IoSlice};\n\nuse bytes::{\n    Buf, Bytes,\n    buf::{Chain, Take},\n};\nuse http::{\n    HeaderMap, HeaderName,\n    header::{\n        AUTHORIZATION, CACHE_CONTROL, CONTENT_ENCODING, CONTENT_LENGTH, CONTENT_RANGE,\n        CONTENT_TYPE, HOST, MAX_FORWARDS, SET_COOKIE, TE, TRAILER, TRANSFER_ENCODING,\n    },\n};\n\nuse super::{io::WriteBuf, role::write_headers};\n\ntype StaticBuf = &'static [u8];\n\n/// Encoders to handle different Transfer-Encodings.\n#[derive(Debug, Clone, PartialEq)]\npub(crate) struct Encoder {\n    kind: Kind,\n    is_last: bool,\n}\n\n#[derive(Debug)]\npub(crate) struct EncodedBuf<B> {\n    kind: BufKind<B>,\n}\n\n#[derive(Debug)]\npub(crate) struct NotEof(u64);\n\n#[derive(Debug, PartialEq, Clone)]\nenum Kind {\n    /// An Encoder for when Transfer-Encoding includes `chunked`.\n    Chunked(Option<Vec<HeaderName>>),\n    /// An Encoder for when Content-Length is set.\n    ///\n    /// Enforces that the body is not longer than the Content-Length header.\n    Length(u64),\n}\n\n#[derive(Debug)]\nenum BufKind<B> {\n    Exact(B),\n    Limited(Take<B>),\n    Chunked(Chain<Chain<ChunkSize, B>, StaticBuf>),\n    ChunkedEnd(StaticBuf),\n    Trailers(Chain<Chain<StaticBuf, Bytes>, StaticBuf>),\n}\n\nimpl Encoder {\n    #[inline]\n    fn new(kind: Kind) -> Encoder {\n        Encoder {\n            kind,\n            is_last: false,\n        }\n    }\n\n    #[inline]\n    pub(crate) fn chunked() -> Encoder {\n        Encoder::new(Kind::Chunked(None))\n    }\n\n    #[inline]\n    pub(crate) fn length(len: u64) -> Encoder {\n        Encoder::new(Kind::Length(len))\n    }\n\n    #[inline]\n    pub(crate) fn into_chunked_with_trailing_fields(self, trailers: Vec<HeaderName>) -> Encoder {\n        match self.kind {\n            Kind::Chunked(_) => Encoder {\n                kind: Kind::Chunked(Some(trailers)),\n                is_last: self.is_last,\n            },\n            _ => self,\n        }\n  
  }\n\n    #[inline]\n    pub(crate) fn is_eof(&self) -> bool {\n        matches!(self.kind, Kind::Length(0))\n    }\n\n    #[inline]\n    pub(crate) fn is_last(&self) -> bool {\n        self.is_last\n    }\n\n    #[inline]\n    pub(crate) fn is_close_delimited(&self) -> bool {\n        false\n    }\n\n    #[inline]\n    pub(crate) fn is_chunked(&self) -> bool {\n        matches!(self.kind, Kind::Chunked(_))\n    }\n\n    pub(crate) fn end<B>(&self) -> Result<Option<EncodedBuf<B>>, NotEof> {\n        match self.kind {\n            Kind::Length(0) => Ok(None),\n            Kind::Chunked(_) => Ok(Some(EncodedBuf {\n                kind: BufKind::ChunkedEnd(b\"0\\r\\n\\r\\n\"),\n            })),\n            Kind::Length(n) => Err(NotEof(n)),\n        }\n    }\n\n    pub(crate) fn encode<B>(&mut self, msg: B) -> EncodedBuf<B>\n    where\n        B: Buf,\n    {\n        let len = msg.remaining();\n        debug_assert!(len > 0, \"encode() called with empty buf\");\n\n        let kind = match self.kind {\n            Kind::Chunked(_) => {\n                trace!(\"encoding chunked {}B\", len);\n                let buf = ChunkSize::new(len)\n                    .chain(msg)\n                    .chain(b\"\\r\\n\" as &'static [u8]);\n                BufKind::Chunked(buf)\n            }\n            Kind::Length(ref mut remaining) => {\n                trace!(\"sized write, len = {}\", len);\n                if len as u64 > *remaining {\n                    let limit = *remaining as usize;\n                    *remaining = 0;\n                    BufKind::Limited(msg.take(limit))\n                } else {\n                    *remaining -= len as u64;\n                    BufKind::Exact(msg)\n                }\n            }\n        };\n        EncodedBuf { kind }\n    }\n\n    pub(crate) fn encode_trailers<B>(&self, trailers: HeaderMap) -> Option<EncodedBuf<B>> {\n        trace!(\"encoding trailers\");\n        match &self.kind {\n            
Kind::Chunked(Some(allowed_trailer_fields)) => {\n                let allowed_set: HashSet<&HeaderName> = allowed_trailer_fields.iter().collect();\n\n                let mut cur_name = None;\n                let mut allowed_trailers = HeaderMap::new();\n\n                for (opt_name, value) in trailers {\n                    if let Some(n) = opt_name {\n                        cur_name = Some(n);\n                    }\n                    let name = cur_name.as_ref().expect(\"current header name\");\n\n                    if allowed_set.contains(name) {\n                        if is_valid_trailer_field(name) {\n                            allowed_trailers.insert(name, value);\n                        } else {\n                            debug!(\"trailer field is not valid: {}\", &name);\n                        }\n                    } else {\n                        debug!(\"trailer header name not found in trailer header: {}\", &name);\n                    }\n                }\n\n                let mut buf = Vec::new();\n                write_headers(&allowed_trailers, &mut buf);\n\n                if buf.is_empty() {\n                    return None;\n                }\n\n                Some(EncodedBuf {\n                    kind: BufKind::Trailers(b\"0\\r\\n\".chain(Bytes::from(buf)).chain(b\"\\r\\n\")),\n                })\n            }\n            Kind::Chunked(None) => {\n                debug!(\"attempted to encode trailers, but the trailer header is not set\");\n                None\n            }\n            _ => {\n                debug!(\"attempted to encode trailers for non-chunked response\");\n                None\n            }\n        }\n    }\n\n    pub(super) fn encode_and_end<B>(&self, msg: B, dst: &mut WriteBuf<EncodedBuf<B>>) -> bool\n    where\n        B: Buf,\n    {\n        let len = msg.remaining();\n        debug_assert!(len > 0, \"encode() called with empty buf\");\n\n        match self.kind {\n            Kind::Chunked(_) => 
{\n                trace!(\"encoding chunked {}B\", len);\n                let buf = ChunkSize::new(len)\n                    .chain(msg)\n                    .chain(b\"\\r\\n0\\r\\n\\r\\n\" as &'static [u8]);\n                dst.buffer(buf);\n                !self.is_last\n            }\n            Kind::Length(remaining) => {\n                use std::cmp::Ordering;\n\n                trace!(\"sized write, len = {}\", len);\n                match (len as u64).cmp(&remaining) {\n                    Ordering::Equal => {\n                        dst.buffer(msg);\n                        !self.is_last\n                    }\n                    Ordering::Greater => {\n                        dst.buffer(msg.take(remaining as usize));\n                        !self.is_last\n                    }\n                    Ordering::Less => {\n                        dst.buffer(msg);\n                        false\n                    }\n                }\n            }\n        }\n    }\n}\n\nfn is_valid_trailer_field(name: &HeaderName) -> bool {\n    !matches!(\n        *name,\n        AUTHORIZATION\n            | CACHE_CONTROL\n            | CONTENT_ENCODING\n            | CONTENT_LENGTH\n            | CONTENT_RANGE\n            | CONTENT_TYPE\n            | HOST\n            | MAX_FORWARDS\n            | SET_COOKIE\n            | TRAILER\n            | TRANSFER_ENCODING\n            | TE\n    )\n}\n\nimpl<B> Buf for EncodedBuf<B>\nwhere\n    B: Buf,\n{\n    #[inline]\n    fn remaining(&self) -> usize {\n        match self.kind {\n            BufKind::Exact(ref b) => b.remaining(),\n            BufKind::Limited(ref b) => b.remaining(),\n            BufKind::Chunked(ref b) => b.remaining(),\n            BufKind::ChunkedEnd(ref b) => b.remaining(),\n            BufKind::Trailers(ref b) => b.remaining(),\n        }\n    }\n\n    #[inline]\n    fn chunk(&self) -> &[u8] {\n        match self.kind {\n            BufKind::Exact(ref b) => b.chunk(),\n            
BufKind::Limited(ref b) => b.chunk(),\n            BufKind::Chunked(ref b) => b.chunk(),\n            BufKind::ChunkedEnd(ref b) => b.chunk(),\n            BufKind::Trailers(ref b) => b.chunk(),\n        }\n    }\n\n    #[inline]\n    fn advance(&mut self, cnt: usize) {\n        match self.kind {\n            BufKind::Exact(ref mut b) => b.advance(cnt),\n            BufKind::Limited(ref mut b) => b.advance(cnt),\n            BufKind::Chunked(ref mut b) => b.advance(cnt),\n            BufKind::ChunkedEnd(ref mut b) => b.advance(cnt),\n            BufKind::Trailers(ref mut b) => b.advance(cnt),\n        }\n    }\n\n    #[inline]\n    fn chunks_vectored<'t>(&'t self, dst: &mut [IoSlice<'t>]) -> usize {\n        match self.kind {\n            BufKind::Exact(ref b) => b.chunks_vectored(dst),\n            BufKind::Limited(ref b) => b.chunks_vectored(dst),\n            BufKind::Chunked(ref b) => b.chunks_vectored(dst),\n            BufKind::ChunkedEnd(ref b) => b.chunks_vectored(dst),\n            BufKind::Trailers(ref b) => b.chunks_vectored(dst),\n        }\n    }\n}\n\n#[cfg(target_pointer_width = \"32\")]\nconst USIZE_BYTES: usize = 4;\n\n#[cfg(target_pointer_width = \"64\")]\nconst USIZE_BYTES: usize = 8;\n\n// each byte will become 2 hex\nconst CHUNK_SIZE_MAX_BYTES: usize = USIZE_BYTES * 2;\n\n#[derive(Clone, Copy)]\nstruct ChunkSize {\n    bytes: [u8; CHUNK_SIZE_MAX_BYTES + 2],\n    pos: u8,\n    len: u8,\n}\n\nimpl ChunkSize {\n    fn new(len: usize) -> ChunkSize {\n        use std::fmt::Write;\n        let mut size = ChunkSize {\n            bytes: [0; CHUNK_SIZE_MAX_BYTES + 2],\n            pos: 0,\n            len: 0,\n        };\n        write!(&mut size, \"{len:X}\\r\\n\").expect(\"CHUNK_SIZE_MAX_BYTES should fit any usize\");\n        size\n    }\n}\n\nimpl Buf for ChunkSize {\n    #[inline]\n    fn remaining(&self) -> usize {\n        (self.len - self.pos).into()\n    }\n\n    #[inline]\n    fn chunk(&self) -> &[u8] {\n        
&self.bytes[self.pos.into()..self.len.into()]\n    }\n\n    #[inline]\n    fn advance(&mut self, cnt: usize) {\n        assert!(cnt <= self.remaining());\n        // just asserted cnt fits in u8\n        self.pos += cnt as u8;\n    }\n}\n\nimpl fmt::Debug for ChunkSize {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        f.debug_struct(\"ChunkSize\")\n            .field(\"bytes\", &&self.bytes[..self.len.into()])\n            .field(\"pos\", &self.pos)\n            .finish()\n    }\n}\n\nimpl fmt::Write for ChunkSize {\n    fn write_str(&mut self, num: &str) -> fmt::Result {\n        use std::io::Write;\n        (&mut self.bytes[self.len.into()..])\n            .write_all(num.as_bytes())\n            .expect(\"&mut [u8].write() cannot error\");\n        self.len += num.len() as u8; // safe because bytes is never bigger than 256\n        Ok(())\n    }\n}\n\nimpl<B: Buf> From<B> for EncodedBuf<B> {\n    fn from(buf: B) -> Self {\n        EncodedBuf {\n            kind: BufKind::Exact(buf),\n        }\n    }\n}\n\nimpl<B: Buf> From<Take<B>> for EncodedBuf<B> {\n    fn from(buf: Take<B>) -> Self {\n        EncodedBuf {\n            kind: BufKind::Limited(buf),\n        }\n    }\n}\n\nimpl<B: Buf> From<Chain<Chain<ChunkSize, B>, StaticBuf>> for EncodedBuf<B> {\n    fn from(buf: Chain<Chain<ChunkSize, B>, StaticBuf>) -> Self {\n        EncodedBuf {\n            kind: BufKind::Chunked(buf),\n        }\n    }\n}\n\nimpl fmt::Display for NotEof {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        write!(f, \"early end, expected {} more bytes\", self.0)\n    }\n}\n\nimpl std::error::Error for NotEof {}\n\n#[cfg(test)]\nmod tests {\n    use bytes::BufMut;\n    use http::{\n        HeaderMap, HeaderName, HeaderValue,\n        header::{\n            AUTHORIZATION, CACHE_CONTROL, CONTENT_ENCODING, CONTENT_LENGTH, CONTENT_RANGE,\n            CONTENT_TYPE, HOST, MAX_FORWARDS, SET_COOKIE, TE, TRAILER, TRANSFER_ENCODING,\n        },\n    
};\n\n    use super::{super::io::Cursor, Encoder};\n\n    #[test]\n    fn chunked() {\n        let mut encoder = Encoder::chunked();\n        let mut dst = Vec::new();\n\n        let msg1 = b\"foo bar\".as_ref();\n        let buf1 = encoder.encode(msg1);\n        dst.put(buf1);\n        assert_eq!(dst, b\"7\\r\\nfoo bar\\r\\n\");\n\n        let msg2 = b\"baz quux herp\".as_ref();\n        let buf2 = encoder.encode(msg2);\n        dst.put(buf2);\n\n        assert_eq!(dst, b\"7\\r\\nfoo bar\\r\\nD\\r\\nbaz quux herp\\r\\n\");\n\n        let end = encoder.end::<Cursor<Vec<u8>>>().unwrap().unwrap();\n        dst.put(end);\n\n        assert_eq!(\n            dst,\n            b\"7\\r\\nfoo bar\\r\\nD\\r\\nbaz quux herp\\r\\n0\\r\\n\\r\\n\".as_ref()\n        );\n    }\n\n    #[test]\n    fn length() {\n        let max_len = 8;\n        let mut encoder = Encoder::length(max_len as u64);\n        let mut dst = Vec::new();\n\n        let msg1 = b\"foo bar\".as_ref();\n        let buf1 = encoder.encode(msg1);\n        dst.put(buf1);\n\n        assert_eq!(dst, b\"foo bar\");\n        assert!(!encoder.is_eof());\n        encoder.end::<()>().unwrap_err();\n\n        let msg2 = b\"baz\".as_ref();\n        let buf2 = encoder.encode(msg2);\n        dst.put(buf2);\n\n        assert_eq!(dst.len(), max_len);\n        assert_eq!(dst, b\"foo barb\");\n        assert!(encoder.is_eof());\n        assert!(encoder.end::<()>().unwrap().is_none());\n    }\n\n    #[test]\n    fn chunked_with_valid_trailers() {\n        let encoder = Encoder::chunked();\n        let trailers = vec![HeaderName::from_static(\"chunky-trailer\")];\n        let encoder = encoder.into_chunked_with_trailing_fields(trailers);\n\n        let headers = HeaderMap::from_iter(vec![\n            (\n                HeaderName::from_static(\"chunky-trailer\"),\n                HeaderValue::from_static(\"header data\"),\n            ),\n            (\n                HeaderName::from_static(\"should-not-be-included\"),\n       
         HeaderValue::from_static(\"oops\"),\n            ),\n        ]);\n\n        let buf1 = encoder.encode_trailers::<&[u8]>(headers).unwrap();\n\n        let mut dst = Vec::new();\n        dst.put(buf1);\n        assert_eq!(dst, b\"0\\r\\nchunky-trailer: header data\\r\\n\\r\\n\");\n    }\n\n    #[test]\n    fn chunked_with_multiple_trailer_headers() {\n        let encoder = Encoder::chunked();\n        let trailers = vec![\n            HeaderName::from_static(\"chunky-trailer\"),\n            HeaderName::from_static(\"chunky-trailer-2\"),\n        ];\n        let encoder = encoder.into_chunked_with_trailing_fields(trailers);\n\n        let headers = HeaderMap::from_iter(vec![\n            (\n                HeaderName::from_static(\"chunky-trailer\"),\n                HeaderValue::from_static(\"header data\"),\n            ),\n            (\n                HeaderName::from_static(\"chunky-trailer-2\"),\n                HeaderValue::from_static(\"more header data\"),\n            ),\n        ]);\n\n        let buf1 = encoder.encode_trailers::<&[u8]>(headers).unwrap();\n\n        let mut dst = Vec::new();\n        dst.put(buf1);\n        assert_eq!(\n            dst,\n            b\"0\\r\\nchunky-trailer: header data\\r\\nchunky-trailer-2: more header data\\r\\n\\r\\n\"\n        );\n    }\n\n    #[test]\n    fn chunked_with_no_trailer_header() {\n        let encoder = Encoder::chunked();\n\n        let headers = HeaderMap::from_iter(vec![(\n            HeaderName::from_static(\"chunky-trailer\"),\n            HeaderValue::from_static(\"header data\"),\n        )]);\n\n        assert!(encoder.encode_trailers::<&[u8]>(headers.clone()).is_none());\n\n        let trailers = vec![];\n        let encoder = encoder.into_chunked_with_trailing_fields(trailers);\n\n        assert!(encoder.encode_trailers::<&[u8]>(headers).is_none());\n    }\n\n    #[test]\n    fn chunked_with_invalid_trailers() {\n        let encoder = Encoder::chunked();\n\n        let trailers = 
vec![\n            AUTHORIZATION,\n            CACHE_CONTROL,\n            CONTENT_ENCODING,\n            TRAILER,\n            TRANSFER_ENCODING,\n            TE,\n        ];\n        let encoder = encoder.into_chunked_with_trailing_fields(trailers);\n\n        let mut headers = HeaderMap::new();\n        headers.insert(AUTHORIZATION, HeaderValue::from_static(\"header data\"));\n        headers.insert(CACHE_CONTROL, HeaderValue::from_static(\"header data\"));\n        headers.insert(CONTENT_ENCODING, HeaderValue::from_static(\"header data\"));\n        headers.insert(CONTENT_LENGTH, HeaderValue::from_static(\"header data\"));\n        headers.insert(CONTENT_RANGE, HeaderValue::from_static(\"header data\"));\n        headers.insert(CONTENT_TYPE, HeaderValue::from_static(\"header data\"));\n        headers.insert(HOST, HeaderValue::from_static(\"header data\"));\n        headers.insert(MAX_FORWARDS, HeaderValue::from_static(\"header data\"));\n        headers.insert(SET_COOKIE, HeaderValue::from_static(\"header data\"));\n        headers.insert(TRAILER, HeaderValue::from_static(\"header data\"));\n        headers.insert(TRANSFER_ENCODING, HeaderValue::from_static(\"header data\"));\n        headers.insert(TE, HeaderValue::from_static(\"header data\"));\n\n        assert!(encoder.encode_trailers::<&[u8]>(headers).is_none());\n    }\n\n    #[test]\n    fn chunked_trailers_case_insensitive_matching() {\n        // Regression test for issue #4010: HTTP/1.1 trailers are case-sensitive\n        //\n        // Previously, the Trailer header values were stored as HeaderValue (preserving case)\n        // and compared against HeaderName (which is always lowercase). 
This caused trailers\n        // declared as \"Chunky-Trailer\" to not match actual trailers sent as \"chunky-trailer\".\n        //\n        // The fix converts Trailer header values to HeaderName during parsing, which\n        // normalizes the case and enables proper case-insensitive matching.\n        //\n        // Note: HeaderName::from_static() requires lowercase input. In real usage,\n        // HeaderName::from_bytes() is used to parse the Trailer header value, which\n        // normalizes mixed-case input like \"Chunky-Trailer\" to \"chunky-trailer\".\n        let encoder = Encoder::chunked();\n        let trailers = vec![HeaderName::from_static(\"chunky-trailer\")];\n        let encoder = encoder.into_chunked_with_trailing_fields(trailers);\n\n        // The actual trailer being sent\n        let headers = HeaderMap::from_iter(vec![(\n            HeaderName::from_static(\"chunky-trailer\"),\n            HeaderValue::from_static(\"trailer value\"),\n        )]);\n\n        let buf = encoder.encode_trailers::<&[u8]>(headers).unwrap();\n        let mut dst = Vec::new();\n        dst.put(buf);\n        assert_eq!(dst, b\"0\\r\\nchunky-trailer: trailer value\\r\\n\\r\\n\");\n    }\n}\n"
  },
  {
    "path": "src/client/core/proto/http1/ext.rs",
    "content": "//! HTTP extensions.\n\nuse bytes::Bytes;\n\n/// A reason phrase in an HTTP/1 response.\n///\n/// # Clients\n///\n/// For clients, a `ReasonPhrase` will be present in the extensions of the `http::Response` returned\n/// for a request if the reason phrase is different from the canonical reason phrase for the\n/// response's status code. For example, if a server returns `HTTP/1.1 200 Awesome`, the\n/// `ReasonPhrase` will be present and contain `Awesome`, but if a server returns `HTTP/1.1 200 OK`,\n/// the response will not contain a `ReasonPhrase`.\n#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]\npub struct ReasonPhrase(Bytes);\n\nimpl ReasonPhrase {\n    // Not public on purpose.\n    /// Converts a `Bytes` directly into a `ReasonPhrase` without validating.\n    ///\n    /// Use with care; invalid bytes in a reason phrase can cause serious security problems if\n    /// emitted in a response.\n    #[inline]\n    pub(crate) fn from_bytes_unchecked(reason: Bytes) -> Self {\n        Self(reason)\n    }\n}\n\nimpl AsRef<[u8]> for ReasonPhrase {\n    /// Gets the reason phrase as bytes.\n    #[inline]\n    fn as_ref(&self) -> &[u8] {\n        &self.0\n    }\n}\n"
  },
  {
    "path": "src/client/core/proto/http1/io.rs",
    "content": "use std::{\n    cmp,\n    fmt::{self, Debug},\n    io::{self, IoSlice},\n    pin::Pin,\n    task::{Context, Poll, ready},\n};\n\nuse bytes::{Buf, Bytes, BytesMut};\nuse tokio::io::{AsyncRead, AsyncWrite};\n\nuse super::{Http1Transaction, ParseContext, ParsedMessage, buf::BufList};\nuse crate::client::core::{Error, Result};\n\n/// The initial buffer size allocated before trying to read from IO.\npub(crate) const INIT_BUFFER_SIZE: usize = 8192;\n\n/// The minimum value that can be set to max buffer size.\npub(crate) const MINIMUM_MAX_BUFFER_SIZE: usize = INIT_BUFFER_SIZE;\n\n/// The default maximum read buffer size. If the buffer gets this big and\n/// a message is still not complete, a `TooLarge` error is triggered.\n// Note: if this changes, update server::conn::Http::max_buf_size docs.\npub(crate) const DEFAULT_MAX_BUFFER_SIZE: usize = 8192 + 4096 * 100;\n\n/// The maximum number of distinct `Buf`s to hold in a list before requiring\n/// a flush. Only affects when the buffer strategy is to queue buffers.\n///\n/// Note that a flush can happen before reaching the maximum. 
This simply\n/// forces a flush if the queue gets this big.\nconst MAX_BUF_LIST_BUFFERS: usize = 16;\n\npub(crate) struct Buffered<T, B> {\n    flush_pipeline: bool,\n    io: T,\n    partial_len: Option<usize>,\n    read_blocked: bool,\n    read_buf: BytesMut,\n    read_buf_strategy: ReadStrategy,\n    write_buf: WriteBuf<B>,\n}\n\nimpl<T, B> fmt::Debug for Buffered<T, B>\nwhere\n    B: Buf,\n{\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        f.debug_struct(\"Buffered\")\n            .field(\"read_buf\", &self.read_buf)\n            .field(\"write_buf\", &self.write_buf)\n            .finish()\n    }\n}\n\nimpl<T, B> Buffered<T, B>\nwhere\n    T: AsyncRead + AsyncWrite + Unpin,\n    B: Buf,\n{\n    pub(crate) fn new(io: T) -> Buffered<T, B> {\n        let strategy = if io.is_write_vectored() {\n            WriteStrategy::Queue\n        } else {\n            WriteStrategy::Flatten\n        };\n        let write_buf = WriteBuf::new(strategy);\n        Buffered {\n            flush_pipeline: false,\n            io,\n            partial_len: None,\n            read_blocked: false,\n            read_buf: BytesMut::with_capacity(0),\n            read_buf_strategy: ReadStrategy::default(),\n            write_buf,\n        }\n    }\n\n    #[inline]\n    pub(crate) fn set_max_buf_size(&mut self, max: usize) {\n        assert!(\n            max >= MINIMUM_MAX_BUFFER_SIZE,\n            \"The max_buf_size cannot be smaller than {MINIMUM_MAX_BUFFER_SIZE}.\",\n        );\n        self.read_buf_strategy = ReadStrategy::with_max(max);\n        self.write_buf.max_buf_size = max;\n    }\n\n    #[inline]\n    pub(crate) fn set_read_buf_exact_size(&mut self, sz: usize) {\n        self.read_buf_strategy = ReadStrategy::Exact(sz);\n    }\n\n    #[inline]\n    pub(crate) fn set_write_strategy_flatten(&mut self) {\n        // this should always be called only at construction time,\n        // so this assert is here to catch myself\n        
debug_assert!(self.write_buf.queue.bufs_cnt() == 0);\n        self.write_buf.set_strategy(WriteStrategy::Flatten);\n    }\n\n    #[inline]\n    pub(crate) fn set_write_strategy_queue(&mut self) {\n        // this should always be called only at construction time,\n        // so this assert is here to catch myself\n        debug_assert!(self.write_buf.queue.bufs_cnt() == 0);\n        self.write_buf.set_strategy(WriteStrategy::Queue);\n    }\n\n    #[inline]\n    pub(crate) fn read_buf(&self) -> &[u8] {\n        self.read_buf.as_ref()\n    }\n\n    /// Return the \"allocated\" available space, not the potential space\n    /// that could be allocated in the future.\n    #[inline]\n    fn read_buf_remaining_mut(&self) -> usize {\n        self.read_buf.capacity() - self.read_buf.len()\n    }\n\n    /// Return whether we can append to the headers buffer.\n    ///\n    /// Reasons we can't:\n    /// - The write buf is in queue mode, and some of the past body is still needing to be flushed.\n    #[inline]\n    pub(crate) fn can_headers_buf(&self) -> bool {\n        !self.write_buf.queue.has_remaining()\n    }\n\n    #[inline]\n    pub(crate) fn headers_buf(&mut self) -> &mut Vec<u8> {\n        let buf = self.write_buf.headers_mut();\n        &mut buf.bytes\n    }\n\n    #[inline]\n    pub(super) fn write_buf(&mut self) -> &mut WriteBuf<B> {\n        &mut self.write_buf\n    }\n\n    #[inline]\n    pub(crate) fn buffer<BB: Buf + Into<B>>(&mut self, buf: BB) {\n        self.write_buf.buffer(buf)\n    }\n\n    #[inline]\n    pub(crate) fn can_buffer(&self) -> bool {\n        self.flush_pipeline || self.write_buf.can_buffer()\n    }\n\n    pub(crate) fn consume_leading_lines(&mut self) {\n        if !self.read_buf.is_empty() {\n            let mut i = 0;\n            while i < self.read_buf.len() {\n                match self.read_buf[i] {\n                    b'\\r' | b'\\n' => i += 1,\n                    _ => break,\n                }\n            }\n            
self.read_buf.advance(i);\n        }\n    }\n\n    pub(super) fn parse<S>(\n        &mut self,\n        cx: &mut Context<'_>,\n        parse_ctx: ParseContext<'_>,\n    ) -> Poll<Result<ParsedMessage<S::Incoming>>>\n    where\n        S: Http1Transaction,\n    {\n        loop {\n            match super::role::parse_headers::<S>(\n                &mut self.read_buf,\n                self.partial_len,\n                ParseContext {\n                    cached_headers: parse_ctx.cached_headers,\n                    req_method: parse_ctx.req_method,\n                    h1_parser_config: parse_ctx.h1_parser_config,\n                    h1_max_headers: parse_ctx.h1_max_headers,\n                    h09_responses: parse_ctx.h09_responses,\n                },\n            )? {\n                Some(msg) => {\n                    debug!(\"parsed {} headers\", msg.head.headers.len());\n                    self.partial_len = None;\n                    return Poll::Ready(Ok(msg));\n                }\n                None => {\n                    let max = self.read_buf_strategy.max();\n                    let curr_len = self.read_buf.len();\n                    if curr_len >= max {\n                        debug!(\"max_buf_size ({}) reached, closing\", max);\n                        return Poll::Ready(Err(Error::new_too_large()));\n                    }\n                    if curr_len > 0 {\n                        trace!(\"partial headers; {} bytes so far\", curr_len);\n                        self.partial_len = Some(curr_len);\n                    } else {\n                        // 1xx gobled some bytes\n                        self.partial_len = None;\n                    }\n                }\n            }\n            if ready!(self.poll_read_from_io(cx)).map_err(Error::new_io)? 
== 0 {\n                trace!(\"parse eof\");\n                return Poll::Ready(Err(Error::new_incomplete()));\n            }\n        }\n    }\n\n    pub(crate) fn poll_read_from_io(&mut self, cx: &mut Context<'_>) -> Poll<io::Result<usize>> {\n        self.read_blocked = false;\n        let next = self.read_buf_strategy.next();\n        if self.read_buf_remaining_mut() < next {\n            self.read_buf.reserve(next);\n        }\n\n        match tokio_util::io::poll_read_buf(Pin::new(&mut self.io), cx, &mut self.read_buf) {\n            Poll::Ready(Ok(n)) => {\n                trace!(\"received {} bytes\", n);\n                self.read_buf_strategy.record(n);\n                Poll::Ready(Ok(n))\n            }\n            Poll::Pending => {\n                self.read_blocked = true;\n                Poll::Pending\n            }\n            Poll::Ready(Err(e)) => Poll::Ready(Err(e)),\n        }\n    }\n\n    #[inline]\n    pub(crate) fn into_inner(self) -> (T, Bytes) {\n        (self.io, self.read_buf.freeze())\n    }\n\n    #[inline]\n    pub(crate) fn io_mut(&mut self) -> &mut T {\n        &mut self.io\n    }\n\n    #[inline]\n    pub(crate) fn is_read_blocked(&self) -> bool {\n        self.read_blocked\n    }\n\n    pub(crate) fn poll_flush(&mut self, cx: &mut Context<'_>) -> Poll<io::Result<()>> {\n        if self.flush_pipeline && !self.read_buf.is_empty() {\n            Poll::Ready(Ok(()))\n        } else if self.write_buf.remaining() == 0 {\n            Pin::new(&mut self.io).poll_flush(cx)\n        } else {\n            if let WriteStrategy::Flatten = self.write_buf.strategy {\n                return self.poll_flush_flattened(cx);\n            }\n\n            loop {\n                // Let Tokio pick the write path.\n                // With `tokio-btls` this currently falls back to plain writes;\n                // if we later support vectored TLS writes like `tokio-rustls`,\n                // `poll_write_buf` will pick that up automatically.\n     
           let n = ready!(tokio_util::io::poll_write_buf(\n                    Pin::new(&mut self.io),\n                    cx,\n                    &mut self.write_buf,\n                )?);\n                debug!(\"flushed {} bytes\", n);\n                if self.write_buf.remaining() == 0 {\n                    break;\n                } else if n == 0 {\n                    trace!(\n                        \"write returned zero, but {} bytes remaining\",\n                        self.write_buf.remaining()\n                    );\n                    return Poll::Ready(Err(io::ErrorKind::WriteZero.into()));\n                }\n            }\n            Pin::new(&mut self.io).poll_flush(cx)\n        }\n    }\n\n    /// Specialized version of `flush` when strategy is Flatten.\n    ///\n    /// Since all buffered bytes are flattened into the single headers buffer,\n    /// that skips some bookkeeping around using multiple buffers.\n    fn poll_flush_flattened(&mut self, cx: &mut Context<'_>) -> Poll<io::Result<()>> {\n        loop {\n            let n = ready!(Pin::new(&mut self.io).poll_write(cx, self.write_buf.headers.chunk()))?;\n            debug!(\"flushed {} bytes\", n);\n            self.write_buf.headers.advance(n);\n            if self.write_buf.headers.remaining() == 0 {\n                self.write_buf.headers.reset();\n                break;\n            } else if n == 0 {\n                trace!(\n                    \"write returned zero, but {} bytes remaining\",\n                    self.write_buf.remaining()\n                );\n                return Poll::Ready(Err(io::ErrorKind::WriteZero.into()));\n            }\n        }\n        Pin::new(&mut self.io).poll_flush(cx)\n    }\n}\n\n// The `B` is a `Buf`, we never project a pin to it\nimpl<T: Unpin, B> Unpin for Buffered<T, B> {}\n\n// TODO: This trait is old... 
at least rename to PollBytes or something...\npub(crate) trait MemRead {\n    fn read_mem(&mut self, cx: &mut Context<'_>, len: usize) -> Poll<io::Result<Bytes>>;\n}\n\nimpl<T, B> MemRead for Buffered<T, B>\nwhere\n    T: AsyncRead + AsyncWrite + Unpin,\n    B: Buf,\n{\n    fn read_mem(&mut self, cx: &mut Context<'_>, len: usize) -> Poll<io::Result<Bytes>> {\n        if !self.read_buf.is_empty() {\n            let n = std::cmp::min(len, self.read_buf.len());\n            Poll::Ready(Ok(self.read_buf.split_to(n).freeze()))\n        } else {\n            let n = ready!(self.poll_read_from_io(cx))?;\n            Poll::Ready(Ok(self.read_buf.split_to(::std::cmp::min(len, n)).freeze()))\n        }\n    }\n}\n\n#[derive(Clone, Copy, Debug)]\nenum ReadStrategy {\n    Adaptive {\n        decrease_now: bool,\n        next: usize,\n        max: usize,\n    },\n    Exact(usize),\n}\n\nimpl ReadStrategy {\n    fn with_max(max: usize) -> ReadStrategy {\n        ReadStrategy::Adaptive {\n            decrease_now: false,\n            next: INIT_BUFFER_SIZE,\n            max,\n        }\n    }\n\n    fn next(&self) -> usize {\n        match *self {\n            ReadStrategy::Adaptive { next, .. } => next,\n            ReadStrategy::Exact(exact) => exact,\n        }\n    }\n\n    fn max(&self) -> usize {\n        match *self {\n            ReadStrategy::Adaptive { max, .. 
} => max,\n            ReadStrategy::Exact(exact) => exact,\n        }\n    }\n\n    fn record(&mut self, bytes_read: usize) {\n        match *self {\n            ReadStrategy::Adaptive {\n                ref mut decrease_now,\n                ref mut next,\n                max,\n                ..\n            } => {\n                if bytes_read >= *next {\n                    *next = cmp::min(incr_power_of_two(*next), max);\n                    *decrease_now = false;\n                } else {\n                    let decr_to = prev_power_of_two(*next);\n                    if bytes_read < decr_to {\n                        if *decrease_now {\n                            *next = cmp::max(decr_to, INIT_BUFFER_SIZE);\n                            *decrease_now = false;\n                        } else {\n                            // Decreasing is a two \"record\" process.\n                            *decrease_now = true;\n                        }\n                    } else {\n                        // A read within the current range should cancel\n                        // a potential decrease, since we just saw proof\n                        // that we still need this size.\n                        *decrease_now = false;\n                    }\n                }\n            }\n            ReadStrategy::Exact(_) => (),\n        }\n    }\n}\n\nfn incr_power_of_two(n: usize) -> usize {\n    n.saturating_mul(2)\n}\n\nfn prev_power_of_two(n: usize) -> usize {\n    // Only way this shift can underflow is if n is less than 4.\n    // (Which would means `usize::MAX >> 64` and underflowed!)\n    debug_assert!(n >= 4);\n    (usize::MAX >> (n.leading_zeros() + 2)) + 1\n}\n\nimpl Default for ReadStrategy {\n    fn default() -> ReadStrategy {\n        ReadStrategy::with_max(DEFAULT_MAX_BUFFER_SIZE)\n    }\n}\n\n#[derive(Clone)]\npub(crate) struct Cursor<T> {\n    bytes: T,\n    pos: usize,\n}\n\nimpl<T: AsRef<[u8]>> Cursor<T> {\n    #[inline]\n    pub(crate) fn 
new(bytes: T) -> Cursor<T> {\n        Cursor { bytes, pos: 0 }\n    }\n}\n\nimpl Cursor<Vec<u8>> {\n    /// If we've advanced the position a bit in this cursor, and wish to\n    /// extend the underlying vector, we may wish to unshift the \"read\" bytes\n    /// off, and move everything else over.\n    fn maybe_unshift(&mut self, additional: usize) {\n        if self.pos == 0 {\n            // nothing to do\n            return;\n        }\n\n        if self.bytes.capacity() - self.bytes.len() >= additional {\n            // there's room!\n            return;\n        }\n\n        self.bytes.drain(0..self.pos);\n        self.pos = 0;\n    }\n\n    fn reset(&mut self) {\n        self.pos = 0;\n        self.bytes.clear();\n    }\n}\n\nimpl<T: AsRef<[u8]>> fmt::Debug for Cursor<T> {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        f.debug_struct(\"Cursor\")\n            .field(\"pos\", &self.pos)\n            .field(\"len\", &self.bytes.as_ref().len())\n            .finish()\n    }\n}\n\nimpl<T: AsRef<[u8]>> Buf for Cursor<T> {\n    #[inline]\n    fn remaining(&self) -> usize {\n        self.bytes.as_ref().len() - self.pos\n    }\n\n    #[inline]\n    fn chunk(&self) -> &[u8] {\n        &self.bytes.as_ref()[self.pos..]\n    }\n\n    #[inline]\n    fn advance(&mut self, cnt: usize) {\n        debug_assert!(self.pos + cnt <= self.bytes.as_ref().len());\n        self.pos += cnt;\n    }\n}\n\n// an internal buffer to collect writes before flushes\npub(super) struct WriteBuf<B> {\n    /// Re-usable buffer that holds message headers\n    headers: Cursor<Vec<u8>>,\n    max_buf_size: usize,\n    /// Deque of user buffers if strategy is Queue\n    queue: BufList<B>,\n    strategy: WriteStrategy,\n}\n\nimpl<B: Buf> WriteBuf<B> {\n    fn new(strategy: WriteStrategy) -> WriteBuf<B> {\n        WriteBuf {\n            headers: Cursor::new(Vec::with_capacity(INIT_BUFFER_SIZE)),\n            max_buf_size: DEFAULT_MAX_BUFFER_SIZE,\n            queue: 
BufList::new(),\n            strategy,\n        }\n    }\n}\n\nimpl<B> WriteBuf<B>\nwhere\n    B: Buf,\n{\n    #[inline]\n    fn set_strategy(&mut self, strategy: WriteStrategy) {\n        self.strategy = strategy;\n    }\n\n    pub(super) fn buffer<BB: Buf + Into<B>>(&mut self, mut buf: BB) {\n        debug_assert!(buf.has_remaining());\n        match self.strategy {\n            WriteStrategy::Flatten => {\n                let head = self.headers_mut();\n\n                head.maybe_unshift(buf.remaining());\n                trace!(\n                    self.len = head.remaining(),\n                    buf.len = buf.remaining(),\n                    \"buffer.flatten\"\n                );\n                //perf: This is a little faster than <Vec as BufMut>>::put,\n                //but accomplishes the same result.\n                loop {\n                    let adv = {\n                        let slice = buf.chunk();\n                        if slice.is_empty() {\n                            return;\n                        }\n                        head.bytes.extend_from_slice(slice);\n                        slice.len()\n                    };\n                    buf.advance(adv);\n                }\n            }\n            WriteStrategy::Queue => {\n                trace!(\n                    self.len = self.remaining(),\n                    buf.len = buf.remaining(),\n                    \"buffer.queue\"\n                );\n                self.queue.push(buf.into());\n            }\n        }\n    }\n\n    #[inline]\n    fn can_buffer(&self) -> bool {\n        match self.strategy {\n            WriteStrategy::Flatten => self.remaining() < self.max_buf_size,\n            WriteStrategy::Queue => {\n                self.queue.bufs_cnt() < MAX_BUF_LIST_BUFFERS && self.remaining() < self.max_buf_size\n            }\n        }\n    }\n\n    #[inline]\n    fn headers_mut(&mut self) -> &mut Cursor<Vec<u8>> {\n        
debug_assert!(!self.queue.has_remaining());\n        &mut self.headers\n    }\n}\n\nimpl<B: Buf> fmt::Debug for WriteBuf<B> {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        f.debug_struct(\"WriteBuf\")\n            .field(\"remaining\", &self.remaining())\n            .field(\"strategy\", &self.strategy)\n            .finish()\n    }\n}\n\nimpl<B: Buf> Buf for WriteBuf<B> {\n    #[inline]\n    fn remaining(&self) -> usize {\n        self.headers.remaining() + self.queue.remaining()\n    }\n\n    #[inline]\n    fn chunk(&self) -> &[u8] {\n        let headers = self.headers.chunk();\n        if !headers.is_empty() {\n            headers\n        } else {\n            self.queue.chunk()\n        }\n    }\n\n    #[inline]\n    fn advance(&mut self, cnt: usize) {\n        let hrem = self.headers.remaining();\n\n        match hrem.cmp(&cnt) {\n            cmp::Ordering::Equal => self.headers.reset(),\n            cmp::Ordering::Greater => self.headers.advance(cnt),\n            cmp::Ordering::Less => {\n                let qcnt = cnt - hrem;\n                self.headers.reset();\n                self.queue.advance(qcnt);\n            }\n        }\n    }\n\n    #[inline]\n    fn chunks_vectored<'t>(&'t self, dst: &mut [IoSlice<'t>]) -> usize {\n        let n = self.headers.chunks_vectored(dst);\n        self.queue.chunks_vectored(&mut dst[n..]) + n\n    }\n}\n\n#[derive(Debug)]\nenum WriteStrategy {\n    Flatten,\n    Queue,\n}\n\n#[cfg(test)]\nmod tests {\n    use std::time::Duration;\n\n    use tokio_test::io::Builder as Mock;\n\n    use super::*;\n\n    impl<T, B> Buffered<T, B>\n    where\n        T: AsyncRead + AsyncWrite + Unpin,\n        B: Buf,\n    {\n        fn flush(&mut self) -> impl std::future::Future<Output = io::Result<()>> + '_ {\n            std::future::poll_fn(move |cx| self.poll_flush(cx))\n        }\n    }\n\n    #[tokio::test]\n    async fn parse_reads_until_blocked() {\n        use crate::client::core::proto::http1;\n\n   
     let _ = pretty_env_logger::try_init();\n        let mock = Mock::new()\n            // Split over multiple reads will read all of it\n            .read(b\"HTTP/1.1 200 OK\\r\\n\")\n            .read(b\"Server: crate::core:\\r\\n\")\n            // missing last line ending\n            .wait(Duration::from_secs(1))\n            .build();\n\n        let mut buffered = Buffered::<_, Cursor<Vec<u8>>>::new(mock);\n\n        // We expect a `parse` to be not ready, and so can't await it directly.\n        // Rather, this `poll_fn` will wrap the `Poll` result.\n        std::future::poll_fn(|cx| {\n            let parse_ctx = ParseContext {\n                cached_headers: &mut None,\n                req_method: &mut None,\n                h1_parser_config: &Default::default(),\n                h1_max_headers: None,\n                h09_responses: false,\n            };\n            assert!(\n                buffered\n                    .parse::<http1::role::Client>(cx, parse_ctx)\n                    .is_pending()\n            );\n            Poll::Ready(())\n        })\n        .await;\n\n        assert_eq!(\n            buffered.read_buf,\n            b\"HTTP/1.1 200 OK\\r\\nServer: crate::core:\\r\\n\"[..]\n        );\n    }\n\n    #[test]\n    fn read_strategy_adaptive_increments() {\n        let mut strategy = ReadStrategy::default();\n        assert_eq!(strategy.next(), 8192);\n\n        // Grows if record == next\n        strategy.record(8192);\n        assert_eq!(strategy.next(), 16384);\n\n        strategy.record(16384);\n        assert_eq!(strategy.next(), 32768);\n\n        // Enormous records still increment at same rate\n        strategy.record(usize::MAX);\n        assert_eq!(strategy.next(), 65536);\n\n        let max = strategy.max();\n        while strategy.next() < max {\n            strategy.record(max);\n        }\n\n        assert_eq!(strategy.next(), max, \"never goes over max\");\n        strategy.record(max + 1);\n        
assert_eq!(strategy.next(), max, \"never goes over max\");\n    }\n\n    #[test]\n    fn read_strategy_adaptive_decrements() {\n        let mut strategy = ReadStrategy::default();\n        strategy.record(8192);\n        assert_eq!(strategy.next(), 16384);\n\n        strategy.record(1);\n        assert_eq!(\n            strategy.next(),\n            16384,\n            \"first smaller record doesn't decrement yet\"\n        );\n        strategy.record(8192);\n        assert_eq!(strategy.next(), 16384, \"record was with range\");\n\n        strategy.record(1);\n        assert_eq!(\n            strategy.next(),\n            16384,\n            \"in-range record should make this the 'first' again\"\n        );\n\n        strategy.record(1);\n        assert_eq!(strategy.next(), 8192, \"second smaller record decrements\");\n\n        strategy.record(1);\n        assert_eq!(strategy.next(), 8192, \"first doesn't decrement\");\n        strategy.record(1);\n        assert_eq!(strategy.next(), 8192, \"doesn't decrement under minimum\");\n    }\n\n    #[test]\n    fn read_strategy_adaptive_stays_the_same() {\n        let mut strategy = ReadStrategy::default();\n        strategy.record(8192);\n        assert_eq!(strategy.next(), 16384);\n\n        strategy.record(8193);\n        assert_eq!(\n            strategy.next(),\n            16384,\n            \"first smaller record doesn't decrement yet\"\n        );\n\n        strategy.record(8193);\n        assert_eq!(\n            strategy.next(),\n            16384,\n            \"with current step does not decrement\"\n        );\n    }\n\n    #[test]\n    fn read_strategy_adaptive_max_fuzz() {\n        fn fuzz(max: usize) {\n            let mut strategy = ReadStrategy::with_max(max);\n            while strategy.next() < max {\n                strategy.record(usize::MAX);\n            }\n            let mut next = strategy.next();\n            while next > 8192 {\n                strategy.record(1);\n                
strategy.record(1);\n                next = strategy.next();\n                assert!(\n                    next.is_power_of_two(),\n                    \"decrement should be powers of two: {next} (max = {max})\",\n                );\n            }\n        }\n\n        let mut max = 8192;\n        while max < usize::MAX {\n            fuzz(max);\n            max = (max / 2).saturating_mul(3);\n        }\n        fuzz(usize::MAX);\n    }\n\n    #[test]\n    #[should_panic]\n    #[cfg(debug_assertions)] // needs to trigger a debug_assert\n    fn write_buf_requires_non_empty_bufs() {\n        let mock = Mock::new().build();\n        let mut buffered = Buffered::<_, Cursor<Vec<u8>>>::new(mock);\n\n        buffered.buffer(Cursor::new(Vec::new()));\n    }\n\n    #[tokio::test]\n    async fn write_buf_flatten() {\n        let _ = pretty_env_logger::try_init();\n\n        let mock = Mock::new()\n            .write(b\"hello world, it's crate::core:!\")\n            .build();\n\n        let mut buffered = Buffered::<_, Cursor<Vec<u8>>>::new(mock);\n        buffered.write_buf.set_strategy(WriteStrategy::Flatten);\n\n        buffered.headers_buf().extend(b\"hello \");\n        buffered.buffer(Cursor::new(b\"world, \".to_vec()));\n        buffered.buffer(Cursor::new(b\"it's \".to_vec()));\n        buffered.buffer(Cursor::new(b\"crate::core:!\".to_vec()));\n        assert_eq!(buffered.write_buf.queue.bufs_cnt(), 0);\n\n        buffered.flush().await.expect(\"flush\");\n    }\n\n    #[test]\n    fn write_buf_flatten_partially_flushed() {\n        let _ = pretty_env_logger::try_init();\n\n        let b = |s: &str| Cursor::new(s.as_bytes().to_vec());\n\n        let mut write_buf = WriteBuf::<Cursor<Vec<u8>>>::new(WriteStrategy::Flatten);\n\n        write_buf.buffer(b(\"hello \"));\n        write_buf.buffer(b(\"world, \"));\n\n        assert_eq!(write_buf.chunk(), b\"hello world, \");\n\n        // advance most of the way, but not all\n        write_buf.advance(11);\n\n        
assert_eq!(write_buf.chunk(), b\", \");\n        assert_eq!(write_buf.headers.pos, 11);\n        assert_eq!(write_buf.headers.bytes.capacity(), INIT_BUFFER_SIZE);\n\n        // there's still room in the headers buffer, so just push on the end\n        write_buf.buffer(b(\"it's crate::core:!\"));\n\n        assert_eq!(write_buf.chunk(), b\", it's crate::core:!\");\n        assert_eq!(write_buf.headers.pos, 11);\n\n        let rem1 = write_buf.remaining();\n        let cap = write_buf.headers.bytes.capacity();\n\n        // but when this would go over capacity, don't copy the old bytes\n        write_buf.buffer(Cursor::new(vec![b'X'; cap]));\n        assert_eq!(write_buf.remaining(), cap + rem1);\n        assert_eq!(write_buf.headers.pos, 0);\n    }\n\n    #[tokio::test]\n    async fn write_buf_queue_disable_auto() {\n        let _ = pretty_env_logger::try_init();\n\n        let mock = Mock::new()\n            .write(b\"hello \")\n            .write(b\"world, \")\n            .write(b\"it's \")\n            .write(b\"crate::core:!\")\n            .build();\n\n        let mut buffered = Buffered::<_, Cursor<Vec<u8>>>::new(mock);\n        buffered.write_buf.set_strategy(WriteStrategy::Queue);\n\n        // we have 4 buffers, and vec IO disabled, but explicitly said\n        // don't try to auto detect (via setting strategy above)\n\n        buffered.headers_buf().extend(b\"hello \");\n        buffered.buffer(Cursor::new(b\"world, \".to_vec()));\n        buffered.buffer(Cursor::new(b\"it's \".to_vec()));\n        buffered.buffer(Cursor::new(b\"crate::core:!\".to_vec()));\n        assert_eq!(buffered.write_buf.queue.bufs_cnt(), 3);\n\n        buffered.flush().await.expect(\"flush\");\n\n        assert_eq!(buffered.write_buf.queue.bufs_cnt(), 0);\n    }\n\n    // #[cfg(feature = \"nightly\")]\n    // #[bench]\n    // fn bench_write_buf_flatten_buffer_chunk(b: &mut Bencher) {\n    //     let s = \"Hello, World!\";\n    //     b.bytes = s.len() as u64;\n\n    //     let mut 
write_buf = WriteBuf::<bytes::Bytes>::new();\n    //     write_buf.set_strategy(WriteStrategy::Flatten);\n    //     b.iter(|| {\n    //         let chunk = bytes::Bytes::from(s);\n    //         write_buf.buffer(chunk);\n    //         ::test::black_box(&write_buf);\n    //         write_buf.headers.bytes.clear();\n    //     })\n    // }\n}\n"
  },
  {
    "path": "src/client/core/proto/http1/role.rs",
    "content": "use std::{\n    fmt::{self, Write as _},\n    mem::MaybeUninit,\n};\n\nuse bytes::{Bytes, BytesMut};\nuse http::{\n    Method, StatusCode, Version,\n    header::{self, Entry, HeaderMap, HeaderName, HeaderValue},\n};\nuse smallvec::{SmallVec, smallvec, smallvec_inline};\n\nuse super::{Encode, Encoder, Http1Transaction, ParseContext, ParsedMessage, ext::ReasonPhrase};\nuse crate::{\n    client::core::{\n        Error, Result,\n        body::DecodedLength,\n        error::Parse,\n        proto::{BodyLength, MessageHead, RequestHead, RequestLine, headers},\n    },\n    config::RequestConfig,\n    header::OrigHeaderMap,\n};\n\n/// totally scientific\nconst AVERAGE_HEADER_SIZE: usize = 30;\npub(crate) const DEFAULT_MAX_HEADERS: usize = 100;\n\nmacro_rules! header_name {\n    ($bytes:expr) => {{\n        {\n            match HeaderName::from_bytes($bytes) {\n                Ok(name) => name,\n                Err(e) => maybe_panic!(e),\n            }\n        }\n    }};\n}\n\nmacro_rules! header_value {\n    ($bytes:expr) => {{\n        {\n            #[allow(unsafe_code)]\n            unsafe {\n                HeaderValue::from_maybe_shared_unchecked($bytes)\n            }\n        }\n    }};\n}\n\nmacro_rules! 
maybe_panic {\n    ($($arg:tt)*) => ({\n        let _err = ($($arg)*);\n        if cfg!(debug_assertions) {\n            panic!(\"{:?}\", _err);\n        } else {\n            error!(\"Internal core error, please report {:?}\", _err);\n            return Err(Parse::Internal)\n        }\n    })\n}\n\npub(super) fn parse_headers<T>(\n    bytes: &mut BytesMut,\n    prev_len: Option<usize>,\n    ctx: ParseContext<'_>,\n) -> Result<Option<ParsedMessage<T::Incoming>>, Parse>\nwhere\n    T: Http1Transaction,\n{\n    // If the buffer is empty, don't bother entering the span, it's just noise.\n    if bytes.is_empty() {\n        return Ok(None);\n    }\n\n    trace_span!(\"parse_headers\");\n\n    if let Some(prev_len) = prev_len {\n        if !is_complete_fast(bytes, prev_len) {\n            return Ok(None);\n        }\n    }\n\n    T::parse(bytes, ctx)\n}\n\n/// A fast scan for the end of a message.\n/// Used when there was a partial read, to skip full parsing on a\n/// a slow connection.\nfn is_complete_fast(bytes: &[u8], prev_len: usize) -> bool {\n    let start = prev_len.saturating_sub(3);\n    let bytes = &bytes[start..];\n\n    for (i, b) in bytes.iter().copied().enumerate() {\n        if b == b'\\r' {\n            if bytes[i + 1..].chunks(3).next() == Some(&b\"\\n\\r\\n\"[..]) {\n                return true;\n            }\n        } else if b == b'\\n' && bytes.get(i + 1) == Some(&b'\\n') {\n            return true;\n        }\n    }\n\n    false\n}\n\npub(crate) enum Client {}\n\nimpl Http1Transaction for Client {\n    type Incoming = StatusCode;\n\n    type Outgoing = RequestLine;\n\n    #[cfg(feature = \"tracing\")]\n    const LOG: &'static str = \"{role=client}\";\n\n    fn parse(\n        buf: &mut BytesMut,\n        ctx: ParseContext<'_>,\n    ) -> Result<Option<ParsedMessage<Self::Incoming>>, Parse> {\n        debug_assert!(!buf.is_empty(), \"parse called with empty buf\");\n\n        // Loop to skip information status code headers (100 Continue, etc).\n     
   loop {\n            let mut headers_indices: SmallVec<[MaybeUninit<HeaderIndices>; DEFAULT_MAX_HEADERS]> =\n                match ctx.h1_max_headers {\n                    Some(cap) => smallvec![MaybeUninit::uninit(); cap],\n                    None => smallvec_inline![MaybeUninit::uninit(); DEFAULT_MAX_HEADERS],\n                };\n\n            let (len, status, reason, version, headers_len) = {\n                let mut headers: SmallVec<\n                    [MaybeUninit<httparse::Header<'_>>; DEFAULT_MAX_HEADERS],\n                > = match ctx.h1_max_headers {\n                    Some(cap) => smallvec![MaybeUninit::uninit(); cap],\n                    None => smallvec_inline![MaybeUninit::uninit(); DEFAULT_MAX_HEADERS],\n                };\n\n                trace!(bytes = buf.len(), \"Response.parse\");\n\n                let mut res = httparse::Response::new(&mut []);\n                let bytes = buf.as_ref();\n                match ctx.h1_parser_config.parse_response_with_uninit_headers(\n                    &mut res,\n                    bytes,\n                    &mut headers,\n                ) {\n                    Ok(httparse::Status::Complete(len)) => {\n                        trace!(\"Response.parse Complete({})\", len);\n                        let status = StatusCode::from_u16(res.code.unwrap())?;\n\n                        let reason = {\n                            let reason = res.reason.unwrap();\n                            // Only save the reason phrase if it isn't the canonical reason\n                            if Some(reason) != status.canonical_reason() {\n                                Some(Bytes::copy_from_slice(reason.as_bytes()))\n                            } else {\n                                None\n                            }\n                        };\n\n                        let version = if res.version.unwrap() == 1 {\n                            Version::HTTP_11\n                        } else {\n             
               Version::HTTP_10\n                        };\n                        record_header_indices(bytes, res.headers, &mut headers_indices)?;\n                        let headers_len = res.headers.len();\n                        (len, status, reason, version, headers_len)\n                    }\n                    Ok(httparse::Status::Partial) => return Ok(None),\n                    Err(httparse::Error::Version) if ctx.h09_responses => {\n                        trace!(\"Response.parse accepted HTTP/0.9 response\");\n\n                        (0, StatusCode::OK, None, Version::HTTP_09, 0)\n                    }\n                    Err(e) => return Err(e.into()),\n                }\n            };\n\n            let mut slice = buf.split_to(len);\n\n            if ctx\n                .h1_parser_config\n                .obsolete_multiline_headers_in_responses_are_allowed()\n            {\n                for header in &mut headers_indices[..headers_len] {\n                    // SAFETY: array is valid up to `headers_len`\n                    #[allow(unsafe_code)]\n                    let header = unsafe { header.assume_init_mut() };\n                    Client::obs_fold_line(&mut slice, header);\n                }\n            }\n\n            let slice = slice.freeze();\n\n            let mut headers = ctx.cached_headers.take().unwrap_or_default();\n\n            let mut keep_alive = version == Version::HTTP_11;\n\n            headers.reserve(headers_len);\n            for header in &headers_indices[..headers_len] {\n                // SAFETY: array is valid up to `headers_len`\n                #[allow(unsafe_code)]\n                let header = unsafe { header.assume_init_ref() };\n                let name = header_name!(&slice[header.name.0..header.name.1]);\n                let value = header_value!(slice.slice(header.value.0..header.value.1));\n\n                if let header::CONNECTION = name {\n                    // keep_alive was previously set 
to default for Version\n                    if keep_alive {\n                        // HTTP/1.1\n                        keep_alive = !headers::connection_close(&value);\n                    } else {\n                        // HTTP/1.0\n                        keep_alive = headers::connection_keep_alive(&value);\n                    }\n                }\n\n                headers.append(name, value);\n            }\n\n            let mut extensions = http::Extensions::default();\n\n            if let Some(reason) = reason {\n                // Safety: httparse ensures that only valid reason phrase bytes are present in this\n                // field.\n                let reason = ReasonPhrase::from_bytes_unchecked(reason);\n                extensions.insert(reason);\n            }\n\n            let head = MessageHead {\n                version,\n                subject: status,\n                headers,\n                extensions,\n            };\n            if let Some((decode, is_upgrade)) = Client::decoder(&head, ctx.req_method)? 
{\n                return Ok(Some(ParsedMessage {\n                    head,\n                    decode,\n                    expect_continue: false,\n                    // a client upgrade means the connection can't be used\n                    // again, as it is definitely upgrading.\n                    keep_alive: keep_alive && !is_upgrade,\n                    wants_upgrade: is_upgrade,\n                }));\n            }\n\n            // Parsing a 1xx response could have consumed the buffer, check if\n            // it is empty now...\n            if buf.is_empty() {\n                return Ok(None);\n            }\n        }\n    }\n\n    fn encode(msg: Encode<'_, Self::Outgoing>, dst: &mut Vec<u8>) -> Result<Encoder> {\n        trace!(\n            \"Client::encode method={:?}, body={:?}\",\n            msg.head.subject.0, msg.body\n        );\n\n        *msg.req_method = Some(msg.head.subject.0.clone());\n\n        let body = Client::set_length(msg.head, msg.body);\n\n        let init_cap = 30 + msg.head.headers.len() * AVERAGE_HEADER_SIZE;\n        dst.reserve(init_cap);\n\n        extend(dst, msg.head.subject.0.as_str().as_bytes());\n        extend(dst, b\" \");\n        //TODO: add API to http::Uri to encode without std::fmt\n        let _ = write!(FastWrite(dst), \"{} \", msg.head.subject.1);\n\n        match msg.head.version {\n            Version::HTTP_10 => extend(dst, b\"HTTP/1.0\"),\n            Version::HTTP_11 => extend(dst, b\"HTTP/1.1\"),\n            Version::HTTP_2 => {\n                debug!(\"request with HTTP2 version coerced to HTTP/1.1\");\n                extend(dst, b\"HTTP/1.1\");\n            }\n            other => panic!(\"unexpected request version: {other:?}\"),\n        }\n        extend(dst, b\"\\r\\n\");\n\n        if let Some(orig_headers) = RequestConfig::<OrigHeaderMap>::get(&msg.head.extensions) {\n            write_headers_original_case(&mut msg.head.headers, orig_headers, dst);\n        } else {\n            
write_headers(&msg.head.headers, dst);\n        }\n\n        extend(dst, b\"\\r\\n\");\n        msg.head.headers.clear(); //TODO: remove when switching to drain()\n\n        Ok(body)\n    }\n\n    fn on_error(_err: &Error) -> Option<MessageHead<Self::Outgoing>> {\n        // we can't tell the server about any errors it creates\n        None\n    }\n}\n\nimpl Client {\n    /// Returns Some(length, wants_upgrade) if successful.\n    ///\n    /// Returns None if this message head should be skipped (like a 100 status).\n    fn decoder(\n        inc: &MessageHead<StatusCode>,\n        method: &mut Option<Method>,\n    ) -> Result<Option<(DecodedLength, bool)>, Parse> {\n        // According to https://tools.ietf.org/html/rfc7230#section-3.3.3\n        // 1. HEAD responses, and Status 1xx, 204, and 304 cannot have a body.\n        // 2. Status 2xx to a CONNECT cannot have a body.\n        // 3. Transfer-Encoding: chunked has a chunked body.\n        // 4. If multiple differing Content-Length headers or invalid, close connection.\n        // 5. Content-Length header has a sized body.\n        // 6. (irrelevant to Response)\n        // 7. 
Read till EOF.\n\n        match inc.subject.as_u16() {\n            101 => {\n                return Ok(Some((DecodedLength::ZERO, true)));\n            }\n            100 | 102..=199 => {\n                trace!(\"ignoring informational response: {}\", inc.subject.as_u16());\n                return Ok(None);\n            }\n            204 | 304 => return Ok(Some((DecodedLength::ZERO, false))),\n            _ => (),\n        }\n        match *method {\n            Some(Method::HEAD) => {\n                return Ok(Some((DecodedLength::ZERO, false)));\n            }\n            Some(Method::CONNECT) => {\n                if let 200..=299 = inc.subject.as_u16() {\n                    return Ok(Some((DecodedLength::ZERO, true)));\n                }\n            }\n            Some(_) => {}\n            None => {\n                trace!(\"Client::decoder is missing the Method\");\n            }\n        }\n\n        if inc.headers.contains_key(header::TRANSFER_ENCODING) {\n            // https://tools.ietf.org/html/rfc7230#section-3.3.3\n            // If Transfer-Encoding header is present, and 'chunked' is\n            // not the final encoding, and this is a Request, then it is\n            // malformed. 
A server should respond with 400 Bad Request.\n            return if inc.version == Version::HTTP_10 {\n                debug!(\"HTTP/1.0 cannot have Transfer-Encoding header\");\n                Err(Parse::transfer_encoding_unexpected())\n            } else if headers::transfer_encoding_is_chunked(&inc.headers) {\n                Ok(Some((DecodedLength::CHUNKED, false)))\n            } else {\n                trace!(\"not chunked, read till eof\");\n                Ok(Some((DecodedLength::CLOSE_DELIMITED, false)))\n            };\n        }\n\n        if let Some(len) = headers::content_length_parse_all(&inc.headers) {\n            return Ok(Some((DecodedLength::checked_new(len)?, false)));\n        }\n\n        if inc.headers.contains_key(header::CONTENT_LENGTH) {\n            debug!(\"illegal Content-Length header\");\n            return Err(Parse::content_length_invalid());\n        }\n\n        trace!(\"neither Transfer-Encoding nor Content-Length\");\n        Ok(Some((DecodedLength::CLOSE_DELIMITED, false)))\n    }\n\n    fn set_length(head: &mut RequestHead, body: Option<BodyLength>) -> Encoder {\n        let body = if let Some(body) = body {\n            body\n        } else {\n            head.headers.remove(header::TRANSFER_ENCODING);\n            return Encoder::length(0);\n        };\n\n        // HTTP/1.0 doesn't know about chunked\n        let can_chunked = head.version == Version::HTTP_11;\n        let headers = &mut head.headers;\n\n        // If the user already set specific headers, we should respect them, regardless\n        // of what the Body knows about itself. 
They set them for a reason.\n\n        // Because of the borrow checker, we can't check the for an existing\n        // Content-Length header while holding an `Entry` for the Transfer-Encoding\n        // header, so unfortunately, we must do the check here, first.\n\n        let existing_con_len = headers::content_length_parse_all(headers);\n        let mut should_remove_con_len = false;\n\n        if !can_chunked {\n            // Chunked isn't legal, so if it is set, we need to remove it.\n            if headers.remove(header::TRANSFER_ENCODING).is_some() {\n                trace!(\"removing illegal transfer-encoding header\");\n            }\n\n            return if let Some(len) = existing_con_len {\n                Encoder::length(len)\n            } else if let BodyLength::Known(len) = body {\n                set_content_length(headers, len)\n            } else {\n                // HTTP/1.0 client requests without a content-length\n                // cannot have any body at all.\n                Encoder::length(0)\n            };\n        }\n\n        // If the user set a transfer-encoding, respect that. 
Let's just\n        // make sure `chunked` is the final encoding.\n        let encoder = match headers.entry(header::TRANSFER_ENCODING) {\n            Entry::Occupied(te) => {\n                should_remove_con_len = true;\n                if headers::is_chunked(te.iter()) {\n                    Some(Encoder::chunked())\n                } else {\n                    warn!(\"user provided transfer-encoding does not end in 'chunked'\");\n\n                    // There's a Transfer-Encoding, but it doesn't end in 'chunked'!\n                    // An example that could trigger this:\n                    //\n                    //     Transfer-Encoding: gzip\n                    //\n                    // This can be bad, depending on if this is a request or a\n                    // response.\n                    //\n                    // - A request is illegal if there is a `Transfer-Encoding` but it doesn't end\n                    //   in `chunked`.\n                    // - A response that has `Transfer-Encoding` but doesn't end in `chunked` isn't\n                    //   illegal, it just forces this to be close-delimited.\n                    //\n                    // We can try to repair this, by adding `chunked` ourselves.\n\n                    headers::add_chunked(te);\n                    Some(Encoder::chunked())\n                }\n            }\n            Entry::Vacant(te) => {\n                if let Some(len) = existing_con_len {\n                    Some(Encoder::length(len))\n                } else if let BodyLength::Unknown = body {\n                    // GET, HEAD, and CONNECT almost never have bodies.\n                    //\n                    // So instead of sending a \"chunked\" body with a 0-chunk,\n                    // assume no body here. 
If you *must* send a body,\n                    // set the headers explicitly.\n                    match head.subject.0 {\n                        Method::GET | Method::HEAD | Method::CONNECT => Some(Encoder::length(0)),\n                        _ => {\n                            te.insert(HeaderValue::from_static(\"chunked\"));\n                            Some(Encoder::chunked())\n                        }\n                    }\n                } else {\n                    None\n                }\n            }\n        };\n\n        let encoder = encoder.map(|enc| {\n            if enc.is_chunked() {\n                // Parse Trailer header values into HeaderNames.\n                // Each Trailer header value may contain comma-separated names.\n                // HeaderName normalizes to lowercase, enabling case-insensitive matching.\n                let allowed_trailer_fields: Vec<HeaderName> = headers\n                    .get_all(header::TRAILER)\n                    .iter()\n                    .filter_map(|hv| hv.to_str().ok())\n                    .flat_map(|s| s.split(','))\n                    .filter_map(|s| HeaderName::from_bytes(s.trim().as_bytes()).ok())\n                    .collect();\n\n                if !allowed_trailer_fields.is_empty() {\n                    return enc.into_chunked_with_trailing_fields(allowed_trailer_fields);\n                }\n            }\n\n            enc\n        });\n\n        // This is because we need a second mutable borrow to remove\n        // content-length header.\n        if let Some(encoder) = encoder {\n            if should_remove_con_len && existing_con_len.is_some() {\n                headers.remove(header::CONTENT_LENGTH);\n            }\n            return encoder;\n        }\n\n        // User didn't set transfer-encoding, AND we know body length,\n        // so we can just set the Content-Length automatically.\n\n        let len = if let BodyLength::Known(len) = body {\n            len\n        } 
else {\n            unreachable!(\"BodyLength::Unknown would set chunked\");\n        };\n\n        set_content_length(headers, len)\n    }\n\n    fn obs_fold_line(all: &mut [u8], idx: &mut HeaderIndices) {\n        // If the value has obs-folded text, then in-place shift the bytes out\n        // of here.\n        //\n        // https://httpwg.org/specs/rfc9112.html#line.folding\n        //\n        // > A user agent that receives an obs-fold MUST replace each received\n        // > obs-fold with one or more SP octets prior to interpreting the\n        // > field value.\n        //\n        // This means strings like \"\\r\\n\\t foo\" must replace the \"\\r\\n\\t \" with\n        // a single space.\n\n        let buf = &mut all[idx.value.0..idx.value.1];\n\n        // look for a newline, otherwise bail out\n        let first_nl = match buf.iter().position(|b| *b == b'\\n') {\n            Some(i) => i,\n            None => return,\n        };\n\n        // not on standard slices because whatever, sigh\n        fn trim_start(mut s: &[u8]) -> &[u8] {\n            while let [first, rest @ ..] 
= s {\n                if first.is_ascii_whitespace() {\n                    s = rest;\n                } else {\n                    break;\n                }\n            }\n            s\n        }\n\n        fn trim_end(mut s: &[u8]) -> &[u8] {\n            while let [rest @ .., last] = s {\n                if last.is_ascii_whitespace() {\n                    s = rest;\n                } else {\n                    break;\n                }\n            }\n            s\n        }\n\n        fn trim(s: &[u8]) -> &[u8] {\n            trim_start(trim_end(s))\n        }\n\n        // TODO(perf): we could do the moves in-place, but this is so uncommon\n        // that it shouldn't matter.\n        let mut unfolded = trim_end(&buf[..first_nl]).to_vec();\n        for line in buf[first_nl + 1..].split(|b| *b == b'\\n') {\n            unfolded.push(b' ');\n            unfolded.extend_from_slice(trim(line));\n        }\n        buf[..unfolded.len()].copy_from_slice(&unfolded);\n        idx.value.1 = idx.value.0 + unfolded.len();\n    }\n}\n\nfn set_content_length(headers: &mut HeaderMap, len: u64) -> Encoder {\n    // At this point, there should not be a valid Content-Length\n    // header. However, since we'll be indexing in anyways, we can\n    // warn the user if there was an existing illegal header.\n    //\n    // Or at least, we can in theory. 
It's actually a little bit slower,\n    // so perhaps only do that while the user is developing/testing.\n\n    if cfg!(debug_assertions) {\n        match headers.entry(header::CONTENT_LENGTH) {\n            Entry::Occupied(mut cl) => {\n                // Internal sanity check, we should have already determined\n                // that the header was illegal before calling this function.\n                debug_assert!(headers::content_length_parse_all_values(cl.iter()).is_none());\n                // Uh oh, the user set `Content-Length` headers, but set bad ones.\n                // This would be an illegal message anyways, so let's try to repair\n                // with our known good length.\n                error!(\"user provided content-length header was invalid\");\n\n                cl.insert(HeaderValue::from(len));\n                Encoder::length(len)\n            }\n            Entry::Vacant(cl) => {\n                cl.insert(HeaderValue::from(len));\n                Encoder::length(len)\n            }\n        }\n    } else {\n        headers.insert(header::CONTENT_LENGTH, HeaderValue::from(len));\n        Encoder::length(len)\n    }\n}\n\n#[derive(Clone, Copy)]\nstruct HeaderIndices {\n    name: (usize, usize),\n    value: (usize, usize),\n}\n\nfn record_header_indices(\n    bytes: &[u8],\n    headers: &[httparse::Header<'_>],\n    indices: &mut [MaybeUninit<HeaderIndices>],\n) -> Result<(), Parse> {\n    let bytes_ptr = bytes.as_ptr() as usize;\n\n    for (header, indices) in headers.iter().zip(indices.iter_mut()) {\n        if header.name.len() >= (1 << 16) {\n            debug!(\"header name larger than 64kb: {:?}\", header.name);\n            return Err(Parse::TooLarge);\n        }\n        let name_start = header.name.as_ptr() as usize - bytes_ptr;\n        let name_end = name_start + header.name.len();\n        let value_start = header.value.as_ptr() as usize - bytes_ptr;\n        let value_end = value_start + header.value.len();\n\n        
indices.write(HeaderIndices {\n            name: (name_start, name_end),\n            value: (value_start, value_end),\n        });\n    }\n\n    Ok(())\n}\n\npub(crate) fn write_headers(headers: &HeaderMap, dst: &mut Vec<u8>) {\n    for (name, value) in headers {\n        extend(dst, name.as_ref());\n        extend(dst, b\": \");\n        extend(dst, value.as_bytes());\n        extend(dst, b\"\\r\\n\");\n    }\n}\n\nfn write_headers_original_case(\n    headers: &mut HeaderMap,\n    orig_headers: &OrigHeaderMap,\n    dst: &mut Vec<u8>,\n) {\n    orig_headers.sort_headers_for_each(headers, |orig_name, value| {\n        extend(dst, orig_name);\n\n        // Wanted for curl test cases that send `X-Custom-Header:\\r\\n`\n        if value.is_empty() {\n            extend(dst, b\":\\r\\n\");\n        } else {\n            extend(dst, b\": \");\n            extend(dst, value.as_bytes());\n            extend(dst, b\"\\r\\n\");\n        }\n    });\n}\n\nstruct FastWrite<'a>(&'a mut Vec<u8>);\n\nimpl fmt::Write for FastWrite<'_> {\n    #[inline]\n    fn write_str(&mut self, s: &str) -> fmt::Result {\n        extend(self.0, s.as_bytes());\n        Ok(())\n    }\n\n    #[inline]\n    fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> fmt::Result {\n        fmt::write(self, args)\n    }\n}\n\n#[inline]\nfn extend(dst: &mut Vec<u8>, data: &[u8]) {\n    dst.extend_from_slice(data);\n}\n"
  },
  {
    "path": "src/client/core/proto/http1.rs",
    "content": "//! HTTP/1 protocol implementation and utilities.\n\nmod buf;\nmod decode;\nmod encode;\nmod io;\n\npub(crate) mod conn;\npub(crate) mod dispatch;\npub(crate) mod ext;\npub(crate) mod role;\n\nuse bytes::BytesMut;\nuse http::{HeaderMap, Method};\nuse httparse::ParserConfig;\n\nuse self::{conn::Conn, decode::Decoder, encode::Encoder, io::MINIMUM_MAX_BUFFER_SIZE};\nuse super::{BodyLength, MessageHead};\nuse crate::client::core::{\n    body::DecodedLength,\n    error::{Error, Parse, Result},\n};\n\npub(crate) trait Http1Transaction {\n    type Incoming;\n\n    type Outgoing: Default;\n\n    #[cfg(feature = \"tracing\")]\n    const LOG: &'static str;\n\n    fn parse(\n        bytes: &mut BytesMut,\n        ctx: ParseContext<'_>,\n    ) -> Result<Option<ParsedMessage<Self::Incoming>>, Parse>;\n\n    fn encode(enc: Encode<'_, Self::Outgoing>, dst: &mut Vec<u8>) -> Result<Encoder>;\n\n    fn on_error(err: &Error) -> Option<MessageHead<Self::Outgoing>>;\n\n    fn update_date() {}\n}\n\n#[derive(Debug)]\npub(crate) struct ParsedMessage<T> {\n    head: MessageHead<T>,\n    decode: DecodedLength,\n    expect_continue: bool,\n    keep_alive: bool,\n    wants_upgrade: bool,\n}\n\npub(crate) struct ParseContext<'a> {\n    cached_headers: &'a mut Option<HeaderMap>,\n    req_method: &'a mut Option<Method>,\n    h1_parser_config: &'a ParserConfig,\n    h1_max_headers: Option<usize>,\n    h09_responses: bool,\n}\n\n/// Passed to Http1Transaction::encode\npub(crate) struct Encode<'a, T> {\n    head: &'a mut MessageHead<T>,\n    body: Option<BodyLength>,\n    req_method: &'a mut Option<Method>,\n}\n\n/// Extra flags that a request \"wants\", like expect-continue or upgrades.\n#[derive(Clone, Copy, Debug)]\nstruct Wants(u8);\n\nimpl Wants {\n    const EMPTY: Wants = Wants(0b00);\n    const EXPECT: Wants = Wants(0b01);\n    const UPGRADE: Wants = Wants(0b10);\n\n    #[inline]\n    #[must_use]\n    fn add(self, other: Wants) -> Wants {\n        Wants(self.0 | other.0)\n   
 }\n\n    #[inline]\n    fn contains(&self, other: Wants) -> bool {\n        (self.0 & other.0) == other.0\n    }\n}\n\n/// Builder for `Http1Options`.\n#[must_use]\n#[derive(Debug)]\npub struct Http1OptionsBuilder {\n    opts: Http1Options,\n}\n\n/// Options for tweaking HTTP/1 connection behavior.\n///\n/// Use `Http1Options` to adjust how HTTP/1 connections work—things like allowing HTTP/0.9\n/// responses, controlling header handling, buffer sizes, and more. Most settings are optional and\n/// have reasonable defaults.\n#[non_exhaustive]\n#[derive(Debug, Default, Clone)]\npub struct Http1Options {\n    /// Enable support for HTTP/0.9 responses.\n    pub h09_responses: bool,\n\n    /// Whether to use vectored writes for HTTP/1 connections.\n    pub h1_writev: Option<bool>,\n\n    /// Maximum number of headers allowed in HTTP/1 responses.\n    pub h1_max_headers: Option<usize>,\n\n    /// Exact size of the read buffer to use for HTTP/1 connections.\n    pub h1_read_buf_exact_size: Option<usize>,\n\n    /// Maximum buffer size for HTTP/1 connections.\n    pub h1_max_buf_size: Option<usize>,\n\n    /// Whether to ignore invalid headers in HTTP/1 responses.\n    pub ignore_invalid_headers_in_responses: bool,\n\n    /// Whether to allow spaces after header names in HTTP/1 responses.\n    pub allow_spaces_after_header_name_in_responses: bool,\n\n    /// Whether to allow obsolete multiline headers in HTTP/1 responses.\n    pub allow_obsolete_multiline_headers_in_responses: bool,\n}\n\nimpl Http1OptionsBuilder {\n    /// Set the `http09_responses` field.\n    #[inline]\n    pub fn http09_responses(mut self, enabled: bool) -> Self {\n        self.opts.h09_responses = enabled;\n        self\n    }\n\n    /// Set whether HTTP/1 connections should try to use vectored writes,\n    /// or always flatten into a single buffer.\n    ///\n    /// Note that setting this to false may mean more copies of body data,\n    /// but may also improve performance when an IO transport 
doesn't\n    /// support vectored writes well, such as most TLS implementations.\n    ///\n    /// Setting this to true will force crate::core: to use queued strategy\n    /// which may eliminate unnecessary cloning on some TLS backends\n    ///\n    /// Default is `auto`. In this mode crate::core: will try to guess which\n    /// mode to use\n    #[inline]\n    pub fn writev(mut self, writev: Option<bool>) -> Self {\n        self.opts.h1_writev = writev;\n        self\n    }\n\n    /// Set the maximum number of headers.\n    ///\n    /// When a response is received, the parser will reserve a buffer to store headers for optimal\n    /// performance.\n    ///\n    /// If client receives more headers than the buffer size, the error \"message header too large\"\n    /// is returned.\n    ///\n    /// Note that headers is allocated on the stack by default, which has higher performance. After\n    /// setting this value, headers will be allocated in heap memory, that is, heap memory\n    /// allocation will occur for each response, and there will be a performance drop of about 5%.\n    ///\n    /// Default is 100.\n    #[inline]\n    pub fn max_headers(mut self, max_headers: usize) -> Self {\n        self.opts.h1_max_headers = Some(max_headers);\n        self\n    }\n\n    /// Sets the exact size of the read buffer to *always* use.\n    ///\n    /// Note that setting this option unsets the `max_buf_size` option.\n    ///\n    /// Default is an adaptive read buffer.\n    #[inline]\n    pub fn read_buf_exact_size(mut self, sz: Option<usize>) -> Self {\n        self.opts.h1_read_buf_exact_size = sz;\n        self.opts.h1_max_buf_size = None;\n        self\n    }\n\n    /// Set the maximum buffer size for the connection.\n    ///\n    /// Default is ~400kb.\n    ///\n    /// Note that setting this option unsets the `read_exact_buf_size` option.\n    ///\n    /// # Panics\n    ///\n    /// The minimum value allowed is 8192. 
This method panics if the passed `max` is less than the\n    /// minimum.\n    #[inline]\n    pub fn max_buf_size(mut self, max: usize) -> Self {\n        assert!(\n            max >= MINIMUM_MAX_BUFFER_SIZE,\n            \"the max_buf_size cannot be smaller than the minimum that h1 specifies.\"\n        );\n\n        self.opts.h1_max_buf_size = Some(max);\n        self.opts.h1_read_buf_exact_size = None;\n        self\n    }\n\n    /// Set whether HTTP/1 connections will accept spaces between header names\n    /// and the colon that follow them in responses.\n    ///\n    /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] has\n    /// to say about it:\n    ///\n    /// > No whitespace is allowed between the header field-name and colon. In\n    /// > the past, differences in the handling of such whitespace have led to\n    /// > security vulnerabilities in request routing and response handling. A\n    /// > server MUST reject any received request message that contains\n    /// > whitespace between a header field-name and colon with a response code\n    /// > of 400 (Bad Request). 
A proxy MUST remove any such whitespace from a\n    /// > response message before forwarding the message downstream.\n    ///\n    /// Default is false.\n    ///\n    /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4\n    #[inline]\n    pub fn allow_spaces_after_header_name_in_responses(mut self, enabled: bool) -> Self {\n        self.opts.allow_spaces_after_header_name_in_responses = enabled;\n        self\n    }\n\n    /// Set whether HTTP/1 connections will silently ignored malformed header lines.\n    ///\n    /// If this is enabled and a header line does not start with a valid header\n    /// name, or does not include a colon at all, the line will be silently ignored\n    /// and no error will be reported.\n    ///\n    /// Default is false.\n    #[inline]\n    pub fn ignore_invalid_headers_in_responses(mut self, enabled: bool) -> Self {\n        self.opts.ignore_invalid_headers_in_responses = enabled;\n        self\n    }\n\n    /// Set the `allow_obsolete_multiline_headers_in_responses` field.\n    #[inline]\n    pub fn allow_obsolete_multiline_headers_in_responses(mut self, value: bool) -> Self {\n        self.opts.allow_obsolete_multiline_headers_in_responses = value;\n        self\n    }\n\n    /// Build the [`Http1Options`] instance.\n    #[inline]\n    pub fn build(self) -> Http1Options {\n        self.opts\n    }\n}\n\nimpl Http1Options {\n    /// Create a new [`Http1OptionsBuilder`].\n    pub fn builder() -> Http1OptionsBuilder {\n        Http1OptionsBuilder {\n            opts: Http1Options::default(),\n        }\n    }\n}\n"
  },
  {
    "path": "src/client/core/proto/http2/client.rs",
    "content": "use std::{\n    convert::Infallible,\n    future::Future,\n    marker::PhantomData,\n    pin::Pin,\n    task::{Context, Poll, ready},\n};\n\nuse bytes::Bytes;\nuse futures_util::future::{Either, FusedFuture};\nuse http::{Method, Request, Response, StatusCode};\nuse http_body::Body;\nuse http2::{\n    SendStream,\n    client::{Builder, Connection, ResponseFuture, SendRequest},\n};\nuse pin_project_lite::pin_project;\nuse tokio::{\n    io::{AsyncRead, AsyncWrite},\n    sync::{\n        mpsc,\n        mpsc::{Receiver, Sender},\n        oneshot,\n    },\n};\n\nuse super::{\n    H2Upgraded, PipeToSendStream, SendBuf, ping,\n    ping::{Ponger, Recorder},\n};\nuse crate::{\n    client::core::{\n        Error, Result,\n        body::{self, Incoming},\n        dispatch::{self, Callback, SendWhen, TrySendError},\n        error::BoxError,\n        proto::{Dispatched, headers},\n        rt::{Time, bounds::Http2ClientConnExec},\n        upgrade::{self, Upgraded},\n    },\n    config::RequestConfig,\n    header::OrigHeaderMap,\n};\n\n/// Receiver for HTTP/2 client requests\ntype ClientRx<B> = dispatch::Receiver<Request<B>, Response<Incoming>>;\n\n///// An mpsc channel is used to help notify the `Connection` task when *all*\n///// other handles to it have been dropped, so that it can shutdown.\ntype ConnDropRef = mpsc::Sender<Infallible>;\n\n///// A oneshot channel watches the `Connection` task, and when it completes,\n///// the \"dispatch\" task will be notified and can shutdown sooner.\ntype ConnEof = oneshot::Receiver<Infallible>;\n\npub(crate) async fn handshake<T, B, E>(\n    io: T,\n    req_rx: ClientRx<B>,\n    builder: Builder,\n    ping_config: ping::Config,\n    mut exec: E,\n    timer: Time,\n) -> Result<ClientTask<B, E, T>>\nwhere\n    T: AsyncRead + AsyncWrite + Unpin,\n    B: Body + 'static,\n    B::Data: Send + 'static,\n    E: Http2ClientConnExec<B, T> + Unpin,\n    B::Error: Into<BoxError>,\n{\n    let (h2_tx, mut conn) = builder\n        
.handshake::<_, SendBuf<B::Data>>(io)\n        .await\n        .map_err(Error::new_h2)?;\n\n    // An mpsc channel is used entirely to detect when the\n    // 'Client' has been dropped. This is to get around a bug\n    // in h2 where dropping all SendRequests won't notify a\n    // parked Connection.\n    let (conn_drop_ref, conn_drop_rx) = mpsc::channel(1);\n    let (cancel_tx, conn_eof) = oneshot::channel();\n\n    let (conn, ping) = if ping_config.is_enabled() {\n        let pp = conn.ping_pong().expect(\"conn.ping_pong\");\n        let (recorder, ponger) = ping::channel(pp, ping_config, timer);\n\n        let conn: Conn<_, B> = Conn { ponger, conn };\n        (Either::Left(conn), recorder)\n    } else {\n        (Either::Right(conn), ping::Recorder::disabled())\n    };\n    let conn: ConnMapErr<T, B> = ConnMapErr {\n        conn,\n        is_terminated: false,\n    };\n\n    exec.execute_h2_future(H2ClientFuture::Task {\n        task: ConnTask::new(conn, conn_drop_rx, cancel_tx),\n    });\n\n    Ok(ClientTask {\n        ping,\n        conn_drop_ref,\n        conn_eof,\n        executor: exec,\n        h2_tx,\n        req_rx,\n        fut_ctx: None,\n        marker: PhantomData,\n    })\n}\n\npin_project! 
{\n    struct Conn<T, B>\n    where\n        B: Body,\n    {\n        #[pin]\n        ponger: Ponger,\n        #[pin]\n        conn: Connection<T, SendBuf<<B as Body>::Data>>,\n    }\n}\n\nimpl<T, B> Future for Conn<T, B>\nwhere\n    B: Body,\n    T: AsyncRead + AsyncWrite + Unpin,\n{\n    type Output = Result<(), http2::Error>;\n\n    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {\n        let mut this = self.project();\n        match this.ponger.poll(cx) {\n            Poll::Ready(ping::Ponged::SizeUpdate(wnd)) => {\n                this.conn.set_target_window_size(wnd);\n                this.conn.set_initial_window_size(wnd)?;\n            }\n            Poll::Ready(ping::Ponged::KeepAliveTimedOut) => {\n                debug!(\"connection keep-alive timed out\");\n                return Poll::Ready(Ok(()));\n            }\n            Poll::Pending => {}\n        }\n\n        Pin::new(&mut this.conn).poll(cx)\n    }\n}\n\npin_project! {\n    struct ConnMapErr<T, B>\n    where\n        B: Body,\n        T: AsyncRead,\n        T: AsyncWrite,\n        T: Unpin,\n    {\n        #[pin]\n        conn: Either<Conn<T, B>, Connection<T, SendBuf<<B as Body>::Data>>>,\n        #[pin]\n        is_terminated: bool,\n    }\n}\n\nimpl<T, B> Future for ConnMapErr<T, B>\nwhere\n    B: Body,\n    T: AsyncRead + AsyncWrite + Unpin,\n{\n    type Output = Result<(), ()>;\n\n    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {\n        let mut this = self.project();\n\n        if *this.is_terminated {\n            return Poll::Pending;\n        }\n        let polled = this.conn.poll(cx);\n        if polled.is_ready() {\n            *this.is_terminated = true;\n        }\n        polled.map_err(|_e| {\n            debug!(error = %_e, \"connection error\");\n        })\n    }\n}\n\nimpl<T, B> FusedFuture for ConnMapErr<T, B>\nwhere\n    B: Body,\n    T: AsyncRead + AsyncWrite + Unpin,\n{\n    #[inline]\n    fn 
is_terminated(&self) -> bool {\n        self.is_terminated\n    }\n}\n\npin_project! {\n    pub struct ConnTask<T, B>\n    where\n        B: Body,\n        T: AsyncRead,\n        T: AsyncWrite,\n        T: Unpin,\n    {\n        #[pin]\n        drop_rx: Receiver<Infallible>,\n        #[pin]\n        cancel_tx: Option<oneshot::Sender<Infallible>>,\n        #[pin]\n        conn: ConnMapErr<T, B>,\n    }\n}\n\nimpl<T, B> ConnTask<T, B>\nwhere\n    B: Body,\n    T: AsyncRead + AsyncWrite + Unpin,\n{\n    #[inline]\n    fn new(\n        conn: ConnMapErr<T, B>,\n        drop_rx: Receiver<Infallible>,\n        cancel_tx: oneshot::Sender<Infallible>,\n    ) -> Self {\n        Self {\n            drop_rx,\n            cancel_tx: Some(cancel_tx),\n            conn,\n        }\n    }\n}\n\nimpl<T, B> Future for ConnTask<T, B>\nwhere\n    B: Body,\n    T: AsyncRead + AsyncWrite + Unpin,\n{\n    type Output = ();\n\n    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {\n        let mut this = self.project();\n\n        if !this.conn.is_terminated() && Pin::new(&mut this.conn).poll(cx).is_ready() {\n            // ok or err, the `conn` has finished.\n            return Poll::Ready(());\n        }\n\n        if this.cancel_tx.is_some() && Pin::new(&mut this.drop_rx).poll_recv(cx).is_ready() {\n            // mpsc has been dropped, hopefully polling\n            // the connection some more should start shutdown\n            // and then close.\n            trace!(\"send_request dropped, starting conn shutdown\");\n            drop(this.cancel_tx.take().expect(\"ConnTask Future polled twice\"));\n        }\n\n        Poll::Pending\n    }\n}\n\npin_project! 
{\n    #[project = H2ClientFutureProject]\n    pub enum H2ClientFuture<B, T>\n    where\n        B: http_body::Body,\n        B: 'static,\n        B::Error: Into<BoxError>,\n        T: AsyncRead,\n        T: AsyncWrite,\n        T: Unpin,\n    {\n        Pipe {\n            #[pin]\n            pipe: PipeMap<B>,\n        },\n        Send {\n            #[pin]\n            send_when: SendWhen<B>,\n        },\n        Task {\n            #[pin]\n            task: ConnTask<T, B>,\n        },\n    }\n}\n\nimpl<B, T> Future for H2ClientFuture<B, T>\nwhere\n    B: Body + 'static,\n    B::Data: Send,\n    B::Error: Into<BoxError>,\n    T: AsyncRead + AsyncWrite + Unpin,\n{\n    type Output = ();\n\n    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> std::task::Poll<Self::Output> {\n        let this = self.project();\n\n        match this {\n            H2ClientFutureProject::Pipe { pipe } => pipe.poll(cx),\n            H2ClientFutureProject::Send { send_when } => send_when.poll(cx),\n            H2ClientFutureProject::Task { task } => task.poll(cx),\n        }\n    }\n}\n\nstruct FutCtx<B>\nwhere\n    B: Body,\n{\n    is_connect: bool,\n    eos: bool,\n    fut: ResponseFuture,\n    body_tx: SendStream<SendBuf<B::Data>>,\n    body: B,\n    cb: Callback<Request<B>, Response<Incoming>>,\n}\n\nimpl<B: Body> Unpin for FutCtx<B> {}\n\npub(crate) struct ClientTask<B, E, T>\nwhere\n    B: Body,\n    E: Unpin,\n{\n    ping: ping::Recorder,\n    conn_drop_ref: ConnDropRef,\n    conn_eof: ConnEof,\n    executor: E,\n    h2_tx: SendRequest<SendBuf<B::Data>>,\n    req_rx: ClientRx<B>,\n    fut_ctx: Option<FutCtx<B>>,\n    marker: PhantomData<T>,\n}\n\npin_project! 
{\n    pub struct PipeMap<S>\n    where\n        S: Body,\n    {\n        #[pin]\n        pipe: PipeToSendStream<S>,\n        #[pin]\n        conn_drop_ref: Option<Sender<Infallible>>,\n        #[pin]\n        ping: Option<Recorder>,\n        cancel_rx: Option<oneshot::Receiver<()>>,\n    }\n}\n\nimpl<B> Future for PipeMap<B>\nwhere\n    B: http_body::Body,\n    B::Error: Into<BoxError>,\n{\n    type Output = ();\n\n    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> std::task::Poll<Self::Output> {\n        const EXPECT_TAKEN_ONCE_MSG: &str = \"Future polled twice\";\n\n        let mut this = self.project();\n\n        // Check if the client cancelled the request (e.g. dropped the\n        // response future due to a timeout). If so, reset the h2 stream\n        // so that a RST_STREAM is sent and flow-control capacity is freed.\n        match this.cancel_rx.as_mut().map(|rx| Pin::new(rx).poll(cx)) {\n            Some(Poll::Ready(Ok(()))) => {\n                debug!(\"client request body send cancelled, resetting stream\");\n                this.pipe.as_mut().send_reset(http2::Reason::CANCEL);\n                this.conn_drop_ref.take().expect(EXPECT_TAKEN_ONCE_MSG);\n                this.ping.take().expect(EXPECT_TAKEN_ONCE_MSG);\n                return Poll::Ready(());\n            }\n            Some(Poll::Ready(Err(_))) => {\n                // Sender dropped without cancelling (normal response or error).\n                // Stop polling the receiver.\n                *this.cancel_rx = None;\n            }\n            Some(Poll::Pending) | None => {}\n        }\n\n        match Pin::new(&mut this.pipe).poll(cx) {\n            Poll::Ready(result) => {\n                if let Err(_e) = result {\n                    debug!(\"client request body error: {}\", _e);\n                }\n                drop(this.conn_drop_ref.take().expect(EXPECT_TAKEN_ONCE_MSG));\n                drop(this.ping.take().expect(EXPECT_TAKEN_ONCE_MSG));\n                return 
Poll::Ready(());\n            }\n            Poll::Pending => (),\n        };\n        Poll::Pending\n    }\n}\n\nimpl<B, E, T> ClientTask<B, E, T>\nwhere\n    B: Body + 'static + Unpin,\n    B::Data: Send,\n    E: Http2ClientConnExec<B, T> + Unpin,\n    B::Error: Into<BoxError>,\n    T: AsyncRead + AsyncWrite + Unpin,\n{\n    fn poll_pipe(&mut self, f: FutCtx<B>, cx: &mut Context<'_>) {\n        let ping = self.ping.clone();\n\n        // A one-shot channel so that send_task can tell pipe_task to\n        // reset the stream when the client cancels the request.\n        let (cancel_tx, cancel_rx) = oneshot::channel::<()>();\n\n        let send_stream = if !f.is_connect {\n            if !f.eos {\n                let mut pipe = PipeToSendStream::new(f.body, f.body_tx);\n\n                // eagerly see if the body pipe is ready and\n                // can thus skip allocating in the executor\n                match Pin::new(&mut pipe).poll(cx) {\n                    Poll::Ready(_) => (),\n                    Poll::Pending => {\n                        let conn_drop_ref = self.conn_drop_ref.clone();\n                        // keep the ping recorder's knowledge of an\n                        // \"open stream\" alive while this body is\n                        // still sending...\n                        let ping = ping.clone();\n\n                        let pipe = PipeMap {\n                            pipe,\n                            conn_drop_ref: Some(conn_drop_ref),\n                            ping: Some(ping),\n                            cancel_rx: Some(cancel_rx),\n                        };\n                        // Clear send task\n                        self.executor\n                            .execute_h2_future(H2ClientFuture::Pipe { pipe });\n                    }\n                }\n            }\n\n            None\n        } else {\n            Some(f.body_tx)\n        };\n\n        self.executor.execute_h2_future(H2ClientFuture::Send {\n      
      send_when: SendWhen {\n                when: ResponseFutMap {\n                    fut: f.fut,\n                    ping: Some(ping),\n                    send_stream: Some(send_stream),\n                    cancel_tx: Some(cancel_tx),\n                },\n                call_back: Some(f.cb),\n            },\n        });\n    }\n}\n\npin_project! {\n    pub(crate) struct ResponseFutMap<B>\n    where\n        B: Body,\n        B: 'static,\n    {\n        #[pin]\n        fut: ResponseFuture,\n        #[pin]\n        ping: Option<Recorder>,\n        #[pin]\n        send_stream: Option<Option<SendStream<SendBuf<<B as Body>::Data>>>>,\n        cancel_tx: Option<oneshot::Sender<()>>,\n    }\n}\n\nimpl<B: Body + 'static> ResponseFutMap<B> {\n    /// Signal the pipe_task to reset the stream (e.g. on client cancellation).\n    pub(crate) fn cancel(self: Pin<&mut Self>) {\n        if let Some(cancel_tx) = self.project().cancel_tx.take() {\n            let _ = cancel_tx.send(());\n        }\n    }\n}\n\nimpl<B> Future for ResponseFutMap<B>\nwhere\n    B: Body + 'static,\n    B::Data: Send,\n{\n    type Output = Result<Response<body::Incoming>, (Error, Option<Request<B>>)>;\n\n    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {\n        let mut this = self.project();\n\n        let result = ready!(this.fut.poll(cx));\n\n        let ping = this.ping.take().expect(\"Future polled twice\");\n        let send_stream = this.send_stream.take().expect(\"Future polled twice\");\n\n        match result {\n            Ok(res) => {\n                // record that we got the response headers\n                ping.record_non_data();\n\n                let content_length = headers::content_length_parse_all(res.headers());\n                if let (Some(mut send_stream), StatusCode::OK) = (send_stream, res.status()) {\n                    if content_length.is_some_and(|len| len != 0) {\n                        warn!(\"h2 connect response with non-zero body 
not supported\");\n\n                        send_stream.send_reset(http2::Reason::INTERNAL_ERROR);\n                        return Poll::Ready(Err((\n                            Error::new_h2(http2::Reason::INTERNAL_ERROR.into()),\n                            None::<Request<B>>,\n                        )));\n                    }\n                    let (parts, recv_stream) = res.into_parts();\n                    let mut res = Response::from_parts(parts, Incoming::empty());\n\n                    let (pending, on_upgrade) = upgrade::pending();\n                    let io = H2Upgraded {\n                        ping,\n                        send_stream,\n                        recv_stream,\n                        buf: Bytes::new(),\n                    };\n                    let upgraded = Upgraded::new(io, Bytes::new());\n\n                    pending.fulfill(upgraded);\n                    res.extensions_mut().insert(on_upgrade);\n\n                    Poll::Ready(Ok(res))\n                } else {\n                    let res = res.map(|stream| {\n                        let ping = ping.for_stream(&stream);\n                        Incoming::h2(stream, content_length.into(), ping)\n                    });\n                    Poll::Ready(Ok(res))\n                }\n            }\n            Err(err) => {\n                ping.ensure_not_timed_out().map_err(|e| (e, None))?;\n\n                debug!(\"client response error: {}\", err);\n                Poll::Ready(Err((Error::new_h2(err), None::<Request<B>>)))\n            }\n        }\n    }\n}\n\nimpl<B, E, T> Future for ClientTask<B, E, T>\nwhere\n    B: Body + 'static + Unpin,\n    B::Data: Send,\n    B::Error: Into<BoxError>,\n    E: Http2ClientConnExec<B, T> + Unpin,\n    T: AsyncRead + AsyncWrite + Unpin,\n{\n    type Output = Result<Dispatched>;\n\n    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {\n        loop {\n            match 
ready!(self.h2_tx.poll_ready(cx)) {\n                Ok(()) => (),\n                Err(err) => {\n                    self.ping.ensure_not_timed_out()?;\n                    return if err.reason() == Some(::http2::Reason::NO_ERROR) {\n                        trace!(\"connection gracefully shutdown\");\n                        Poll::Ready(Ok(Dispatched::Shutdown))\n                    } else {\n                        Poll::Ready(Err(Error::new_h2(err)))\n                    };\n                }\n            };\n\n            // If we were waiting on pending open\n            // continue where we left off.\n            if let Some(f) = self.fut_ctx.take() {\n                self.poll_pipe(f, cx);\n                continue;\n            }\n\n            match self.req_rx.poll_recv(cx) {\n                Poll::Ready(Some((req, cb))) => {\n                    // Check that future hasn't been canceled already\n                    if cb.is_canceled() {\n                        trace!(\"request callback is canceled\");\n                        continue;\n                    }\n                    let (head, body) = req.into_parts();\n                    let mut req = ::http::Request::from_parts(head, ());\n                    super::strip_connection_headers(req.headers_mut(), true);\n                    if let Some(len) = body.size_hint().exact() {\n                        if len != 0 || headers::method_has_defined_payload_semantics(req.method()) {\n                            headers::set_content_length_if_missing(req.headers_mut(), len);\n                        }\n                    }\n\n                    // Sort headers if we have the original headers\n                    if let Some(orig_headers) =\n                        RequestConfig::<OrigHeaderMap>::remove(req.extensions_mut())\n                    {\n                        orig_headers.sort_headers(req.headers_mut());\n                    }\n\n                    let is_connect = req.method() == 
Method::CONNECT;\n                    let eos = body.is_end_stream();\n\n                    if is_connect\n                        && headers::content_length_parse_all(req.headers())\n                            .is_some_and(|len| len != 0)\n                    {\n                        debug!(\"h2 connect request with non-zero body not supported\");\n                        cb.send(Err(TrySendError {\n                            error: Error::new_user_invalid_connect(),\n                            message: None,\n                        }));\n                        continue;\n                    }\n\n                    let (fut, body_tx) = match self.h2_tx.send_request(req, !is_connect && eos) {\n                        Ok(ok) => ok,\n                        Err(err) => {\n                            debug!(\"client send request error: {}\", err);\n                            cb.send(Err(TrySendError {\n                                error: Error::new_h2(err),\n                                message: None,\n                            }));\n                            continue;\n                        }\n                    };\n\n                    let f = FutCtx {\n                        is_connect,\n                        eos,\n                        fut,\n                        body_tx,\n                        body,\n                        cb,\n                    };\n\n                    // Check poll_ready() again.\n                    // If the call to send_request() resulted in the new stream being pending open\n                    // we have to wait for the open to complete before accepting new requests.\n                    match self.h2_tx.poll_ready(cx) {\n                        Poll::Pending => {\n                            // Save Context\n                            self.fut_ctx = Some(f);\n                            return Poll::Pending;\n                        }\n                        Poll::Ready(Ok(())) => (),\n               
         Poll::Ready(Err(err)) => {\n                            f.cb.send(Err(TrySendError {\n                                error: Error::new_h2(err),\n                                message: None,\n                            }));\n                            continue;\n                        }\n                    }\n                    self.poll_pipe(f, cx);\n                    continue;\n                }\n\n                Poll::Ready(None) => {\n                    trace!(\"client::dispatch::Sender dropped\");\n                    return Poll::Ready(Ok(Dispatched::Shutdown));\n                }\n\n                Poll::Pending => match ready!(Pin::new(&mut self.conn_eof).poll(cx)) {\n                    // As of Rust 1.82, this pattern is no longer needed, and emits a warning.\n                    // But we cannot remove it as long as MSRV is less than that.\n                    Ok(never) => match never {},\n                    Err(_conn_is_eof) => {\n                        trace!(\"connection task is closed, closing dispatch task\");\n                        return Poll::Ready(Ok(Dispatched::Shutdown));\n                    }\n                },\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "src/client/core/proto/http2/ping.rs",
    "content": "//! HTTP2 Ping usage\n//!\n//! core uses HTTP2 pings for two purposes:\n//!\n//! 1. Adaptive flow control using BDP\n//! 2. Connection keep-alive\n//!\n//! Both cases are optional.\n//!\n//! # BDP Algorithm\n//!\n//! 1. When receiving a DATA frame, if a BDP ping isn't outstanding: 1a. Record current time. 1b.\n//!    Send a BDP ping.\n//! 2. Increment the number of received bytes.\n//! 3. When the BDP ping ack is received: 3a. Record duration from sent time. 3b. Merge RTT with a\n//!    running average. 3c. Calculate bdp as bytes/rtt. 3d. If bdp is over 2/3 max, set new max to\n//!    bdp and update windows.\n\nuse std::{\n    fmt,\n    future::Future,\n    pin::Pin,\n    sync::Arc,\n    task::{self, Poll},\n    time::{Duration, Instant},\n};\n\nuse http2::{Ping, PingPong};\n\nuse crate::{\n    client::core::{\n        Result,\n        error::{Error, Kind, TimedOut},\n        rt::{Sleep, Time, Timer},\n    },\n    sync::Mutex,\n};\n\ntype WindowSize = u32;\n\npub(super) fn channel(ping_pong: PingPong, config: Config, timer: Time) -> (Recorder, Ponger) {\n    debug_assert!(\n        config.is_enabled(),\n        \"ping channel requires bdp or keep-alive config\",\n    );\n\n    let bdp = config.bdp_initial_window.map(|wnd| Bdp {\n        bdp: wnd,\n        max_bandwidth: 0.0,\n        rtt: 0.0,\n        ping_delay: Duration::from_millis(100),\n        stable_count: 0,\n    });\n\n    let now = timer.now();\n\n    let (bytes, next_bdp_at) = if bdp.is_some() {\n        (Some(0), Some(now))\n    } else {\n        (None, None)\n    };\n\n    let keep_alive = config.keep_alive_interval.map(|interval| KeepAlive {\n        interval,\n        timeout: config.keep_alive_timeout,\n        while_idle: config.keep_alive_while_idle,\n        sleep: timer.sleep(interval),\n        state: KeepAliveState::Init,\n        timer: timer.clone(),\n    });\n\n    let last_read_at = keep_alive.as_ref().map(|_| now);\n\n    let shared = Arc::new(Mutex::new(Shared {\n        
bytes,\n        last_read_at,\n        is_keep_alive_timed_out: false,\n        ping_pong,\n        ping_sent_at: None,\n        next_bdp_at,\n        timer,\n    }));\n\n    (\n        Recorder {\n            shared: Some(shared.clone()),\n        },\n        Ponger {\n            bdp,\n            keep_alive,\n            shared,\n        },\n    )\n}\n\n#[derive(Debug, Clone)]\npub(crate) struct Config {\n    bdp_initial_window: Option<WindowSize>,\n    /// If no frames are received in this amount of time, a PING frame is sent.\n    keep_alive_interval: Option<Duration>,\n    /// After sending a keepalive PING, the connection will be closed if\n    /// a pong is not received in this amount of time.\n    keep_alive_timeout: Duration,\n    /// If true, sends pings even when there are no active streams.\n    keep_alive_while_idle: bool,\n}\n\n#[derive(Clone)]\npub(crate) struct Recorder {\n    shared: Option<Arc<Mutex<Shared>>>,\n}\n\npub(super) struct Ponger {\n    bdp: Option<Bdp>,\n    keep_alive: Option<KeepAlive>,\n    shared: Arc<Mutex<Shared>>,\n}\n\nstruct Shared {\n    ping_pong: PingPong,\n    ping_sent_at: Option<Instant>,\n\n    // bdp\n    /// If `Some`, bdp is enabled, and this tracks how many bytes have been\n    /// read during the current sample.\n    bytes: Option<usize>,\n    /// We delay a variable amount of time between BDP pings. 
This allows us\n    /// to send less pings as the bandwidth stabilizes.\n    next_bdp_at: Option<Instant>,\n\n    // keep-alive\n    /// If `Some`, keep-alive is enabled, and the Instant is how long ago\n    /// the connection read the last frame.\n    last_read_at: Option<Instant>,\n\n    is_keep_alive_timed_out: bool,\n    timer: Time,\n}\n\nstruct Bdp {\n    /// Current BDP in bytes\n    bdp: u32,\n    /// Largest bandwidth we've seen so far.\n    max_bandwidth: f64,\n    /// Round trip time in seconds\n    rtt: f64,\n    /// Delay the next ping by this amount.\n    ///\n    /// This will change depending on how stable the current bandwidth is.\n    ping_delay: Duration,\n    /// The count of ping round trips where BDP has stayed the same.\n    stable_count: u32,\n}\n\nstruct KeepAlive {\n    /// If no frames are received in this amount of time, a PING frame is sent.\n    interval: Duration,\n    /// After sending a keepalive PING, the connection will be closed if\n    /// a pong is not received in this amount of time.\n    timeout: Duration,\n    /// If true, sends pings even when there are no active streams.\n    while_idle: bool,\n    state: KeepAliveState,\n    sleep: Pin<Box<dyn Sleep>>,\n    timer: Time,\n}\n\nenum KeepAliveState {\n    Init,\n    Scheduled(Instant),\n    PingSent,\n}\n\npub(super) enum Ponged {\n    SizeUpdate(WindowSize),\n    KeepAliveTimedOut,\n}\n\n#[derive(Debug)]\npub(super) struct KeepAliveTimedOut;\n\n// ===== impl Config =====\n\nimpl Config {\n    /// Creates a new `Config` with the specified parameters.\n    pub(crate) fn new(\n        adaptive_window: bool,\n        initial_window_size: u32,\n        keep_alive_interval: Option<Duration>,\n        keep_alive_timeout: Duration,\n        keep_alive_while_idle: bool,\n    ) -> Self {\n        Config {\n            bdp_initial_window: if adaptive_window {\n                Some(initial_window_size)\n            } else {\n                None\n            },\n            
keep_alive_interval,\n            keep_alive_timeout,\n            keep_alive_while_idle,\n        }\n    }\n\n    #[inline]\n    pub(super) fn is_enabled(&self) -> bool {\n        self.bdp_initial_window.is_some() || self.keep_alive_interval.is_some()\n    }\n}\n\n// ===== impl Recorder =====\n\nimpl Recorder {\n    #[inline]\n    pub(super) fn disabled() -> Recorder {\n        Recorder { shared: None }\n    }\n\n    pub(crate) fn record_data(&self, len: usize) {\n        let Some(ref shared) = self.shared else {\n            return;\n        };\n\n        let mut locked = shared.lock();\n        locked.update_last_read_at();\n\n        // are we ready to send another bdp ping?\n        // if not, we don't need to record bytes either\n        if let Some(ref next_bdp_at) = locked.next_bdp_at {\n            if Instant::now() < *next_bdp_at {\n                return;\n            } else {\n                locked.next_bdp_at = None;\n            }\n        }\n\n        if let Some(ref mut bytes) = locked.bytes {\n            *bytes += len;\n        } else {\n            // no need to send bdp ping if bdp is disabled\n            return;\n        }\n\n        if !locked.is_ping_sent() {\n            locked.send_ping();\n        }\n    }\n\n    pub(crate) fn record_non_data(&self) {\n        let Some(ref shared) = self.shared else {\n            return;\n        };\n\n        let mut locked = shared.lock();\n        locked.update_last_read_at();\n    }\n\n    /// If the incoming stream is already closed, convert self into\n    /// a disabled reporter.\n    pub(super) fn for_stream(self, stream: &http2::RecvStream) -> Self {\n        if stream.is_end_stream() {\n            Recorder::disabled()\n        } else {\n            self\n        }\n    }\n\n    pub(super) fn ensure_not_timed_out(&self) -> Result<()> {\n        if let Some(ref shared) = self.shared {\n            let locked = shared.lock();\n            if locked.is_keep_alive_timed_out {\n                
return Err(KeepAliveTimedOut.crate_error());\n            }\n        }\n\n        Ok(())\n    }\n}\n\n// ===== impl Ponger =====\n\nimpl Future for Ponger {\n    type Output = Ponged;\n\n    #[inline]\n    fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {\n        let this = self.as_mut().get_mut();\n        let mut locked = this.shared.lock();\n        // hoping this is fine to move within the lock\n        let now = locked.timer.now();\n\n        let is_idle = this.is_idle();\n\n        if let Some(ref mut ka) = this.keep_alive {\n            ka.maybe_schedule(is_idle, &locked);\n            ka.maybe_ping(cx, is_idle, &mut locked);\n        }\n\n        if !locked.is_ping_sent() {\n            // XXX: this doesn't register a waker...?\n            return Poll::Pending;\n        }\n\n        match locked.ping_pong.poll_pong(cx) {\n            Poll::Ready(Ok(_pong)) => {\n                let start = locked\n                    .ping_sent_at\n                    .expect(\"pong received implies ping_sent_at\");\n                locked.ping_sent_at = None;\n                let rtt = now - start;\n                trace!(\"recv pong\");\n\n                if let Some(ref mut ka) = this.keep_alive {\n                    locked.update_last_read_at();\n                    ka.maybe_schedule(is_idle, &locked);\n                    ka.maybe_ping(cx, is_idle, &mut locked);\n                }\n\n                if let Some(ref mut bdp) = this.bdp {\n                    let bytes = locked.bytes.expect(\"bdp enabled implies bytes\");\n                    locked.bytes = Some(0); // reset\n                    trace!(\"received BDP ack; bytes = {}, rtt = {:?}\", bytes, rtt);\n\n                    let update = bdp.calculate(bytes, rtt);\n                    locked.next_bdp_at = Some(now + bdp.ping_delay);\n                    if let Some(update) = update {\n                        return Poll::Ready(Ponged::SizeUpdate(update));\n                   
 }\n                }\n            }\n            Poll::Ready(Err(_e)) => {\n                debug!(\"pong error: {}\", _e);\n            }\n            Poll::Pending => {\n                if let Some(ref mut ka) = this.keep_alive {\n                    if let Err(KeepAliveTimedOut) = ka.maybe_timeout(cx) {\n                        this.keep_alive = None;\n                        locked.is_keep_alive_timed_out = true;\n                        return Poll::Ready(Ponged::KeepAliveTimedOut);\n                    }\n                }\n            }\n        }\n\n        // XXX: this doesn't register a waker...?\n        Poll::Pending\n    }\n}\n\nimpl Ponger {\n    #[inline]\n    fn is_idle(&self) -> bool {\n        Arc::strong_count(&self.shared) <= 2\n    }\n}\n\n// ===== impl Shared =====\n\nimpl Shared {\n    fn send_ping(&mut self) {\n        match self.ping_pong.send_ping(Ping::opaque()) {\n            Ok(()) => {\n                self.ping_sent_at = Some(self.timer.now());\n                trace!(\"sent ping\");\n            }\n            Err(_err) => {\n                debug!(\"error sending ping: {}\", _err);\n            }\n        }\n    }\n\n    #[inline]\n    fn is_ping_sent(&self) -> bool {\n        self.ping_sent_at.is_some()\n    }\n\n    #[inline]\n    fn update_last_read_at(&mut self) {\n        if self.last_read_at.is_some() {\n            self.last_read_at = Some(self.timer.now());\n        }\n    }\n\n    #[inline]\n    fn last_read_at(&self) -> Instant {\n        self.last_read_at.expect(\"keep_alive expects last_read_at\")\n    }\n}\n\n// ===== impl Bdp =====\n\n/// Any higher than this likely will be hitting the TCP flow control.\nconst BDP_LIMIT: usize = 1024 * 1024 * 16;\n\nimpl Bdp {\n    fn calculate(&mut self, bytes: usize, rtt: Duration) -> Option<WindowSize> {\n        // No need to do any math if we're at the limit.\n        if self.bdp as usize == BDP_LIMIT {\n            self.stabilize_delay();\n            return None;\n        }\n\n 
       // average the rtt\n        let rtt = seconds(rtt);\n        if self.rtt == 0.0 {\n            // First sample means rtt is first rtt.\n            self.rtt = rtt;\n        } else {\n            // Weigh this rtt as 1/8 for a moving average.\n            self.rtt += (rtt - self.rtt) * 0.125;\n        }\n\n        // calculate the current bandwidth\n        let bw = (bytes as f64) / (self.rtt * 1.5);\n        trace!(\"current bandwidth = {:.1}B/s\", bw);\n\n        if bw < self.max_bandwidth {\n            // not a faster bandwidth, so don't update\n            self.stabilize_delay();\n            return None;\n        } else {\n            self.max_bandwidth = bw;\n        }\n\n        // if the current `bytes` sample is at least 2/3 the previous\n        // bdp, increase to double the current sample.\n        if bytes >= self.bdp as usize * 2 / 3 {\n            self.bdp = (bytes * 2).min(BDP_LIMIT) as WindowSize;\n            trace!(\"BDP increased to {}\", self.bdp);\n\n            self.stable_count = 0;\n            self.ping_delay /= 2;\n            Some(self.bdp)\n        } else {\n            self.stabilize_delay();\n            None\n        }\n    }\n\n    fn stabilize_delay(&mut self) {\n        if self.ping_delay < Duration::from_secs(10) {\n            self.stable_count += 1;\n\n            if self.stable_count >= 2 {\n                self.ping_delay *= 4;\n                self.stable_count = 0;\n            }\n        }\n    }\n}\n\n#[inline]\nfn seconds(dur: Duration) -> f64 {\n    const NANOS_PER_SEC: f64 = 1_000_000_000.0;\n    let secs = dur.as_secs() as f64;\n    secs + (dur.subsec_nanos() as f64) / NANOS_PER_SEC\n}\n\n// ===== impl KeepAlive =====\n\nimpl KeepAlive {\n    fn maybe_schedule(&mut self, is_idle: bool, shared: &Shared) {\n        match self.state {\n            KeepAliveState::Init => {\n                if !self.while_idle && is_idle {\n                    return;\n                }\n\n                self.schedule(shared);\n   
         }\n            KeepAliveState::PingSent => {\n                if shared.is_ping_sent() {\n                    return;\n                }\n                self.schedule(shared);\n            }\n            KeepAliveState::Scheduled(..) => (),\n        }\n    }\n\n    fn schedule(&mut self, shared: &Shared) {\n        let interval = shared.last_read_at() + self.interval;\n        self.state = KeepAliveState::Scheduled(interval);\n        self.timer.reset(&mut self.sleep, interval);\n    }\n\n    fn maybe_ping(&mut self, cx: &mut task::Context<'_>, is_idle: bool, shared: &mut Shared) {\n        match self.state {\n            KeepAliveState::Scheduled(at) => {\n                if Pin::new(&mut self.sleep).poll(cx).is_pending() {\n                    return;\n                }\n                // check if we've received a frame while we were scheduled\n                if shared.last_read_at() + self.interval > at {\n                    self.state = KeepAliveState::Init;\n                    cx.waker().wake_by_ref(); // schedule us again\n                    return;\n                }\n                if !self.while_idle && is_idle {\n                    trace!(\"keep-alive no need to ping when idle and while_idle=false\");\n                    return;\n                }\n                trace!(\"keep-alive interval ({:?}) reached\", self.interval);\n                shared.send_ping();\n                self.state = KeepAliveState::PingSent;\n                let timeout = self.timer.now() + self.timeout;\n                self.timer.reset(&mut self.sleep, timeout);\n            }\n            KeepAliveState::Init | KeepAliveState::PingSent => (),\n        }\n    }\n\n    fn maybe_timeout(&mut self, cx: &mut task::Context<'_>) -> Result<(), KeepAliveTimedOut> {\n        match self.state {\n            KeepAliveState::PingSent => {\n                if Pin::new(&mut self.sleep).poll(cx).is_pending() {\n                    return Ok(());\n                }\n          
      trace!(\"keep-alive timeout ({:?}) reached\", self.timeout);\n                Err(KeepAliveTimedOut)\n            }\n            KeepAliveState::Init | KeepAliveState::Scheduled(..) => Ok(()),\n        }\n    }\n}\n\n// ===== impl KeepAliveTimedOut =====\n\nimpl KeepAliveTimedOut {\n    pub(super) fn crate_error(self) -> Error {\n        Error::new(Kind::Http2).with(self)\n    }\n}\n\nimpl fmt::Display for KeepAliveTimedOut {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        f.write_str(\"keep-alive timed out\")\n    }\n}\n\nimpl std::error::Error for KeepAliveTimedOut {\n    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {\n        Some(&TimedOut)\n    }\n}\n"
  },
  {
    "path": "src/client/core/proto/http2.rs",
    "content": "//! HTTP/2 protocol implementation and utilities.\n\npub(crate) mod client;\npub(crate) mod ping;\n\nuse std::{\n    future::Future,\n    io::{self, Cursor, IoSlice},\n    pin::Pin,\n    task::{Context, Poll, ready},\n    time::Duration,\n};\n\nuse bytes::{Buf, Bytes};\nuse http::{\n    HeaderMap,\n    header::{CONNECTION, HeaderName, TE, TRANSFER_ENCODING, UPGRADE},\n};\nuse http_body::Body;\nuse http2::{\n    Reason, RecvStream, SendStream,\n    frame::{Priorities, PseudoOrder, SettingsOrder, StreamDependency},\n};\nuse pin_project_lite::pin_project;\nuse tokio::io::{AsyncRead, AsyncWrite, ReadBuf};\n\nuse crate::client::core::{Error, Result, error::BoxError};\n\n/// Default initial stream window size defined in HTTP2 spec.\nconst SPEC_WINDOW_SIZE: u32 = 65_535;\n\n// Our defaults are chosen for the \"majority\" case, which usually are not\n// resource constrained, and so the spec default of 64kb can be too limiting\n// for performance.\nconst DEFAULT_CONN_WINDOW: u32 = 1024 * 1024 * 5; // 5mb\nconst DEFAULT_STREAM_WINDOW: u32 = 1024 * 1024 * 2; // 2mb\nconst DEFAULT_MAX_FRAME_SIZE: u32 = 1024 * 16; // 16kb\nconst DEFAULT_MAX_SEND_BUF_SIZE: usize = 1024 * 1024; // 1mb\nconst DEFAULT_MAX_HEADER_LIST_SIZE: u32 = 1024 * 16; // 16kb\n\n// The maximum number of concurrent streams that the client is allowed to open\n// before it receives the initial SETTINGS frame from the server.\n// This default value is derived from what the HTTP/2 spec recommends as the\n// minimum value that endpoints advertise to their peers. 
It means that using\n// this value will minimize the chance of the failure where the local endpoint\n// attempts to open too many streams and gets rejected by the remote peer with\n// the `REFUSED_STREAM` error.\nconst DEFAULT_INITIAL_MAX_SEND_STREAMS: usize = 100;\n\n// List of connection headers from RFC 9110 Section 7.6.1\n//\n// TE headers are allowed in HTTP/2 requests as long as the value is \"trailers\", so they're\n// tested separately.\nstatic CONNECTION_HEADERS: [HeaderName; 4] = [\n    HeaderName::from_static(\"keep-alive\"),\n    HeaderName::from_static(\"proxy-connection\"),\n    TRANSFER_ENCODING,\n    UPGRADE,\n];\n\nfn strip_connection_headers(headers: &mut HeaderMap, is_request: bool) {\n    for header in &CONNECTION_HEADERS {\n        if headers.remove(header).is_some() {\n            warn!(\"Connection header illegal in HTTP/2: {}\", header.as_str());\n        }\n    }\n\n    if is_request {\n        if headers\n            .get(TE)\n            .is_some_and(|te_header| te_header != \"trailers\")\n        {\n            warn!(\"TE headers not set to \\\"trailers\\\" are illegal in HTTP/2 requests\");\n            headers.remove(TE);\n        }\n    } else if headers.remove(TE).is_some() {\n        warn!(\"TE headers illegal in HTTP/2 responses\");\n    }\n\n    if let Some(header) = headers.remove(CONNECTION) {\n        warn!(\n            \"Connection header illegal in HTTP/2: {}\",\n            CONNECTION.as_str()\n        );\n\n        if let Ok(header_contents) = header.to_str() {\n            // A `Connection` header may have a comma-separated list of names of other headers that\n            // are meant for only this specific connection.\n            //\n            // Iterate these names and remove them as headers. 
Connection-specific headers are\n            // forbidden in HTTP2, as that information has been moved into frame types of the h2\n            // protocol.\n            for name in header_contents.split(',') {\n                let name = name.trim();\n                headers.remove(name);\n            }\n        }\n    }\n}\n\n// body adapters used by both Client\npin_project! {\n    pub(crate) struct PipeToSendStream<S>\n    where\n        S: Body,\n    {\n        #[pin]\n        stream: S,\n        body_tx: SendStream<SendBuf<S::Data>>,\n        data_done: bool,\n    }\n}\n\nimpl<S> PipeToSendStream<S>\nwhere\n    S: Body,\n{\n    #[inline]\n    fn new(stream: S, body_tx: SendStream<SendBuf<S::Data>>) -> PipeToSendStream<S> {\n        PipeToSendStream {\n            stream,\n            body_tx,\n            data_done: false,\n        }\n    }\n\n    #[inline]\n    fn send_reset(self: Pin<&mut Self>, reason: http2::Reason) {\n        self.project().body_tx.send_reset(reason);\n    }\n}\n\nimpl<S> Future for PipeToSendStream<S>\nwhere\n    S: Body,\n    S::Error: Into<BoxError>,\n{\n    type Output = Result<()>;\n\n    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {\n        let mut me = self.project();\n        loop {\n            // we don't have the next chunk of data yet, so just reserve 1 byte to make\n            // sure there's some capacity available. 
h2 will handle the capacity management\n            // for the actual body chunk.\n            me.body_tx.reserve_capacity(1);\n\n            if me.body_tx.capacity() == 0 {\n                loop {\n                    match ready!(me.body_tx.poll_capacity(cx)) {\n                        Some(Ok(0)) => {}\n                        Some(Ok(_)) => break,\n                        Some(Err(e)) => {\n                            return Poll::Ready(Err(Error::new_body_write(e)));\n                        }\n                        None => {\n                            // None means the stream is no longer in a\n                            // streaming state, we either finished it\n                            // somehow, or the remote reset us.\n                            return Poll::Ready(Err(Error::new_body_write(\n                                \"send stream capacity unexpectedly closed\",\n                            )));\n                        }\n                    }\n                }\n            } else if let Poll::Ready(reason) =\n                me.body_tx.poll_reset(cx).map_err(Error::new_body_write)?\n            {\n                debug!(\"stream received RST_STREAM: {:?}\", reason);\n                return Poll::Ready(Err(Error::new_body_write(::http2::Error::from(reason))));\n            }\n\n            match ready!(me.stream.as_mut().poll_frame(cx)) {\n                Some(Ok(frame)) => {\n                    if frame.is_data() {\n                        let chunk = frame.into_data().unwrap_or_else(|_| unreachable!());\n                        let is_eos = me.stream.is_end_stream();\n                        trace!(\n                            \"send body chunk: {} bytes, eos={}\",\n                            chunk.remaining(),\n                            is_eos,\n                        );\n\n                        let buf = SendBuf::Buf(chunk);\n                        me.body_tx\n                            .send_data(buf, is_eos)\n             
               .map_err(Error::new_body_write)?;\n\n                        if is_eos {\n                            return Poll::Ready(Ok(()));\n                        }\n                    } else if frame.is_trailers() {\n                        // no more DATA, so give any capacity back\n                        me.body_tx.reserve_capacity(0);\n                        me.body_tx\n                            .send_trailers(frame.into_trailers().unwrap_or_else(|_| unreachable!()))\n                            .map_err(Error::new_body_write)?;\n                        return Poll::Ready(Ok(()));\n                    } else {\n                        trace!(\"discarding unknown frame\");\n                        // loop again\n                    }\n                }\n                Some(Err(e)) => return Poll::Ready(Err(me.body_tx.on_user_err(e))),\n                None => {\n                    // no more frames means we're done here\n                    // but at this point, we haven't sent an EOS DATA, or\n                    // any trailers, so send an empty EOS DATA.\n                    return Poll::Ready(me.body_tx.send_eos_frame());\n                }\n            }\n        }\n    }\n}\n\ntrait SendStreamExt {\n    fn on_user_err<E>(&mut self, err: E) -> Error\n    where\n        E: Into<BoxError>;\n\n    fn send_eos_frame(&mut self) -> Result<()>;\n}\n\nimpl<B: Buf> SendStreamExt for SendStream<SendBuf<B>> {\n    fn on_user_err<E>(&mut self, err: E) -> Error\n    where\n        E: Into<BoxError>,\n    {\n        let err = Error::new_user_body(err);\n        debug!(\"send body user stream error: {}\", err);\n        self.send_reset(err.h2_reason());\n        err\n    }\n\n    fn send_eos_frame(&mut self) -> Result<()> {\n        trace!(\"send body eos\");\n        self.send_data(SendBuf::None, true)\n            .map_err(Error::new_body_write)\n    }\n}\n\n#[repr(usize)]\nenum SendBuf<B> {\n    Buf(B),\n    Cursor(Cursor<Box<[u8]>>),\n    
None,\n}\n\nimpl<B: Buf> Buf for SendBuf<B> {\n    #[inline]\n    fn remaining(&self) -> usize {\n        match *self {\n            Self::Buf(ref b) => b.remaining(),\n            Self::Cursor(ref c) => Buf::remaining(c),\n            Self::None => 0,\n        }\n    }\n\n    #[inline]\n    fn chunk(&self) -> &[u8] {\n        match *self {\n            Self::Buf(ref b) => b.chunk(),\n            Self::Cursor(ref c) => c.chunk(),\n            Self::None => &[],\n        }\n    }\n\n    #[inline]\n    fn advance(&mut self, cnt: usize) {\n        match *self {\n            Self::Buf(ref mut b) => b.advance(cnt),\n            Self::Cursor(ref mut c) => c.advance(cnt),\n            Self::None => {}\n        }\n    }\n\n    fn chunks_vectored<'a>(&'a self, dst: &mut [IoSlice<'a>]) -> usize {\n        match *self {\n            Self::Buf(ref b) => b.chunks_vectored(dst),\n            Self::Cursor(ref c) => c.chunks_vectored(dst),\n            Self::None => 0,\n        }\n    }\n}\n\nstruct H2Upgraded<B>\nwhere\n    B: Buf,\n{\n    ping: ping::Recorder,\n    send_stream: SendStream<SendBuf<B>>,\n    recv_stream: RecvStream,\n    buf: Bytes,\n}\n\nimpl<B> AsyncRead for H2Upgraded<B>\nwhere\n    B: Buf,\n{\n    fn poll_read(\n        mut self: Pin<&mut Self>,\n        cx: &mut Context<'_>,\n        read_buf: &mut ReadBuf<'_>,\n    ) -> Poll<io::Result<()>> {\n        if self.buf.is_empty() {\n            self.buf = loop {\n                match ready!(self.recv_stream.poll_data(cx)) {\n                    None => return Poll::Ready(Ok(())),\n                    Some(Ok(buf)) if buf.is_empty() && !self.recv_stream.is_end_stream() => {\n                        continue;\n                    }\n                    Some(Ok(buf)) => {\n                        self.ping.record_data(buf.len());\n                        break buf;\n                    }\n                    Some(Err(e)) => {\n                        return Poll::Ready(match e.reason() {\n                            
Some(Reason::NO_ERROR) | Some(Reason::CANCEL) => Ok(()),\n                            Some(Reason::STREAM_CLOSED) => {\n                                Err(io::Error::new(io::ErrorKind::BrokenPipe, e))\n                            }\n                            _ => Err(h2_to_io_error(e)),\n                        });\n                    }\n                }\n            };\n        }\n        let cnt = std::cmp::min(self.buf.len(), read_buf.remaining());\n        read_buf.put_slice(&self.buf[..cnt]);\n        self.buf.advance(cnt);\n        let _ = self.recv_stream.flow_control().release_capacity(cnt);\n        Poll::Ready(Ok(()))\n    }\n}\n\nimpl<B> AsyncWrite for H2Upgraded<B>\nwhere\n    B: Buf,\n{\n    fn poll_write(\n        mut self: Pin<&mut Self>,\n        cx: &mut Context<'_>,\n        buf: &[u8],\n    ) -> Poll<io::Result<usize>> {\n        if buf.is_empty() {\n            return Poll::Ready(Ok(0));\n        }\n        self.send_stream.reserve_capacity(buf.len());\n\n        // We ignore all errors returned by `poll_capacity` and `write`, as we\n        // will get the correct from `poll_reset` anyway.\n        let cnt = match ready!(self.send_stream.poll_capacity(cx)) {\n            None => Some(0),\n            Some(Ok(cnt)) => self\n                .send_stream\n                .send_data(SendBuf::Cursor(Cursor::new(buf[..cnt].into())), false)\n                .ok()\n                .map(|()| cnt),\n            Some(Err(_)) => None,\n        };\n\n        if let Some(cnt) = cnt {\n            return Poll::Ready(Ok(cnt));\n        }\n\n        Poll::Ready(Err(h2_to_io_error(\n            match ready!(self.send_stream.poll_reset(cx)) {\n                Ok(Reason::NO_ERROR) | Ok(Reason::CANCEL) | Ok(Reason::STREAM_CLOSED) => {\n                    return Poll::Ready(Err(io::ErrorKind::BrokenPipe.into()));\n                }\n                Ok(reason) => reason.into(),\n                Err(e) => e,\n            },\n        )))\n    }\n\n    #[inline]\n 
   fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {\n        Poll::Ready(Ok(()))\n    }\n\n    fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {\n        if self\n            .send_stream\n            .send_data(SendBuf::Cursor(Cursor::new([].into())), true)\n            .is_ok()\n        {\n            return Poll::Ready(Ok(()));\n        }\n\n        Poll::Ready(Err(h2_to_io_error(\n            match ready!(self.send_stream.poll_reset(cx)) {\n                Ok(Reason::NO_ERROR) => return Poll::Ready(Ok(())),\n                Ok(Reason::CANCEL) | Ok(Reason::STREAM_CLOSED) => {\n                    return Poll::Ready(Err(io::ErrorKind::BrokenPipe.into()));\n                }\n                Ok(reason) => reason.into(),\n                Err(e) => e,\n            },\n        )))\n    }\n}\n\nfn h2_to_io_error(e: http2::Error) -> std::io::Error {\n    if e.is_io() {\n        e.into_io()\n            .expect(\"[BUG] http2::Error::is_io() is true, but into_io() failed\")\n    } else {\n        std::io::Error::other(e)\n    }\n}\n\n/// Builder for `Http2Options`.\n#[must_use]\n#[derive(Debug)]\npub struct Http2OptionsBuilder {\n    opts: Http2Options,\n}\n\n/// Options for tuning HTTP/2 connections.\n///\n/// `Http2Options` lets you adjust how HTTP/2 works—stream limits, window sizes, frame and header\n/// settings, and more. Most fields are optional and have sensible defaults. 
See each field for\n/// details.\n#[non_exhaustive]\n#[derive(Debug, Clone)]\npub struct Http2Options {\n    /// Whether to use adaptive flow control.\n    pub adaptive_window: bool,\n\n    /// The initial stream ID for the connection.\n    pub initial_stream_id: Option<u32>,\n\n    /// The initial window size for HTTP/2 connection-level flow control.\n    pub initial_conn_window_size: u32,\n\n    /// The initial window size for HTTP/2 streams.\n    pub initial_window_size: u32,\n\n    /// The initial maximum number of locally initiated (send) streams.\n    pub initial_max_send_streams: usize,\n\n    /// The maximum frame size to use for HTTP/2.\n    pub max_frame_size: Option<u32>,\n\n    /// The interval for HTTP/2 keep-alive ping frames.\n    pub keep_alive_interval: Option<Duration>,\n\n    /// The timeout for receiving an acknowledgement of the keep-alive ping.\n    pub keep_alive_timeout: Duration,\n\n    /// Whether HTTP/2 keep-alive should apply while the connection is idle.\n    pub keep_alive_while_idle: bool,\n\n    /// The maximum number of concurrent locally reset streams.\n    pub max_concurrent_reset_streams: Option<usize>,\n\n    /// The maximum size of the send buffer for HTTP/2 streams.\n    pub max_send_buffer_size: usize,\n\n    /// The maximum number of concurrent streams initiated by the remote peer.\n    pub max_concurrent_streams: Option<u32>,\n\n    /// The maximum size of the header list.\n    pub max_header_list_size: Option<u32>,\n\n    /// The maximum number of pending accept reset streams.\n    pub max_pending_accept_reset_streams: Option<usize>,\n\n    /// Whether to enable push promises.\n    pub enable_push: Option<bool>,\n\n    /// The header table size for HPACK compression.\n    pub header_table_size: Option<u32>,\n\n    /// Whether to enable the CONNECT protocol.\n    pub enable_connect_protocol: Option<bool>,\n\n    /// Whether to disable RFC 7540 Stream Priorities.\n    pub no_rfc7540_priorities: Option<bool>,\n\n    /// The 
HTTP/2 pseudo-header field order for outgoing HEADERS frames.\n    pub headers_pseudo_order: Option<PseudoOrder>,\n\n    /// The stream dependency for the outgoing HEADERS frame.\n    pub headers_stream_dependency: Option<StreamDependency>,\n\n    /// The order of settings parameters in the initial SETTINGS frame.\n    pub settings_order: Option<SettingsOrder>,\n\n    /// The list of PRIORITY frames to be sent after connection establishment.\n    pub priorities: Option<Priorities>,\n}\n\nimpl Http2OptionsBuilder {\n    /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2\n    /// stream-level flow control.\n    ///\n    /// Passing `None` will do nothing.\n    ///\n    /// If not set, crate::core: will use a default.\n    ///\n    /// [spec]: https://httpwg.org/specs/rfc9113.html#SETTINGS_INITIAL_WINDOW_SIZE\n    #[inline]\n    pub fn initial_window_size(mut self, sz: impl Into<Option<u32>>) -> Self {\n        if let Some(sz) = sz.into() {\n            self.opts.adaptive_window = false;\n            self.opts.initial_window_size = sz;\n        }\n        self\n    }\n\n    /// Sets the max connection-level flow control for HTTP2\n    ///\n    /// Passing `None` will do nothing.\n    ///\n    /// If not set, crate::core: will use a default.\n    #[inline]\n    pub fn initial_connection_window_size(mut self, sz: impl Into<Option<u32>>) -> Self {\n        if let Some(sz) = sz.into() {\n            self.opts.adaptive_window = false;\n            self.opts.initial_conn_window_size = sz;\n        }\n        self\n    }\n\n    /// Sets the initial maximum of locally initiated (send) streams.\n    ///\n    /// This value will be overwritten by the value included in the initial\n    /// SETTINGS frame received from the peer as part of a [connection preface].\n    ///\n    /// Passing `None` will do nothing.\n    ///\n    /// If not set, crate::core: will use a default.\n    ///\n    /// [connection preface]: https://httpwg.org/specs/rfc9113.html#preface\n    
#[inline]\n    pub fn initial_max_send_streams(mut self, initial: impl Into<Option<usize>>) -> Self {\n        if let Some(initial) = initial.into() {\n            self.opts.initial_max_send_streams = initial;\n        }\n        self\n    }\n\n    /// Sets the initial stream id for the connection.\n    #[inline]\n    pub fn initial_stream_id(mut self, id: impl Into<Option<u32>>) -> Self {\n        if let Some(id) = id.into() {\n            self.opts.initial_stream_id = Some(id);\n        }\n        self\n    }\n\n    /// Sets whether to use an adaptive flow control.\n    ///\n    /// Enabling this will override the limits set in\n    /// `initial_stream_window_size` and\n    /// `initial_connection_window_size`.\n    #[inline]\n    pub fn adaptive_window(mut self, enabled: bool) -> Self {\n        self.opts.adaptive_window = enabled;\n        if enabled {\n            self.opts.initial_window_size = SPEC_WINDOW_SIZE;\n            self.opts.initial_conn_window_size = SPEC_WINDOW_SIZE;\n        }\n        self\n    }\n\n    /// Sets the maximum frame size to use for HTTP2.\n    ///\n    /// Default is currently 16KB, but can change.\n    #[inline]\n    pub fn max_frame_size(mut self, sz: impl Into<Option<u32>>) -> Self {\n        if let Some(sz) = sz.into() {\n            self.opts.max_frame_size = Some(sz);\n        }\n        self\n    }\n\n    /// Sets the max size of received header frames.\n    ///\n    /// Default is currently 16KB, but can change.\n    #[inline]\n    pub fn max_header_list_size(mut self, max: u32) -> Self {\n        self.opts.max_header_list_size = Some(max);\n        self\n    }\n\n    /// Sets the header table size.\n    ///\n    /// This setting informs the peer of the maximum size of the header compression\n    /// table used to encode header blocks, in octets. 
The encoder may select any value\n    /// equal to or less than the header table size specified by the sender.\n    ///\n    /// The default value of crate `h2` is 4,096.\n    #[inline]\n    pub fn header_table_size(mut self, size: impl Into<Option<u32>>) -> Self {\n        if let Some(size) = size.into() {\n            self.opts.header_table_size = Some(size);\n        }\n        self\n    }\n\n    /// Sets the maximum number of concurrent streams.\n    ///\n    /// The maximum concurrent streams setting only controls the maximum number\n    /// of streams that can be initiated by the remote peer. In other words,\n    /// when this setting is set to 100, this does not limit the number of\n    /// concurrent streams that can be created by the caller.\n    ///\n    /// It is recommended that this value be no smaller than 100, so as to not\n    /// unnecessarily limit parallelism. However, any value is legal, including\n    /// 0. If `max` is set to 0, then the remote will not be permitted to\n    /// initiate streams.\n    ///\n    /// Note that streams in the reserved state, i.e., push promises that have\n    /// been reserved but the stream has not started, do not count against this\n    /// setting.\n    ///\n    /// Also note that if the remote *does* exceed the value set here, it is not\n    /// a protocol level error. 
Instead, the `h2` library will immediately reset\n    /// the stream.\n    ///\n    /// See [Section 5.1.2] in the HTTP/2 spec for more details.\n    ///\n    /// [Section 5.1.2]: https://http2.github.io/http2-spec/#rfc.section.5.1.2\n    #[inline]\n    pub fn max_concurrent_streams(mut self, max: impl Into<Option<u32>>) -> Self {\n        if let Some(max) = max.into() {\n            self.opts.max_concurrent_streams = Some(max);\n        }\n        self\n    }\n\n    /// Sets an interval for HTTP2 Ping frames should be sent to keep a\n    /// connection alive.\n    ///\n    /// Pass `None` to disable HTTP2 keep-alive.\n    ///\n    /// Default is currently disabled.\n    #[inline]\n    pub fn keep_alive_interval(mut self, interval: impl Into<Option<Duration>>) -> Self {\n        self.opts.keep_alive_interval = interval.into();\n        self\n    }\n\n    /// Sets a timeout for receiving an acknowledgement of the keep-alive ping.\n    ///\n    /// If the ping is not acknowledged within the timeout, the connection will\n    /// be closed. Does nothing if `keep_alive_interval` is disabled.\n    ///\n    /// Default is 20 seconds.\n    #[inline]\n    pub fn keep_alive_timeout(mut self, timeout: Duration) -> Self {\n        self.opts.keep_alive_timeout = timeout;\n        self\n    }\n\n    /// Sets whether HTTP2 keep-alive should apply while the connection is idle.\n    ///\n    /// If disabled, keep-alive pings are only sent while there are open\n    /// request/responses streams. If enabled, pings are also sent when no\n    /// streams are active. 
Does nothing if `keep_alive_interval` is\n    /// disabled.\n    ///\n    /// Default is `false`.\n    #[inline]\n    pub fn keep_alive_while_idle(mut self, enabled: bool) -> Self {\n        self.opts.keep_alive_while_idle = enabled;\n        self\n    }\n\n    /// Enables and disables the push feature for HTTP2.\n    ///\n    /// Passing `None` will do nothing.\n    #[inline]\n    pub fn enable_push(mut self, opt: bool) -> Self {\n        self.opts.enable_push = Some(opt);\n        self\n    }\n\n    /// Sets the enable connect protocol.\n    #[inline]\n    pub fn enable_connect_protocol(mut self, opt: bool) -> Self {\n        self.opts.enable_connect_protocol = Some(opt);\n        self\n    }\n\n    /// Disable RFC 7540 Stream Priorities (set to `true` to disable).\n    /// [RFC 9218]: <https://www.rfc-editor.org/rfc/rfc9218.html#section-2.1>\n    #[inline]\n    pub fn no_rfc7540_priorities(mut self, opt: bool) -> Self {\n        self.opts.no_rfc7540_priorities = Some(opt);\n        self\n    }\n\n    /// Sets the maximum number of HTTP2 concurrent locally reset streams.\n    ///\n    /// See the documentation of [`http2::client::Builder::max_concurrent_reset_streams`] for more\n    /// details.\n    ///\n    /// The default value is determined by the `h2` crate.\n    ///\n    /// [`http2::client::Builder::max_concurrent_reset_streams`]: https://docs.rs/h2/client/struct.Builder.html#method.max_concurrent_reset_streams\n    #[inline]\n    pub fn max_concurrent_reset_streams(mut self, max: usize) -> Self {\n        self.opts.max_concurrent_reset_streams = Some(max);\n        self\n    }\n\n    /// Set the maximum write buffer size for each HTTP/2 stream.\n    ///\n    /// Default is currently 1MB, but may change.\n    ///\n    /// # Panics\n    ///\n    /// The value must be no larger than `u32::MAX`.\n    #[inline]\n    pub fn max_send_buf_size(mut self, max: usize) -> Self {\n        assert!(max <= u32::MAX as usize);\n        self.opts.max_send_buffer_size = 
max;\n        self\n    }\n\n    /// Configures the maximum number of pending reset streams allowed before a GOAWAY will be sent.\n    ///\n    /// See <https://github.com/hyperium/hyper/issues/2877> for more information.\n    #[inline]\n    pub fn max_pending_accept_reset_streams(mut self, max: impl Into<Option<usize>>) -> Self {\n        if let Some(max) = max.into() {\n            self.opts.max_pending_accept_reset_streams = Some(max);\n        }\n        self\n    }\n\n    /// Sets the stream dependency and weight for the outgoing HEADERS frame.\n    ///\n    /// This configures the priority of the stream by specifying its dependency and weight,\n    /// as defined by the HTTP/2 priority mechanism. This can be used to influence how the\n    /// server allocates resources to this stream relative to others.\n    #[inline]\n    pub fn headers_stream_dependency<T>(mut self, stream_dependency: T) -> Self\n    where\n        T: Into<Option<StreamDependency>>,\n    {\n        if let Some(stream_dependency) = stream_dependency.into() {\n            self.opts.headers_stream_dependency = Some(stream_dependency);\n        }\n        self\n    }\n\n    /// Sets the HTTP/2 pseudo-header field order for outgoing HEADERS frames.\n    ///\n    /// This determines the order in which pseudo-header fields (such as `:method`, `:scheme`, etc.)\n    /// are encoded in the HEADERS frame. 
Customizing the order may be useful for interoperability\n    /// or testing purposes.\n    #[inline]\n    pub fn headers_pseudo_order<T>(mut self, headers_pseudo_order: T) -> Self\n    where\n        T: Into<Option<PseudoOrder>>,\n    {\n        if let Some(headers_pseudo_order) = headers_pseudo_order.into() {\n            self.opts.headers_pseudo_order = Some(headers_pseudo_order);\n        }\n        self\n    }\n\n    /// Sets the order of settings parameters in the initial SETTINGS frame.\n    ///\n    /// This determines the order in which settings are sent during the HTTP/2 handshake.\n    /// Customizing the order may be useful for testing or protocol compliance.\n    #[inline]\n    pub fn settings_order<T>(mut self, settings_order: T) -> Self\n    where\n        T: Into<Option<SettingsOrder>>,\n    {\n        if let Some(settings_order) = settings_order.into() {\n            self.opts.settings_order = Some(settings_order);\n        }\n        self\n    }\n\n    /// Sets the list of PRIORITY frames to be sent immediately after the connection is established,\n    /// but before the first request is sent.\n    ///\n    /// This allows you to pre-configure the HTTP/2 stream dependency tree by specifying a set of\n    /// PRIORITY frames that will be sent as part of the connection preface. This can be useful for\n    /// optimizing resource allocation or testing custom stream prioritization strategies.\n    ///\n    /// Each `Priority` in the list must have a valid (non-zero) stream ID. 
Any priority with a\n    /// stream ID of zero will be ignored.\n    #[inline]\n    pub fn priorities<T>(mut self, priorities: T) -> Self\n    where\n        T: Into<Option<Priorities>>,\n    {\n        if let Some(priorities) = priorities.into() {\n            self.opts.priorities = Some(priorities);\n        }\n        self\n    }\n\n    /// Builds the `Http2Options` instance.\n    #[inline]\n    pub fn build(self) -> Http2Options {\n        self.opts\n    }\n}\n\nimpl Http2Options {\n    /// Creates a new `Http2OptionsBuilder` instance.\n    pub fn builder() -> Http2OptionsBuilder {\n        // Reset optional frame size and header size settings to None to allow explicit\n        // customization This ensures users can configure these via builder methods without\n        // being constrained by defaults\n        Http2OptionsBuilder {\n            opts: Http2Options {\n                max_frame_size: None,\n                max_header_list_size: None,\n                ..Default::default()\n            },\n        }\n    }\n}\n\nimpl Default for Http2Options {\n    #[inline]\n    fn default() -> Self {\n        Http2Options {\n            adaptive_window: false,\n            initial_stream_id: None,\n            initial_conn_window_size: DEFAULT_CONN_WINDOW,\n            initial_window_size: DEFAULT_STREAM_WINDOW,\n            initial_max_send_streams: DEFAULT_INITIAL_MAX_SEND_STREAMS,\n            max_frame_size: Some(DEFAULT_MAX_FRAME_SIZE),\n            max_header_list_size: Some(DEFAULT_MAX_HEADER_LIST_SIZE),\n            keep_alive_interval: None,\n            keep_alive_timeout: Duration::from_secs(20),\n            keep_alive_while_idle: false,\n            max_concurrent_reset_streams: None,\n            max_send_buffer_size: DEFAULT_MAX_SEND_BUF_SIZE,\n            max_pending_accept_reset_streams: None,\n            header_table_size: None,\n            max_concurrent_streams: None,\n            enable_push: None,\n            enable_connect_protocol: 
None,\n            no_rfc7540_priorities: None,\n            settings_order: None,\n            headers_pseudo_order: None,\n            headers_stream_dependency: None,\n            priorities: None,\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::time::Duration;\n\n    use bytes::Bytes;\n    use http_body_util::Full;\n    use tokio::sync::oneshot;\n\n    use crate::client::core::{conn::http2::Builder, rt::TokioExecutor};\n\n    fn setup_duplex_test_server() -> (tokio::io::DuplexStream, tokio::io::DuplexStream) {\n        let (client_io, server_io) = tokio::io::duplex(64);\n        (client_io, server_io)\n    }\n\n    // https://github.com/hyperium/hyper/issues/4040\n    #[tokio::test]\n    async fn h2_pipe_task_cancelled_on_response_future_drop() {\n        let (client_io, server_io) = setup_duplex_test_server();\n        let (rst_tx, rst_rx) = oneshot::channel::<bool>();\n\n        tokio::spawn(async move {\n            let mut builder = http2::server::Builder::new();\n            builder.initial_window_size(0);\n            let mut h2 = builder.handshake::<_, Bytes>(server_io).await.unwrap();\n            let (req, _respond) = h2.accept().await.unwrap().unwrap();\n            tokio::spawn(async move {\n                let _ = std::future::poll_fn(|cx| h2.poll_closed(cx)).await;\n            });\n\n            let mut body = req.into_body();\n            let got_rst = tokio::time::timeout(Duration::from_secs(2), body.data())\n                .await\n                .is_ok_and(|frame| matches!(frame, Some(Err(_)) | None));\n            let _ = rst_tx.send(got_rst);\n        });\n\n        let (mut client, conn) = Builder::new(TokioExecutor::new())\n            .handshake(client_io)\n            .await\n            .expect(\"http handshake\");\n        tokio::spawn(async move {\n            let _ = conn.await;\n        });\n\n        let req = http::Request::post(\"http://localhost/\")\n            .body(Full::new(Bytes::from(vec![b'x'; 50])))\n    
        .unwrap();\n        let res =\n            tokio::time::timeout(Duration::from_millis(5), client.try_send_request(req)).await;\n        assert!(res.is_err(), \"should timeout waiting for response\");\n\n        let got_rst = rst_rx.await.expect(\"server task should complete\");\n        assert!(got_rst, \"server should receive RST_STREAM\");\n    }\n}\n"
  },
  {
    "path": "src/client/core/proto.rs",
    "content": "//! Pieces pertaining to the HTTP message protocol.\n\nmod headers;\n\npub mod http1;\npub mod http2;\n\n/// A request line of an incoming request message.\n#[derive(Debug, Default, PartialEq)]\npub(crate) struct RequestLine(http::Method, http::Uri);\n\n/// An Incoming Message head. Includes request/status line, and headers.\n#[derive(Debug, Default)]\npub(crate) struct MessageHead<S> {\n    /// HTTP version of the message.\n    version: http::Version,\n    /// Subject (request line or status line) of Incoming message.\n    subject: S,\n    /// Headers of the Incoming message.\n    headers: http::HeaderMap,\n    /// Extensions.\n    extensions: http::Extensions,\n}\n\n/// An incoming request message.\ntype RequestHead = MessageHead<RequestLine>;\n\n/// An incoming response message.\ntype ResponseHead = MessageHead<http::StatusCode>;\n\n#[derive(Debug)]\npub(crate) enum BodyLength {\n    /// Content-Length\n    Known(u64),\n    /// Transfer-Encoding: chunked (if h1)\n    Unknown,\n}\n\n/// Status of when a Dispatcher future completes.\npub(crate) enum Dispatched {\n    /// Dispatcher completely shutdown connection.\n    Shutdown,\n    /// Dispatcher has pending upgrade, and so did not shutdown.\n    Upgrade(crate::client::core::upgrade::Pending),\n}\n\nimpl MessageHead<http::StatusCode> {\n    fn into_response<B>(self, body: B) -> http::Response<B> {\n        let mut res = http::Response::new(body);\n        *res.status_mut() = self.subject;\n        *res.headers_mut() = self.headers;\n        *res.version_mut() = self.version;\n        *res.extensions_mut() = self.extensions;\n        res\n    }\n}\n"
  },
  {
    "path": "src/client/core/rt/bounds.rs",
    "content": "//! Trait aliases\n//!\n//! Traits in this module ease setting bounds and usually automatically\n//! implemented by implementing another trait.\n\npub use self::h2_client::Http2ClientConnExec;\n\nmod h2_client {\n    use std::future::Future;\n\n    use tokio::io::{AsyncRead, AsyncWrite};\n\n    use crate::client::core::{\n        error::BoxError, proto::http2::client::H2ClientFuture, rt::Executor,\n    };\n\n    /// An executor to spawn http2 futures for the client.\n    ///\n    /// This trait is implemented for any type that implements [`Executor`]\n    /// trait for any future.\n    ///\n    /// This trait is sealed and cannot be implemented for types outside this crate.\n    pub trait Http2ClientConnExec<B, T>: sealed_client::Sealed<(B, T)>\n    where\n        B: http_body::Body,\n        B::Error: Into<BoxError>,\n        T: AsyncRead + AsyncWrite + Unpin,\n    {\n        #[doc(hidden)]\n        fn execute_h2_future(&mut self, future: H2ClientFuture<B, T>);\n    }\n\n    impl<E, B, T> Http2ClientConnExec<B, T> for E\n    where\n        E: Executor<H2ClientFuture<B, T>>,\n        B: http_body::Body + 'static,\n        B::Error: Into<BoxError>,\n        H2ClientFuture<B, T>: Future<Output = ()>,\n        T: AsyncRead + AsyncWrite + Unpin,\n    {\n        #[inline]\n        fn execute_h2_future(&mut self, future: H2ClientFuture<B, T>) {\n            self.execute(future)\n        }\n    }\n\n    impl<E, B, T> sealed_client::Sealed<(B, T)> for E\n    where\n        E: Executor<H2ClientFuture<B, T>>,\n        B: http_body::Body + 'static,\n        B::Error: Into<BoxError>,\n        H2ClientFuture<B, T>: Future<Output = ()>,\n        T: AsyncRead + AsyncWrite + Unpin,\n    {\n    }\n\n    mod sealed_client {\n        pub trait Sealed<X> {}\n    }\n}\n"
  },
  {
    "path": "src/client/core/rt/timer.rs",
    "content": "//! Provides a timer trait with timer-like functions\n\nuse std::{\n    any::TypeId,\n    future::Future,\n    pin::Pin,\n    sync::Arc,\n    time::{Duration, Instant},\n};\n\n/// A timer which provides timer-like functions.\npub trait Timer {\n    /// Return a future that resolves in `duration` time.\n    fn sleep(&self, duration: Duration) -> Pin<Box<dyn Sleep>>;\n\n    /// Return a future that resolves at `deadline`.\n    fn sleep_until(&self, deadline: Instant) -> Pin<Box<dyn Sleep>>;\n\n    /// Return an `Instant` representing the current time.\n    ///\n    /// The default implementation returns [`Instant::now()`].\n    fn now(&self) -> Instant {\n        Instant::now()\n    }\n\n    /// Reset a future to resolve at `new_deadline` instead.\n    fn reset(&self, sleep: &mut Pin<Box<dyn Sleep>>, new_deadline: Instant) {\n        *sleep = self.sleep_until(new_deadline);\n    }\n}\n\n/// A future returned by a `Timer`.\npub trait Sleep: Send + Sync + Future<Output = ()> {\n    #[doc(hidden)]\n    /// This method is private and can not be implemented by downstream crate\n    fn __type_id(&self, _: private::Sealed) -> TypeId\n    where\n        Self: 'static,\n    {\n        TypeId::of::<Self>()\n    }\n}\n\n/// A user-provided timer to time background tasks.\n#[derive(Clone)]\npub enum Time {\n    Timer(Arc<dyn Timer + Send + Sync>),\n    Empty,\n}\n\n// =====impl Sleep =====\n\nimpl dyn Sleep {\n    //! 
This is a re-implementation of downcast methods from std::any::Any\n\n    /// Check whether the type is the same as `T`\n    pub fn is<T>(&self) -> bool\n    where\n        T: Sleep + 'static,\n    {\n        self.__type_id(private::Sealed {}) == TypeId::of::<T>()\n    }\n\n    /// Downcast a pinned &mut Sleep object to its original type\n    pub fn downcast_mut_pin<T>(self: Pin<&mut Self>) -> Option<Pin<&mut T>>\n    where\n        T: Sleep + 'static,\n    {\n        if self.is::<T>() {\n            #[allow(unsafe_code)]\n            unsafe {\n                let inner = Pin::into_inner_unchecked(self);\n                Some(Pin::new_unchecked(\n                    &mut *(&mut *inner as *mut dyn Sleep as *mut T),\n                ))\n            }\n        } else {\n            None\n        }\n    }\n}\n\n// ===== impl Time =====\n\nimpl Timer for Time {\n    fn sleep(&self, duration: Duration) -> Pin<Box<dyn Sleep>> {\n        match *self {\n            Time::Empty => {\n                panic!(\"You must supply a timer.\")\n            }\n            Time::Timer(ref t) => t.sleep(duration),\n        }\n    }\n\n    fn now(&self) -> Instant {\n        match *self {\n            Time::Empty => Instant::now(),\n            Time::Timer(ref t) => t.now(),\n        }\n    }\n\n    fn sleep_until(&self, deadline: Instant) -> Pin<Box<dyn Sleep>> {\n        match *self {\n            Time::Empty => {\n                panic!(\"You must supply a timer.\")\n            }\n            Time::Timer(ref t) => t.sleep_until(deadline),\n        }\n    }\n\n    fn reset(&self, sleep: &mut Pin<Box<dyn Sleep>>, new_deadline: Instant) {\n        match *self {\n            Time::Empty => {\n                panic!(\"You must supply a timer.\")\n            }\n            Time::Timer(ref t) => t.reset(sleep, new_deadline),\n        }\n    }\n}\n\nmod private {\n    pub struct Sealed {}\n}\n"
  },
  {
    "path": "src/client/core/rt/tokio.rs",
    "content": "//! Tokio IO integration for core.\nuse std::{\n    future::Future,\n    pin::Pin,\n    task::{Context, Poll},\n    time::{Duration, Instant},\n};\n\nuse pin_project_lite::pin_project;\n\nuse super::{Executor, Sleep, Timer};\n\n/// Future executor that utilises `tokio` threads.\n#[non_exhaustive]\n#[derive(Default, Debug, Clone)]\npub struct TokioExecutor {}\n\n/// A Timer that uses the tokio runtime.\n#[non_exhaustive]\n#[derive(Default, Clone, Debug)]\npub struct TokioTimer;\n\n// Use TokioSleep to get tokio::time::Sleep to implement Unpin.\n// see https://docs.rs/tokio/latest/tokio/time/struct.Sleep.html\npin_project! {\n    #[derive(Debug)]\n    struct TokioSleep {\n        #[pin]\n        inner: tokio::time::Sleep,\n    }\n}\n\n// ===== impl TokioExecutor =====\n\nimpl<Fut> Executor<Fut> for TokioExecutor\nwhere\n    Fut: Future + Send + 'static,\n    Fut::Output: Send + 'static,\n{\n    fn execute(&self, fut: Fut) {\n        tokio::spawn(fut);\n    }\n}\n\nimpl TokioExecutor {\n    /// Create new executor that relies on [`tokio::spawn`] to execute futures.\n    pub fn new() -> Self {\n        Self {}\n    }\n}\n\n// ==== impl TokioTimer =====\n\nimpl Timer for TokioTimer {\n    fn sleep(&self, duration: Duration) -> Pin<Box<dyn Sleep>> {\n        Box::pin(TokioSleep {\n            inner: tokio::time::sleep(duration),\n        })\n    }\n\n    fn sleep_until(&self, deadline: Instant) -> Pin<Box<dyn Sleep>> {\n        Box::pin(TokioSleep {\n            inner: tokio::time::sleep_until(deadline.into()),\n        })\n    }\n\n    fn now(&self) -> Instant {\n        tokio::time::Instant::now().into()\n    }\n\n    fn reset(&self, sleep: &mut Pin<Box<dyn Sleep>>, new_deadline: Instant) {\n        if let Some(sleep) = sleep.as_mut().downcast_mut_pin::<TokioSleep>() {\n            sleep.reset(new_deadline)\n        }\n    }\n}\n\nimpl TokioTimer {\n    /// Create a new TokioTimer\n    pub fn new() -> Self {\n        Self {}\n    }\n}\n\n// ===== impl 
TokioSleep =====\n\nimpl Future for TokioSleep {\n    type Output = ();\n\n    #[inline]\n    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {\n        self.project().inner.poll(cx)\n    }\n}\n\nimpl Sleep for TokioSleep {}\n\nimpl TokioSleep {\n    #[inline]\n    fn reset(self: Pin<&mut Self>, deadline: Instant) {\n        self.project().inner.as_mut().reset(deadline.into());\n    }\n}\n"
  },
  {
    "path": "src/client/core/rt.rs",
    "content": "//! Runtime components\n//!\n//! The traits and types within this module are used to allow plugging in\n//! runtime types. These include:\n//!\n//! - Executors\n//! - Timers\n//! - IO transports\n\npub mod bounds;\nmod timer;\nmod tokio;\n\npub use self::{\n    timer::{Sleep, Time, Timer},\n    tokio::{TokioExecutor, TokioTimer},\n};\n\n/// An executor of futures.\n///\n/// This trait allows abstract over async runtimes. Implement this trait for your own type.\npub trait Executor<Fut> {\n    /// Place the future into the executor to be run.\n    fn execute(&self, fut: Fut);\n}\n"
  },
  {
    "path": "src/client/core/upgrade.rs",
    "content": "//! HTTP Upgrades\n//!\n//! This module deals with managing [HTTP Upgrades][mdn] in crate::core:. Since\n//! several concepts in HTTP allow for first talking HTTP, and then converting\n//! to a different protocol, this module conflates them into a single API.\n//! Those include:\n//!\n//! - HTTP/1.1 Upgrades\n//! - HTTP `CONNECT`\n//!\n//! You are responsible for any other pre-requisites to establish an upgrade,\n//! such as sending the appropriate headers, methods, and status codes. You can\n//! then use [`on`][] to grab a `Future` which will resolve to the upgraded\n//! connection object, or an error if the upgrade fails.\n//!\n//! [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Protocol_upgrade_mechanism\n//!\n//! Sending an HTTP upgrade from the client involves setting\n//! either the appropriate method, if wanting to `CONNECT`, or headers such as\n//! `Upgrade` and `Connection`, on the `http::Request`. Once receiving the\n//! `http::Response` back, you must check for the specific information that the\n//! upgrade is agreed upon by the server (such as a `101` status code), and then\n//! get the `Future` from the `Response`.\n\nuse std::{\n    error::Error as StdError,\n    fmt,\n    future::Future,\n    io,\n    pin::Pin,\n    sync::Arc,\n    task::{Context, Poll},\n};\n\nuse bytes::Bytes;\nuse tokio::{\n    io::{AsyncRead, AsyncWrite, ReadBuf},\n    sync::oneshot,\n};\n\nuse self::rewind::Rewind;\nuse super::{Error, Result};\nuse crate::sync::Mutex;\n\n/// An upgraded HTTP connection.\n///\n/// This type holds a trait object internally of the original IO that\n/// was used to speak HTTP before the upgrade. 
It can be used directly\n/// as a [`AsyncRead`] or [`AsyncWrite`] for convenience.\n///\n/// Alternatively, if the exact type is known, this can be deconstructed\n/// into its parts.\npub struct Upgraded {\n    io: Rewind<Box<dyn Io + Send>>,\n}\n\n/// A future for a possible HTTP upgrade.\n///\n/// If no upgrade was available, or it doesn't succeed, yields an `Error`.\n#[derive(Clone)]\npub struct OnUpgrade {\n    rx: Option<Arc<Mutex<oneshot::Receiver<Result<Upgraded>>>>>,\n}\n\n/// Gets a pending HTTP upgrade from this message.\n///\n/// This can be called on the following types:\n///\n/// - `http::Request<B>`\n/// - `http::Response<B>`\n/// - `&mut http::Request<B>`\n/// - `&mut http::Response<B>`\n#[inline]\npub fn on<T: sealed::CanUpgrade>(msg: T) -> OnUpgrade {\n    msg.on_upgrade()\n}\n\npub(crate) struct Pending {\n    tx: oneshot::Sender<Result<Upgraded>>,\n}\n\npub(crate) fn pending() -> (Pending, OnUpgrade) {\n    let (tx, rx) = oneshot::channel();\n    (\n        Pending { tx },\n        OnUpgrade {\n            rx: Some(Arc::new(Mutex::new(rx))),\n        },\n    )\n}\n\n// ===== impl Upgraded =====\n\nimpl Upgraded {\n    #[inline]\n    pub(crate) fn new<T>(io: T, read_buf: Bytes) -> Self\n    where\n        T: AsyncRead + AsyncWrite + Unpin + Send + 'static,\n    {\n        Upgraded {\n            io: Rewind::new_buffered(Box::new(io), read_buf),\n        }\n    }\n}\n\nimpl AsyncRead for Upgraded {\n    #[inline]\n    fn poll_read(\n        mut self: Pin<&mut Self>,\n        cx: &mut Context<'_>,\n        buf: &mut ReadBuf<'_>,\n    ) -> Poll<io::Result<()>> {\n        Pin::new(&mut self.io).poll_read(cx, buf)\n    }\n}\n\nimpl AsyncWrite for Upgraded {\n    #[inline]\n    fn poll_write(\n        mut self: Pin<&mut Self>,\n        cx: &mut Context<'_>,\n        buf: &[u8],\n    ) -> Poll<io::Result<usize>> {\n        Pin::new(&mut self.io).poll_write(cx, buf)\n    }\n\n    #[inline]\n    fn poll_write_vectored(\n        mut self: Pin<&mut Self>,\n  
      cx: &mut Context<'_>,\n        bufs: &[io::IoSlice<'_>],\n    ) -> Poll<io::Result<usize>> {\n        Pin::new(&mut self.io).poll_write_vectored(cx, bufs)\n    }\n\n    #[inline]\n    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {\n        Pin::new(&mut self.io).poll_flush(cx)\n    }\n\n    #[inline]\n    fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {\n        Pin::new(&mut self.io).poll_shutdown(cx)\n    }\n\n    #[inline]\n    fn is_write_vectored(&self) -> bool {\n        self.io.is_write_vectored()\n    }\n}\n\nimpl fmt::Debug for Upgraded {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        f.debug_struct(\"Upgraded\").finish()\n    }\n}\n\n// ===== impl OnUpgrade =====\n\nimpl OnUpgrade {\n    #[inline]\n    pub(super) fn none() -> Self {\n        OnUpgrade { rx: None }\n    }\n\n    #[inline]\n    pub(super) fn is_none(&self) -> bool {\n        self.rx.is_none()\n    }\n}\n\nimpl Future for OnUpgrade {\n    type Output = Result<Upgraded, Error>;\n\n    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {\n        match self.rx {\n            Some(ref rx) => Pin::new(&mut *rx.lock()).poll(cx).map(|res| match res {\n                Ok(Ok(upgraded)) => Ok(upgraded),\n                Ok(Err(err)) => Err(err),\n                Err(_oneshot_canceled) => Err(Error::new_canceled().with(UpgradeExpected)),\n            }),\n            None => Poll::Ready(Err(Error::new_user_no_upgrade())),\n        }\n    }\n}\n\n// ===== impl Pending =====\n\nimpl Pending {\n    #[inline]\n    pub(super) fn fulfill(self, upgraded: Upgraded) {\n        trace!(\"pending upgrade fulfill\");\n        let _ = self.tx.send(Ok(upgraded));\n    }\n\n    /// Don't fulfill the pending Upgrade, but instead signal that\n    /// upgrades are handled manually.\n    #[inline]\n    pub(super) fn manual(self) {\n        trace!(\"pending upgrade handled 
manually\");\n        let _ = self.tx.send(Err(Error::new_user_manual_upgrade()));\n    }\n}\n\n// ===== impl UpgradeExpected =====\n\n/// Error cause returned when an upgrade was expected but canceled\n/// for whatever reason.\n///\n/// This likely means the actual `Conn` future wasn't polled and upgraded.\n#[derive(Debug)]\nstruct UpgradeExpected;\n\nimpl fmt::Display for UpgradeExpected {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        f.write_str(\"upgrade expected but not completed\")\n    }\n}\n\nimpl StdError for UpgradeExpected {}\n\n// ===== impl Io =====\n\ntrait Io: AsyncRead + AsyncWrite + Unpin + 'static {}\n\nimpl<T: AsyncRead + AsyncWrite + Unpin + 'static> Io for T {}\n\nmod sealed {\n    use super::OnUpgrade;\n\n    pub trait CanUpgrade {\n        fn on_upgrade(self) -> OnUpgrade;\n    }\n\n    impl<B> CanUpgrade for http::Request<B> {\n        fn on_upgrade(mut self) -> OnUpgrade {\n            self.extensions_mut()\n                .remove::<OnUpgrade>()\n                .unwrap_or_else(OnUpgrade::none)\n        }\n    }\n\n    impl<B> CanUpgrade for &'_ mut http::Request<B> {\n        fn on_upgrade(self) -> OnUpgrade {\n            self.extensions_mut()\n                .remove::<OnUpgrade>()\n                .unwrap_or_else(OnUpgrade::none)\n        }\n    }\n\n    impl<B> CanUpgrade for http::Response<B> {\n        fn on_upgrade(mut self) -> OnUpgrade {\n            self.extensions_mut()\n                .remove::<OnUpgrade>()\n                .unwrap_or_else(OnUpgrade::none)\n        }\n    }\n\n    impl<B> CanUpgrade for &'_ mut http::Response<B> {\n        fn on_upgrade(self) -> OnUpgrade {\n            self.extensions_mut()\n                .remove::<OnUpgrade>()\n                .unwrap_or_else(OnUpgrade::none)\n        }\n    }\n}\n\nmod rewind {\n    use std::{\n        cmp, io,\n        pin::Pin,\n        task::{Context, Poll},\n    };\n\n    use bytes::{Buf, Bytes};\n    use tokio::io::{AsyncRead, AsyncWrite, 
ReadBuf};\n\n    /// Combine a buffer with an IO, rewinding reads to use the buffer.\n    #[derive(Debug)]\n    pub(crate) struct Rewind<T> {\n        pre: Option<Bytes>,\n        inner: T,\n    }\n\n    impl<T> Rewind<T> {\n        #[inline]\n        pub(crate) fn new_buffered(io: T, buf: Bytes) -> Self {\n            Rewind {\n                pre: Some(buf),\n                inner: io,\n            }\n        }\n\n        #[cfg(test)]\n        pub(crate) fn rewind(&mut self, bs: Bytes) {\n            debug_assert!(self.pre.is_none());\n            self.pre = Some(bs);\n        }\n    }\n\n    impl<T> AsyncRead for Rewind<T>\n    where\n        T: AsyncRead + Unpin,\n    {\n        fn poll_read(\n            mut self: Pin<&mut Self>,\n            cx: &mut Context<'_>,\n            buf: &mut ReadBuf<'_>,\n        ) -> Poll<io::Result<()>> {\n            if let Some(mut prefix) = self.pre.take() {\n                // If there are no remaining bytes, let the bytes get dropped.\n                if !prefix.is_empty() {\n                    let copy_len = cmp::min(prefix.len(), buf.remaining());\n                    // TODO: There should be a way to do following two lines cleaner...\n                    buf.put_slice(&prefix[..copy_len]);\n                    prefix.advance(copy_len);\n                    // Put back what's left\n                    if !prefix.is_empty() {\n                        self.pre = Some(prefix);\n                    }\n\n                    return Poll::Ready(Ok(()));\n                }\n            }\n            Pin::new(&mut self.inner).poll_read(cx, buf)\n        }\n    }\n\n    impl<T> AsyncWrite for Rewind<T>\n    where\n        T: AsyncWrite + Unpin,\n    {\n        #[inline]\n        fn poll_write(\n            mut self: Pin<&mut Self>,\n            cx: &mut Context<'_>,\n            buf: &[u8],\n        ) -> Poll<io::Result<usize>> {\n            Pin::new(&mut self.inner).poll_write(cx, buf)\n        }\n\n        #[inline]\n        fn 
poll_write_vectored(\n            mut self: Pin<&mut Self>,\n            cx: &mut Context<'_>,\n            bufs: &[io::IoSlice<'_>],\n        ) -> Poll<io::Result<usize>> {\n            Pin::new(&mut self.inner).poll_write_vectored(cx, bufs)\n        }\n\n        #[inline]\n        fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {\n            Pin::new(&mut self.inner).poll_flush(cx)\n        }\n\n        #[inline]\n        fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {\n            Pin::new(&mut self.inner).poll_shutdown(cx)\n        }\n\n        #[inline]\n        fn is_write_vectored(&self) -> bool {\n            self.inner.is_write_vectored()\n        }\n    }\n\n    #[cfg(test)]\n    mod tests {\n        use bytes::Bytes;\n        use tokio::io::AsyncReadExt;\n\n        use super::Rewind;\n\n        #[tokio::test]\n        async fn partial_rewind() {\n            let underlying = [104, 101, 108, 108, 111];\n\n            let mock = tokio_test::io::Builder::new().read(&underlying).build();\n\n            let mut stream = Rewind::new_buffered(mock, Bytes::new());\n\n            // Read off some bytes, ensure we filled o1\n            let mut buf = [0; 2];\n            stream.read_exact(&mut buf).await.expect(\"read1\");\n\n            // Rewind the stream so that it is as if we never read in the first place.\n            stream.rewind(Bytes::copy_from_slice(&buf[..]));\n\n            let mut buf = [0; 5];\n            stream.read_exact(&mut buf).await.expect(\"read1\");\n\n            // At this point we should have read everything that was in the MockStream\n            assert_eq!(&buf, &underlying);\n        }\n\n        #[tokio::test]\n        async fn full_rewind() {\n            let underlying = [104, 101, 108, 108, 111];\n\n            let mock = tokio_test::io::Builder::new().read(&underlying).build();\n\n            let mut stream = Rewind::new_buffered(mock, 
Bytes::new());\n\n            let mut buf = [0; 5];\n            stream.read_exact(&mut buf).await.expect(\"read1\");\n\n            // Rewind the stream so that it is as if we never read in the first place.\n            stream.rewind(Bytes::copy_from_slice(&buf[..]));\n\n            let mut buf = [0; 5];\n            stream.read_exact(&mut buf).await.expect(\"read1\");\n\n            assert_eq!(&buf, &underlying);\n        }\n    }\n}\n"
  },
  {
    "path": "src/client/core.rs",
    "content": "//! Core HTTP client protocol and utilities.\n//!\n//! Much of this codebase is adapted and refined from [hyper](https://github.com/hyperium/hyper),\n//! aiming to match its performance and reliability for asynchronous HTTP/1 and HTTP/2.\n\nmod error;\nmod proto;\n\npub mod body;\npub mod conn;\npub mod dispatch;\npub mod rt;\npub mod upgrade;\n\npub use self::{\n    error::{Error, Result},\n    proto::{http1, http2},\n};\n"
  },
  {
    "path": "src/client/emulate.rs",
    "content": "use http::HeaderMap;\n\nuse super::{\n    core::{http1::Http1Options, http2::Http2Options},\n    group::Group,\n};\nuse crate::{header::OrigHeaderMap, tls::TlsOptions};\n\n/// Converts a value into an [`Emulation`] configuration.\n///\n/// This trait lets multiple input types provide a unified way to produce\n/// an emulation profile. Typical inputs include:\n/// - Predefined browser profiles\n/// - Transport option sets (e.g. HTTP/1, HTTP/2, TLS)\n/// - User-defined strategy types\npub trait IntoEmulation {\n    /// Converts `self` into an [`Emulation`] configuration.\n    fn into_emulation(self) -> Emulation;\n}\n\n/// Builder for creating an [`Emulation`] configuration.\n#[must_use]\n#[derive(Debug)]\npub struct EmulationBuilder {\n    emulation: Emulation,\n}\n\n/// HTTP emulation settings for a client profile.\n///\n/// Combines protocol options (HTTP/1, HTTP/2, TLS) and default headers.\n#[non_exhaustive]\n#[derive(Debug, Clone)]\npub struct Emulation {\n    pub(crate) group: Group,\n\n    /// Default headers applied to outgoing requests.\n    pub headers: HeaderMap,\n\n    /// Original headers with preserved case and duplicates.\n    pub orig_headers: OrigHeaderMap,\n\n    /// TLS configuration.\n    pub tls_options: Option<TlsOptions>,\n\n    /// HTTP/1 configuration.\n    pub http1_options: Option<Http1Options>,\n\n    /// HTTP/2 configuration.\n    pub http2_options: Option<Http2Options>,\n}\n\n// ==== impl EmulationBuilder ====\n\nimpl EmulationBuilder {\n    /// Sets the  HTTP/1 options configuration.\n    #[inline]\n    pub fn http1_options(mut self, opts: Http1Options) -> Self {\n        self.emulation.http1_options = Some(opts);\n        self\n    }\n\n    /// Sets the HTTP/2 options configuration.\n    #[inline]\n    pub fn http2_options(mut self, opts: Http2Options) -> Self {\n        self.emulation.http2_options = Some(opts);\n        self\n    }\n\n    /// Sets the  TLS options configuration.\n    #[inline]\n    pub fn 
tls_options(mut self, opts: TlsOptions) -> Self {\n        self.emulation.tls_options = Some(opts);\n        self\n    }\n\n    /// Sets the default headers.\n    #[inline]\n    pub fn headers(mut self, src: HeaderMap) -> Self {\n        crate::util::replace_headers(&mut self.emulation.headers, src);\n        self\n    }\n\n    /// Sets the original headers.\n    #[inline]\n    pub fn orig_headers(mut self, src: OrigHeaderMap) -> Self {\n        self.emulation.orig_headers.extend(src);\n        self\n    }\n\n    /// Builds the [`Emulation`] instance.\n    #[inline]\n    pub fn build(mut self, group: Group) -> Emulation {\n        self.emulation.group.emulate(group);\n        self.emulation\n    }\n}\n\n// ==== impl Emulation ====\n\nimpl Emulation {\n    /// Creates a new [`EmulationBuilder`].\n    #[inline]\n    pub fn builder() -> EmulationBuilder {\n        EmulationBuilder {\n            emulation: Emulation {\n                group: Group::default(),\n                headers: HeaderMap::new(),\n                orig_headers: OrigHeaderMap::new(),\n                tls_options: None,\n                http1_options: None,\n                http2_options: None,\n            },\n        }\n    }\n}\n\nimpl<T: Into<Emulation>> IntoEmulation for T {\n    #[inline]\n    fn into_emulation(self) -> Emulation {\n        self.into()\n    }\n}\n"
  },
  {
    "path": "src/client/future.rs",
    "content": "use std::{\n    pin::Pin,\n    task::{Context, Poll, ready},\n};\n\nuse http::{Request, Uri};\nuse pin_project_lite::pin_project;\nuse tower::util::{Either, Oneshot};\n\nuse super::{Body, BoxedClientService, ClientService, Error, Response};\n\npin_project! {\n    /// [`Pending`] is a future representing the state of an HTTP request, which may be either\n    /// an in-flight request (with its associated future and URI) or an error state.\n    /// Used to drive the HTTP request to completion or report an error.\n    #[project = PendingProj]\n    pub enum Pending {\n        Request {\n            uri: Option<Uri>,\n            fut: Pin<Box<Oneshot<Either<ClientService, BoxedClientService>, Request<Body>>>>,\n        },\n        Error {\n            error: Option<Error>,\n        },\n    }\n}\n\nimpl Future for Pending {\n    type Output = Result<Response, Error>;\n\n    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {\n        let (uri, res) = match self.project() {\n            PendingProj::Request { uri, fut } => (uri, fut.as_mut().poll(cx)),\n            PendingProj::Error { error } => {\n                let err = error\n                    .take()\n                    .expect(\"Pending::Error polled after completion\");\n                return Poll::Ready(Err(err));\n            }\n        };\n\n        let res = ready!(res);\n        let uri = uri\n            .take()\n            .expect(\"Pending::Request polled after completion\");\n        let res = match res {\n            Ok(res) => Ok(Response::new(res, uri)),\n            Err(err) => {\n                let mut err = err\n                    .downcast::<Error>()\n                    .map_or_else(Error::request, |err| *err);\n                if err.uri().is_none() {\n                    err = err.with_uri(uri);\n                }\n                Err(err)\n            }\n        };\n\n        Poll::Ready(res)\n    }\n}\n\n#[cfg(test)]\nmod test {\n\n    #[test]\n   
 fn test_future_size() {\n        let s = std::mem::size_of::<super::Pending>();\n        assert!(s <= 360, \"size_of::<Pending>() == {s}, too big\");\n    }\n\n    #[tokio::test]\n    async fn error_has_url() {\n        let u = \"http://does.not.exist.local/ever\";\n        let err = crate::Client::new().get(u).send().await.unwrap_err();\n        assert_eq!(err.uri().unwrap(), u, \"{err:?}\");\n    }\n}\n"
  },
  {
    "path": "src/client/group.rs",
    "content": "//! # Request Grouping Mechanism\n//!\n//! This module provides the [`Group`] structure, which defines the logical boundaries\n//! for categorizing and segregating outbound requests.\n//!\n//! ## Concept\n//! A `Group` acts as a multi-dimensional identity for a request. In complex networking\n//! stack environments, two requests targeting the same destination may belong to\n//! distinct logical groups due to different metadata, security contexts, or\n//! routing requirements.\n//!\n//! ## Logical Segregation\n//! By assigning requests to different groups, the system ensures:\n//! 1. **Contextual Isolation**: Requests are processed and dispatched within their defined logical\n//!    partitions.\n//! 2. **Deterministic Identity**: The internal `BTreeMap` ensures that the identity of a group is\n//!    stable and invariant to the order in which grouping criteria are applied.\n//! 3. **Resource Affinity**: Resource management (such as connection pooling) respects these\n//!    boundaries, ensuring that resources are never leaked across different request groups.\n\nuse std::{borrow::Cow, collections::BTreeMap, hash::Hash};\n\nuse http::{Uri, Version};\n\nuse crate::{client::SocketBindOptions, proxy::Matcher};\n\nmacro_rules! impl_group_variants {\n    ($($name:ident $(($ty:ty))?,)*) => {\n        /// Unique discriminator for request grouping dimensions.\n        #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq, PartialOrd, Ord)]\n        enum GroupId {\n            $($name,)*\n        }\n\n        /// Data container for specific grouping criteria.\n        #[derive(Debug, Clone, Hash, PartialEq, Eq)]\n        enum GroupPart {\n            $($name $(($ty))?,)*\n        }\n    }\n}\n\nimpl_group_variants! 
{\n    Request(Group),\n    Emulate(Group),\n    Named(Cow<'static, str>),\n    Number(u64),\n    Uri(Uri),\n    Version(Version),\n    Proxy(Matcher),\n    SocketBind(Option<SocketBindOptions>),\n}\n\n/// A logical identifier for request grouping.\n///\n/// `Group` encapsulates the criteria that define a request's execution context.\n/// Requests with non-identical `Group` states are treated as belonging to\n/// different logical partitions, preventing unintended interaction or\n/// resource sharing between them.\n#[derive(Debug, Default, Clone, Hash, PartialEq, Eq)]\npub struct Group(BTreeMap<GroupId, GroupPart>);\n\nimpl Group {\n    /// Creates a new [`Group`] with a custom name.\n    #[inline]\n    pub fn named<N: Into<Cow<'static, str>>>(name: N) -> Self {\n        Group(BTreeMap::from([(\n            GroupId::Named,\n            GroupPart::Named(name.into()),\n        )]))\n    }\n\n    /// Creates a new [`Group`] with a numeric identifier.\n    pub fn number<V: Into<u64>>(value: V) -> Self {\n        Group(BTreeMap::from([(\n            GroupId::Number,\n            GroupPart::Number(value.into()),\n        )]))\n    }\n\n    /// Groups the request by a specific target [`Uri`].\n    #[inline]\n    pub(crate) fn uri(&mut self, uri: Uri) -> &mut Self {\n        self.extend(GroupId::Uri, GroupPart::Uri(uri))\n    }\n\n    /// Groups the request by its required HTTP [`Version`].\n    #[inline]\n    pub(crate) fn version(&mut self, version: Option<Version>) -> &mut Self {\n        self.extend(GroupId::Version, version.map(GroupPart::Version))\n    }\n\n    /// Groups the request based on its proxy [`Matcher`] criteria.\n    #[inline]\n    pub(crate) fn proxy(&mut self, proxy: Option<Matcher>) -> &mut Self {\n        self.extend(GroupId::Proxy, proxy.map(GroupPart::Proxy))\n    }\n\n    /// Groups the request by its resolved socket bind options.\n    #[inline]\n    pub(crate) fn socket_bind(&mut self, opts: Option<SocketBindOptions>) -> &mut Self {\n        
self.extend(GroupId::SocketBind, GroupPart::SocketBind(opts))\n    }\n\n    /// Creates a nested request group.\n    #[inline]\n    pub(crate) fn request(&mut self, group: Group) -> &mut Self {\n        self.extend(GroupId::Request, GroupPart::Request(group))\n    }\n\n    /// Groups the request by its emulation-layer characteristics.\n    #[inline]\n    pub(crate) fn emulate(&mut self, group: Group) -> &mut Self {\n        self.extend(GroupId::Emulate, GroupPart::Emulate(group))\n    }\n\n    #[inline]\n    fn extend<T: Into<Option<GroupPart>>>(&mut self, id: GroupId, entry: T) -> &mut Self {\n        if let Some(entry) = entry.into() {\n            self.0.insert(id, entry);\n        }\n        self\n    }\n}\n\nimpl From<u64> for Group {\n    #[inline]\n    fn from(value: u64) -> Self {\n        Group::number(value)\n    }\n}\n\nimpl From<&'static str> for Group {\n    #[inline]\n    fn from(value: &'static str) -> Self {\n        Group::named(value)\n    }\n}\n\nimpl From<String> for Group {\n    #[inline]\n    fn from(value: String) -> Self {\n        Group::named(value)\n    }\n}\n\nimpl From<Cow<'static, str>> for Group {\n    #[inline]\n    fn from(value: Cow<'static, str>) -> Self {\n        Group::named(value)\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::hash::{DefaultHasher, Hash, Hasher};\n\n    use super::*;\n\n    #[test]\n    fn test_group_identity_invariance() {\n        let mut g1 = Group::default();\n        g1.extend(GroupId::Number, GroupPart::Number(42));\n        g1.extend(GroupId::Named, GroupPart::Named(\"worker\".into()));\n\n        let mut g2 = Group::default();\n        g2.extend(GroupId::Named, GroupPart::Named(\"worker\".into()));\n        g2.extend(GroupId::Number, GroupPart::Number(42));\n\n        let mut h1 = DefaultHasher::new();\n        g1.hash(&mut h1);\n\n        let mut h2 = DefaultHasher::new();\n        g2.hash(&mut h2);\n\n        assert_eq!(\n            h1.finish(),\n            h2.finish(),\n            \"Request 
groups must maintain identical hashes regardless of criteria insertion order\"\n        );\n    }\n}\n"
  },
  {
    "path": "src/client/layer/client/exec.rs",
    "content": "use std::{future::Future, pin::Pin, sync::Arc};\n\nuse crate::client::core::rt::Executor;\n\npub(crate) type BoxSendFuture = Pin<Box<dyn Future<Output = ()> + Send>>;\n\n// Either the user provides an executor for background tasks, or we use `tokio::spawn`.\n#[derive(Clone)]\npub struct Exec(Arc<dyn Executor<BoxSendFuture> + Send + Sync>);\n\n// ===== impl Exec =====\n\nimpl Exec {\n    pub(super) fn new<E>(inner: E) -> Self\n    where\n        E: Executor<BoxSendFuture> + Send + Sync + 'static,\n    {\n        Exec(Arc::new(inner))\n    }\n}\n\nimpl<F> Executor<F> for Exec\nwhere\n    F: Future<Output = ()> + Send + 'static,\n{\n    fn execute(&self, fut: F) {\n        self.0.execute(Box::pin(fut));\n    }\n}\n"
  },
  {
    "path": "src/client/layer/client/lazy.rs",
    "content": "use std::{\n    future::Future,\n    pin::Pin,\n    task::{self, Poll},\n};\n\nuse pin_project_lite::pin_project;\n\npub(crate) trait Started: Future {\n    fn started(&self) -> bool;\n}\n\npub(crate) fn lazy<F, R>(func: F) -> Lazy<F, R>\nwhere\n    F: FnOnce() -> R,\n    R: Future + Unpin,\n{\n    Lazy {\n        inner: Inner::Init { func },\n    }\n}\n\n// FIXME: allow() required due to `impl Trait` leaking types to this lint\npin_project! {\n    pub(crate) struct Lazy<F, R> {\n        #[pin]\n        inner: Inner<F, R>,\n    }\n}\n\npin_project! {\n    #[project = InnerProj]\n    #[project_replace = InnerProjReplace]\n    enum Inner<F, R> {\n        Init { func: F },\n        Fut { #[pin] fut: R },\n        Empty,\n    }\n}\n\nimpl<F, R> Started for Lazy<F, R>\nwhere\n    F: FnOnce() -> R,\n    R: Future,\n{\n    fn started(&self) -> bool {\n        match self.inner {\n            Inner::Init { .. } => false,\n            Inner::Fut { .. } | Inner::Empty => true,\n        }\n    }\n}\n\nimpl<F, R> Future for Lazy<F, R>\nwhere\n    F: FnOnce() -> R,\n    R: Future,\n{\n    type Output = R::Output;\n\n    fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {\n        let mut this = self.project();\n\n        if let InnerProj::Fut { fut } = this.inner.as_mut().project() {\n            return fut.poll(cx);\n        }\n\n        match this.inner.as_mut().project_replace(Inner::Empty) {\n            InnerProjReplace::Init { func } => {\n                this.inner.set(Inner::Fut { fut: func() });\n                if let InnerProj::Fut { fut } = this.inner.project() {\n                    return fut.poll(cx);\n                }\n                unreachable!()\n            }\n            _ => unreachable!(\"lazy state wrong\"),\n        }\n    }\n}\n"
  },
  {
    "path": "src/client/layer/client/pool.rs",
    "content": "use std::{\n    collections::{HashMap, HashSet, VecDeque},\n    convert::Infallible,\n    error::Error as StdError,\n    fmt::{self, Debug},\n    future::Future,\n    hash::Hash,\n    num::NonZeroUsize,\n    ops::{Deref, DerefMut},\n    pin::Pin,\n    sync::{Arc, Weak},\n    task::{self, Poll, ready},\n    time::{Duration, Instant},\n};\n\nuse lru::LruCache;\nuse tokio::sync::oneshot;\n\nuse super::exec::{self, Exec};\nuse crate::{\n    client::core::rt::{Executor, Time, Timer},\n    sync::Mutex,\n};\n\npub struct Pool<T, K: Key> {\n    // If the pool is disabled, this is None.\n    inner: Option<Arc<Mutex<PoolInner<T, K>>>>,\n}\n\n// Before using a pooled connection, make sure the sender is not dead.\n//\n// This is a trait to allow the `client::pool::tests` to work for `i32`.\n//\n// See https://github.com/hyperium/hyper/issues/1429\npub trait Poolable: Unpin + Send + Sized + 'static {\n    fn is_open(&self) -> bool;\n    /// Reserve this connection.\n    ///\n    /// Allows for HTTP/2 to return a shared reservation.\n    fn reserve(self) -> Reservation<Self>;\n    fn can_share(&self) -> bool;\n}\n\npub trait Key: Eq + Hash + Clone + Debug + Unpin + Send + 'static {}\n\nimpl<T> Key for T where T: Eq + Hash + Clone + Debug + Unpin + Send + 'static {}\n\n/// A marker to identify what version a pooled connection is.\n#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]\n#[repr(u8)]\npub enum Ver {\n    Auto,\n    Http2,\n}\n\n/// When checking out a pooled connection, it might be that the connection\n/// only supports a single reservation, or it might be usable for many.\n///\n/// Specifically, HTTP/1 requires a unique reservation, but HTTP/2 can be\n/// used for multiple requests.\n// FIXME: allow() required due to `impl Trait` leaking types to this lint\npub enum Reservation<T> {\n    /// This connection could be used multiple times, the first one will be\n    /// reinserted into the `idle` pool, and the second will be given to\n    /// the 
`Checkout`.\n    Shared(T, T),\n    /// This connection requires unique access. It will be returned after\n    /// use is complete.\n    Unique(T),\n}\n\n/// Simple type alias in case the key type needs to be adjusted.\n// pub type Key = (http::uri::Scheme, http::uri::Authority); //Arc<String>;\nstruct PoolInner<T, K: Eq + Hash> {\n    // A flag that a connection is being established, and the connection\n    // should be shared. This prevents making multiple HTTP/2 connections\n    // to the same host.\n    connecting: HashSet<K>,\n    // These are internal Conns sitting in the event loop in the KeepAlive\n    // state, waiting to receive a new Request to send on the socket.\n    idle: LruCache<K, Vec<Idle<T>>>,\n    max_idle_per_host: usize,\n    // These are outstanding Checkouts that are waiting for a socket to be\n    // able to send a Request one. This is used when \"racing\" for a new\n    // connection.\n    //\n    // The Client starts 2 tasks, 1 to connect a new socket, and 1 to wait\n    // for the Pool to receive an idle Conn. When a Conn becomes idle,\n    // this list is checked for any parked Checkouts, and tries to notify\n    // them that the Conn could be used instead of waiting for a brand new\n    // connection.\n    waiters: HashMap<K, VecDeque<oneshot::Sender<T>>>,\n    // A oneshot channel is used to allow the interval to be notified when\n    // the Pool completely drops. 
That way, the interval can cancel immediately.\n    idle_interval_ref: Option<oneshot::Sender<Infallible>>,\n    exec: Exec,\n    timer: Time,\n    timeout: Option<Duration>,\n}\n\n// This is because `Weak::new()` *allocates* space for `T`, even if it\n// doesn't need it!\nstruct WeakOpt<T>(Option<Weak<T>>);\n\n#[derive(Clone, Copy, Debug)]\npub struct Config {\n    pub idle_timeout: Option<Duration>,\n    pub max_idle_per_host: usize,\n    pub max_pool_size: Option<NonZeroUsize>,\n}\n\nimpl Config {\n    pub fn is_enabled(&self) -> bool {\n        self.max_idle_per_host > 0\n    }\n}\n\nimpl<T, K: Key> Pool<T, K> {\n    pub fn new<E>(config: Config, executor: E, timer: Time) -> Pool<T, K>\n    where\n        E: Executor<exec::BoxSendFuture> + Send + Sync + Clone + 'static,\n    {\n        let inner = if config.is_enabled() {\n            Some(Arc::new(Mutex::new(PoolInner {\n                connecting: HashSet::default(),\n                idle: config\n                    .max_pool_size\n                    .map_or_else(LruCache::unbounded, LruCache::new),\n                idle_interval_ref: None,\n                max_idle_per_host: config.max_idle_per_host,\n                waiters: HashMap::default(),\n                exec: Exec::new(executor),\n                timer,\n                timeout: config.idle_timeout,\n            })))\n        } else {\n            None\n        };\n\n        Pool { inner }\n    }\n\n    pub(crate) fn is_enabled(&self) -> bool {\n        self.inner.is_some()\n    }\n}\n\nimpl<T: Poolable, K: Key> Pool<T, K> {\n    /// Returns a `Checkout` which is a future that resolves if an idle\n    /// connection becomes available.\n    pub fn checkout(&self, key: K) -> Checkout<T, K> {\n        Checkout {\n            key,\n            pool: self.clone(),\n            waiter: None,\n        }\n    }\n\n    /// Ensure that there is only ever 1 connecting task for HTTP/2\n    /// connections. 
This does nothing for HTTP/1.\n    pub fn connecting(&self, key: K, ver: Ver) -> Option<Connecting<T, K>> {\n        if ver == Ver::Http2 {\n            if let Some(ref enabled) = self.inner {\n                let mut inner = enabled.lock();\n                return if inner.connecting.insert(key.clone()) {\n                    let connecting = Connecting {\n                        key,\n                        pool: WeakOpt::downgrade(enabled),\n                    };\n                    Some(connecting)\n                } else {\n                    trace!(\"HTTP/2 connecting already in progress for {:?}\", key);\n                    None\n                };\n            }\n        }\n\n        // else\n        Some(Connecting {\n            key,\n            // in HTTP/1's case, there is never a lock, so we don't\n            // need to do anything in Drop.\n            pool: WeakOpt::none(),\n        })\n    }\n\n    pub fn pooled(&self, mut connecting: Connecting<T, K>, value: T) -> Pooled<T, K> {\n        let (value, pool_ref) = if let Some(ref enabled) = self.inner {\n            match value.reserve() {\n                Reservation::Shared(to_insert, to_return) => {\n                    let mut inner = enabled.lock();\n                    inner.put(&connecting.key, to_insert, enabled);\n                    // Do this here instead of Drop for Connecting because we\n                    // already have a lock, no need to lock the mutex twice.\n                    inner.connected(&connecting.key);\n                    drop(inner);\n                    // prevent the Drop of Connecting from repeating inner.connected()\n                    connecting.pool = WeakOpt::none();\n\n                    // Shared reservations don't need a reference to the pool,\n                    // since the pool always keeps a copy.\n                    (to_return, WeakOpt::none())\n                }\n                Reservation::Unique(value) => {\n                    // Unique 
reservations must take a reference to the pool\n                    // since they hope to reinsert once the reservation is\n                    // completed\n                    (value, WeakOpt::downgrade(enabled))\n                }\n            }\n        } else {\n            // If pool is not enabled, skip all the things...\n\n            // The Connecting should have had no pool ref\n            debug_assert!(connecting.pool.upgrade().is_none());\n\n            (value, WeakOpt::none())\n        };\n\n        Pooled {\n            key: connecting.key.clone(),\n            is_reused: false,\n            pool: pool_ref,\n            value: Some(value),\n        }\n    }\n\n    fn reuse(&self, key: &K, value: T) -> Pooled<T, K> {\n        debug!(\"reuse idle connection for {:?}\", key);\n        // TODO: unhack this\n        // In Pool::pooled(), which is used for inserting brand new connections,\n        // there's some code that adjusts the pool reference taken depending\n        // on if the Reservation can be shared or is unique. By the time\n        // reuse() is called, the reservation has already been made, and\n        // we just have the final value, without knowledge of if this is\n        // unique or shared. So, the hack is to just assume Ver::Http2 means\n        // shared... 
:(\n        let mut pool_ref = WeakOpt::none();\n        if !value.can_share() {\n            if let Some(ref enabled) = self.inner {\n                pool_ref = WeakOpt::downgrade(enabled);\n            }\n        }\n\n        Pooled {\n            is_reused: true,\n            key: key.clone(),\n            pool: pool_ref,\n            value: Some(value),\n        }\n    }\n}\n\n/// Pop off this list, looking for a usable connection that hasn't expired.\nstruct IdlePopper<'a, T, K> {\n    #[allow(dead_code)]\n    key: &'a K,\n    list: &'a mut Vec<Idle<T>>,\n}\n\nimpl<'a, T: Poolable + 'a, K: Debug> IdlePopper<'a, T, K> {\n    fn pop(self, expiration: &Expiration, now: Instant) -> Option<Idle<T>> {\n        while let Some(entry) = self.list.pop() {\n            // If the connection has been closed, or is older than our idle\n            // timeout, simply drop it and keep looking...\n            if !entry.value.is_open() {\n                trace!(\"removing closed connection for {:?}\", self.key);\n                continue;\n            }\n            // TODO: Actually, since the `idle` list is pushed to the end always,\n            // that would imply that if *this* entry is expired, then anything\n            // \"earlier\" in the list would *have* to be expired also... 
Right?\n            //\n            // In that case, we could just break out of the loop and drop the\n            // whole list...\n            if expiration.expires(entry.idle_at, now) {\n                trace!(\"removing expired connection for {:?}\", self.key);\n                continue;\n            }\n\n            let value = match entry.value.reserve() {\n                Reservation::Shared(to_reinsert, to_checkout) => {\n                    self.list.push(Idle {\n                        idle_at: now,\n                        value: to_reinsert,\n                    });\n                    to_checkout\n                }\n                Reservation::Unique(unique) => unique,\n            };\n\n            return Some(Idle {\n                idle_at: entry.idle_at,\n                value,\n            });\n        }\n\n        None\n    }\n}\n\nimpl<T: Poolable, K: Key> PoolInner<T, K> {\n    fn now(&self) -> Instant {\n        self.timer.now()\n    }\n\n    fn put(&mut self, key: &K, value: T, __pool_ref: &Arc<Mutex<PoolInner<T, K>>>) {\n        if value.can_share() && self.idle.peek(key).is_some() {\n            trace!(\"put; existing idle HTTP/2 connection for {:?}\", key);\n            return;\n        }\n        trace!(\"put; add idle connection for {:?}\", key);\n        let mut remove_waiters = false;\n        let mut value = Some(value);\n        if let Some(waiters) = self.waiters.get_mut(key) {\n            while let Some(tx) = waiters.pop_front() {\n                if !tx.is_closed() {\n                    let reserved = value.take().expect(\"value already sent\");\n                    let reserved = match reserved.reserve() {\n                        Reservation::Shared(to_keep, to_send) => {\n                            value = Some(to_keep);\n                            to_send\n                        }\n                        Reservation::Unique(uniq) => uniq,\n                    };\n                    match tx.send(reserved) {\n          
              Ok(()) => {\n                            if value.is_none() {\n                                break;\n                            } else {\n                                continue;\n                            }\n                        }\n                        Err(e) => {\n                            value = Some(e);\n                        }\n                    }\n                }\n\n                trace!(\"put; removing canceled waiter for {:?}\", key);\n            }\n            remove_waiters = waiters.is_empty();\n        }\n\n        if remove_waiters {\n            self.waiters.remove(key);\n        }\n\n        if let Some(value) = value {\n            // borrow-check scope...\n            {\n                let now = self.now();\n                let idle_list = self\n                    .idle\n                    .get_or_insert_mut(key.clone(), Vec::<Idle<T>>::default);\n\n                if self.max_idle_per_host <= idle_list.len() {\n                    trace!(\"max idle per host for {:?}, dropping connection\", key);\n                    return;\n                }\n\n                debug!(\"pooling idle connection for {:?}\", key);\n                idle_list.push(Idle {\n                    value,\n                    idle_at: now,\n                });\n            }\n\n            self.spawn_idle_interval(__pool_ref);\n        } else {\n            trace!(\"put; found waiter for {:?}\", key)\n        }\n    }\n\n    /// A `Connecting` task is complete. Not necessarily successfully,\n    /// but the lock is going away, so clean up.\n    fn connected(&mut self, key: &K) {\n        let existed = self.connecting.remove(key);\n        debug_assert!(existed, \"Connecting dropped, key not in pool.connecting\");\n        // cancel any waiters. 
if there are any, it's because\n        // this Connecting task didn't complete successfully.\n        // those waiters would never receive a connection.\n        self.waiters.remove(key);\n    }\n\n    fn spawn_idle_interval(&mut self, pool_ref: &Arc<Mutex<PoolInner<T, K>>>) {\n        if self.idle_interval_ref.is_some() {\n            return;\n        }\n\n        let dur = if let Some(dur) = self.timeout {\n            dur\n        } else {\n            return;\n        };\n\n        if dur == Duration::ZERO {\n            return;\n        }\n\n        if matches!(self.timer, Time::Empty) {\n            return;\n        }\n\n        // While someone might want a shorter duration, and it will be respected\n        // at checkout time, there's no need to wake up and proactively evict\n        // faster than this.\n        //\n        // The value of 90ms was chosen as a balance between responsiveness and\n        // efficiency. A shorter interval could lead to unnecessary wake-ups and\n        // increased CPU usage, while a longer interval might delay the eviction\n        // of idle connections. This value has been empirically determined to\n        // work well in typical use cases.\n        const MIN_CHECK: Duration = Duration::from_millis(90);\n\n        let dur = dur.max(MIN_CHECK);\n\n        let (tx, rx) = oneshot::channel();\n        self.idle_interval_ref = Some(tx);\n\n        let interval = IdleTask {\n            timer: self.timer.clone(),\n            duration: dur,\n            pool: WeakOpt::downgrade(pool_ref),\n            pool_drop_notifier: rx,\n        };\n\n        self.exec.execute(interval.run());\n    }\n}\n\nimpl<T, K: Eq + Hash> PoolInner<T, K> {\n    /// Any `FutureResponse`s that were created will have made a `Checkout`,\n    /// and possibly inserted into the pool that it is waiting for an idle\n    /// connection. 
If a user ever dropped that future, we need to clean out\n    /// those parked senders.\n    fn clean_waiters(&mut self, key: &K) {\n        let mut remove_waiters = false;\n        if let Some(waiters) = self.waiters.get_mut(key) {\n            waiters.retain(|tx| !tx.is_closed());\n            remove_waiters = waiters.is_empty();\n        }\n        if remove_waiters {\n            self.waiters.remove(key);\n        }\n    }\n}\n\nimpl<T: Poolable, K: Key> PoolInner<T, K> {\n    /// This should *only* be called by the IdleTask\n    fn clear_expired(&mut self) {\n        let dur = self.timeout.expect(\"interval assumes timeout\");\n        let now = self.now();\n\n        let mut keys_to_remove = Vec::new();\n        for (key, values) in self.idle.iter_mut() {\n            values.retain(|entry| {\n                if !entry.value.is_open() {\n                    trace!(\"idle interval evicting closed for {:?}\", key);\n                    return false;\n                }\n\n                // Avoid `Instant::sub` to avoid issues like rust-lang/rust#86470.\n                if now.saturating_duration_since(entry.idle_at) > dur {\n                    trace!(\"idle interval evicting expired for {:?}\", key);\n                    return false;\n                }\n\n                // Otherwise, keep this value...\n                true\n            });\n\n            // If the list is empty, remove the key.\n            if values.is_empty() {\n                keys_to_remove.push(key.clone());\n            }\n        }\n\n        for key in keys_to_remove {\n            trace!(\"idle interval removing empty key {:?}\", key);\n            self.idle.pop(&key);\n        }\n    }\n}\n\nimpl<T, K: Key> Clone for Pool<T, K> {\n    fn clone(&self) -> Pool<T, K> {\n        Pool {\n            inner: self.inner.clone(),\n        }\n    }\n}\n\n/// A wrapped poolable value that tries to reinsert to the Pool on Drop.\n// Note: The bounds `T: Poolable` is needed for the Drop 
impl.\npub struct Pooled<T: Poolable, K: Key> {\n    value: Option<T>,\n    is_reused: bool,\n    key: K,\n    pool: WeakOpt<Mutex<PoolInner<T, K>>>,\n}\n\nimpl<T: Poolable, K: Key> Pooled<T, K> {\n    pub fn is_reused(&self) -> bool {\n        self.is_reused\n    }\n\n    pub fn is_pool_enabled(&self) -> bool {\n        self.pool.0.is_some()\n    }\n\n    fn as_ref(&self) -> &T {\n        self.value.as_ref().expect(\"not dropped\")\n    }\n\n    fn as_mut(&mut self) -> &mut T {\n        self.value.as_mut().expect(\"not dropped\")\n    }\n}\n\nimpl<T: Poolable, K: Key> Deref for Pooled<T, K> {\n    type Target = T;\n    fn deref(&self) -> &T {\n        self.as_ref()\n    }\n}\n\nimpl<T: Poolable, K: Key> DerefMut for Pooled<T, K> {\n    fn deref_mut(&mut self) -> &mut T {\n        self.as_mut()\n    }\n}\n\nimpl<T: Poolable, K: Key> Drop for Pooled<T, K> {\n    fn drop(&mut self) {\n        if let Some(value) = self.value.take() {\n            if !value.is_open() {\n                // If we *already* know the connection is done here,\n                // it shouldn't be re-inserted back into the pool.\n                return;\n            }\n\n            if let Some(pool) = self.pool.upgrade() {\n                let mut inner = pool.lock();\n                inner.put(&self.key, value, &pool);\n            } else if !value.can_share() {\n                trace!(\"pool dropped, dropping pooled ({:?})\", self.key);\n            }\n            // Ver::Http2 is already in the Pool (or dead), so we wouldn't\n            // have an actual reference to the Pool.\n        }\n    }\n}\n\nimpl<T: Poolable, K: Key> Debug for Pooled<T, K> {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        f.debug_struct(\"Pooled\").field(\"key\", &self.key).finish()\n    }\n}\n\nstruct Idle<T> {\n    idle_at: Instant,\n    value: T,\n}\n\npub struct Checkout<T, K: Key> {\n    key: K,\n    pool: Pool<T, K>,\n    waiter: 
Option<oneshot::Receiver<T>>,\n}\n\n#[derive(Debug)]\n#[non_exhaustive]\npub enum Error {\n    PoolDisabled,\n    CheckoutNoLongerWanted,\n    CheckedOutClosedValue,\n}\n\nimpl Error {\n    pub(super) fn is_canceled(&self) -> bool {\n        matches!(self, Error::CheckedOutClosedValue)\n    }\n}\n\nimpl fmt::Display for Error {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        f.write_str(match self {\n            Error::PoolDisabled => \"pool is disabled\",\n            Error::CheckedOutClosedValue => \"checked out connection was closed\",\n            Error::CheckoutNoLongerWanted => \"request was canceled\",\n        })\n    }\n}\n\nimpl StdError for Error {}\n\nimpl<T: Poolable, K: Key> Checkout<T, K> {\n    fn poll_waiter(\n        &mut self,\n        cx: &mut task::Context<'_>,\n    ) -> Poll<Option<Result<Pooled<T, K>, Error>>> {\n        if let Some(mut rx) = self.waiter.take() {\n            match Pin::new(&mut rx).poll(cx) {\n                Poll::Ready(Ok(value)) => {\n                    if value.is_open() {\n                        Poll::Ready(Some(Ok(self.pool.reuse(&self.key, value))))\n                    } else {\n                        Poll::Ready(Some(Err(Error::CheckedOutClosedValue)))\n                    }\n                }\n                Poll::Pending => {\n                    self.waiter = Some(rx);\n                    Poll::Pending\n                }\n                Poll::Ready(Err(_canceled)) => {\n                    Poll::Ready(Some(Err(Error::CheckoutNoLongerWanted)))\n                }\n            }\n        } else {\n            Poll::Ready(None)\n        }\n    }\n\n    fn checkout(&mut self, cx: &mut task::Context<'_>) -> Option<Pooled<T, K>> {\n        let entry = {\n            let mut inner = self.pool.inner.as_ref()?.lock();\n            let expiration = Expiration::new(inner.timeout);\n            let now = inner.now();\n            let maybe_entry = inner.idle.get_mut(&self.key).and_then(|list| 
{\n                trace!(\"take? {:?}: expiration = {:?}\", self.key, expiration.0);\n                // A block to end the mutable borrow on list,\n                // so the map below can check is_empty()\n                {\n                    let popper = IdlePopper {\n                        key: &self.key,\n                        list,\n                    };\n                    popper.pop(&expiration, now)\n                }\n                .map(|e| (e, list.is_empty()))\n            });\n\n            let (entry, empty) = if let Some((e, empty)) = maybe_entry {\n                (Some(e), empty)\n            } else {\n                // No entry found means nuke the list for sure.\n                (None, true)\n            };\n\n            if empty {\n                inner.idle.pop(&self.key);\n            }\n\n            if entry.is_none() && self.waiter.is_none() {\n                let (tx, mut rx) = oneshot::channel();\n                trace!(\"checkout waiting for idle connection: {:?}\", self.key);\n                inner\n                    .waiters\n                    .entry(self.key.clone())\n                    .or_insert_with(VecDeque::new)\n                    .push_back(tx);\n\n                // register the waker with this oneshot\n                assert!(Pin::new(&mut rx).poll(cx).is_pending());\n                self.waiter = Some(rx);\n            }\n\n            entry\n        };\n\n        entry.map(|e| self.pool.reuse(&self.key, e.value))\n    }\n}\n\nimpl<T: Poolable, K: Key> Future for Checkout<T, K> {\n    type Output = Result<Pooled<T, K>, Error>;\n\n    fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {\n        if let Some(pooled) = ready!(self.poll_waiter(cx)?) 
{\n            return Poll::Ready(Ok(pooled));\n        }\n\n        if let Some(pooled) = self.checkout(cx) {\n            Poll::Ready(Ok(pooled))\n        } else if !self.pool.is_enabled() {\n            Poll::Ready(Err(Error::PoolDisabled))\n        } else {\n            // There's a new waiter, already registered in self.checkout()\n            debug_assert!(self.waiter.is_some());\n            Poll::Pending\n        }\n    }\n}\n\nimpl<T, K: Key> Drop for Checkout<T, K> {\n    fn drop(&mut self) {\n        if self.waiter.take().is_some() {\n            trace!(\"checkout dropped for {:?}\", self.key);\n            if let Some(mut inner) = self.pool.inner.as_ref().map(|i| i.lock()) {\n                inner.clean_waiters(&self.key);\n            }\n        }\n    }\n}\n\npub struct Connecting<T: Poolable, K: Key> {\n    key: K,\n    pool: WeakOpt<Mutex<PoolInner<T, K>>>,\n}\n\nimpl<T: Poolable, K: Key> Connecting<T, K> {\n    pub fn alpn_h2(self, pool: &Pool<T, K>) -> Option<Self> {\n        debug_assert!(\n            self.pool.0.is_none(),\n            \"Connecting::alpn_h2 but already Http2\"\n        );\n\n        pool.connecting(self.key.clone(), Ver::Http2)\n    }\n}\n\nimpl<T: Poolable, K: Key> Drop for Connecting<T, K> {\n    fn drop(&mut self) {\n        if let Some(pool) = self.pool.upgrade() {\n            // No need to panic on drop, that could abort!\n            let mut inner = pool.lock();\n            inner.connected(&self.key);\n        }\n    }\n}\n\nstruct Expiration(Option<Duration>);\n\nimpl Expiration {\n    fn new(dur: Option<Duration>) -> Expiration {\n        Expiration(dur)\n    }\n\n    fn expires(&self, instant: Instant, now: Instant) -> bool {\n        match self.0 {\n            // Avoid `Instant::elapsed` to avoid issues like rust-lang/rust#86470.\n            Some(timeout) => now.saturating_duration_since(instant) > timeout,\n            None => false,\n        }\n    }\n}\n\nstruct IdleTask<T, K: Key> {\n    timer: Time,\n    
duration: Duration,\n    pool: WeakOpt<Mutex<PoolInner<T, K>>>,\n    // This allows the IdleTask to be notified as soon as the entire\n    // Pool is fully dropped, and shutdown. This channel is never sent on,\n    // but Err(Canceled) will be received when the Pool is dropped.\n    pool_drop_notifier: oneshot::Receiver<Infallible>,\n}\n\nimpl<T: Poolable + 'static, K: Key> IdleTask<T, K> {\n    async fn run(self) {\n        use futures_util::future;\n\n        let mut sleep = self.timer.sleep_until(self.timer.now() + self.duration);\n        let mut on_pool_drop = self.pool_drop_notifier;\n        loop {\n            match future::select(&mut on_pool_drop, &mut sleep).await {\n                future::Either::Left(_) => {\n                    // pool dropped, bah-bye\n                    break;\n                }\n                future::Either::Right(((), _)) => {\n                    if let Some(inner) = self.pool.upgrade() {\n                        let mut inner = inner.lock();\n                        trace!(\"idle interval checking for expired\");\n                        inner.clear_expired();\n                        drop(inner);\n                    }\n\n                    let deadline = self.timer.now() + self.duration;\n                    self.timer.reset(&mut sleep, deadline);\n                }\n            }\n        }\n\n        trace!(\"pool closed, canceling idle interval\");\n    }\n}\n\nimpl<T> WeakOpt<T> {\n    fn none() -> Self {\n        WeakOpt(None)\n    }\n\n    fn downgrade(arc: &Arc<T>) -> Self {\n        WeakOpt(Some(Arc::downgrade(arc)))\n    }\n\n    fn upgrade(&self) -> Option<Arc<T>> {\n        self.0.as_ref().and_then(Weak::upgrade)\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::{\n        fmt::Debug,\n        future::Future,\n        hash::Hash,\n        num::NonZero,\n        pin::Pin,\n        sync::Arc,\n        task::{self, Poll},\n        time::Duration,\n    };\n\n    use super::{Connecting, Key, Pool, Poolable, 
Reservation, WeakOpt};\n    use crate::{\n        client::core::rt::{Time, TokioExecutor, TokioTimer},\n        sync::MutexGuard,\n    };\n\n    #[derive(Clone, Debug, PartialEq, Eq, Hash)]\n    struct KeyImpl(http::uri::Scheme, http::uri::Authority);\n\n    /// Test unique reservations.\n    #[derive(Debug, PartialEq, Eq)]\n    struct Uniq<T>(T);\n\n    impl<T: Send + 'static + Unpin> Poolable for Uniq<T> {\n        fn is_open(&self) -> bool {\n            true\n        }\n\n        fn reserve(self) -> Reservation<Self> {\n            Reservation::Unique(self)\n        }\n\n        fn can_share(&self) -> bool {\n            false\n        }\n    }\n\n    fn c<T: Poolable, K: Key>(key: K) -> Connecting<T, K> {\n        Connecting {\n            key,\n            pool: WeakOpt::none(),\n        }\n    }\n\n    fn host_key(s: &str) -> KeyImpl {\n        KeyImpl(http::uri::Scheme::HTTP, s.parse().expect(\"host key\"))\n    }\n\n    fn pool_no_timer<T, K: Key>() -> Pool<T, K> {\n        pool_max_idle_no_timer(usize::MAX)\n    }\n\n    fn pool_max_idle_no_timer<T, K: Key>(max_idle: usize) -> Pool<T, K> {\n        Pool::new(\n            super::Config {\n                idle_timeout: Some(Duration::from_millis(100)),\n                max_idle_per_host: max_idle,\n                max_pool_size: None,\n            },\n            TokioExecutor::new(),\n            Time::Empty,\n        )\n    }\n\n    impl<T: Poolable, K: Key> Pool<T, K> {\n        fn locked(&self) -> MutexGuard<'_, super::PoolInner<T, K>> {\n            self.inner.as_ref().expect(\"enabled\").lock()\n        }\n    }\n\n    #[tokio::test]\n    async fn test_pool_checkout_smoke() {\n        let pool = pool_no_timer();\n        let key = host_key(\"foo\");\n        let pooled = pool.pooled(c(key.clone()), Uniq(41));\n\n        drop(pooled);\n\n        match pool.checkout(key).await {\n            Ok(pooled) => assert_eq!(*pooled, Uniq(41)),\n            Err(_) => panic!(\"not ready\"),\n        };\n    
}\n\n    /// Helper to check if the future is ready after polling once.\n    struct PollOnce<'a, F>(&'a mut F);\n\n    impl<F, T, U> Future for PollOnce<'_, F>\n    where\n        F: Future<Output = Result<T, U>> + Unpin,\n    {\n        type Output = Option<()>;\n\n        fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {\n            match Pin::new(&mut self.0).poll(cx) {\n                Poll::Ready(Ok(_)) => Poll::Ready(Some(())),\n                Poll::Ready(Err(_)) => Poll::Ready(Some(())),\n                Poll::Pending => Poll::Ready(None),\n            }\n        }\n    }\n\n    #[tokio::test]\n    async fn test_pool_checkout_returns_none_if_expired() {\n        let pool = pool_no_timer();\n        let key = host_key(\"foo\");\n        let pooled = pool.pooled(c(key.clone()), Uniq(41));\n\n        drop(pooled);\n        let timeout = pool.locked().timeout.unwrap();\n        tokio::time::sleep(timeout).await;\n        let mut checkout = pool.checkout(key);\n        let poll_once = PollOnce(&mut checkout);\n        let is_not_ready = poll_once.await.is_none();\n        assert!(is_not_ready);\n    }\n\n    #[tokio::test]\n    async fn test_pool_checkout_removes_expired() {\n        let pool = pool_no_timer();\n        let key = host_key(\"foo\");\n\n        pool.pooled(c(key.clone()), Uniq(41));\n        pool.pooled(c(key.clone()), Uniq(5));\n        pool.pooled(c(key.clone()), Uniq(99));\n\n        assert_eq!(\n            pool.locked().idle.get(&key).map(|entries| entries.len()),\n            Some(3)\n        );\n        let timeout = pool.locked().timeout.unwrap();\n        tokio::time::sleep(timeout).await;\n\n        let mut checkout = pool.checkout(key.clone());\n        let poll_once = PollOnce(&mut checkout);\n        // checkout.await should clean out the expired\n        poll_once.await;\n        assert!(pool.locked().idle.get(&key).is_none());\n    }\n\n    #[test]\n    fn test_pool_max_idle_per_host() {\n        
let pool = pool_max_idle_no_timer(2);\n        let key = host_key(\"foo\");\n\n        pool.pooled(c(key.clone()), Uniq(41));\n        pool.pooled(c(key.clone()), Uniq(5));\n        pool.pooled(c(key.clone()), Uniq(99));\n\n        // pooled and dropped 3, max_idle should only allow 2\n        assert_eq!(\n            pool.locked().idle.get(&key).map(|entries| entries.len()),\n            Some(2)\n        );\n    }\n\n    #[tokio::test]\n    async fn test_pool_timer_removes_expired_realtime() {\n        test_pool_timer_removes_expired_inner().await\n    }\n\n    #[tokio::test(start_paused = true)]\n    async fn test_pool_timer_removes_expired_faketime() {\n        test_pool_timer_removes_expired_inner().await\n    }\n\n    async fn test_pool_timer_removes_expired_inner() {\n        let pool = Pool::new(\n            super::Config {\n                idle_timeout: Some(Duration::from_millis(10)),\n                max_idle_per_host: usize::MAX,\n                max_pool_size: None,\n            },\n            TokioExecutor::new(),\n            Time::Timer(Arc::new(TokioTimer::new())),\n        );\n\n        let key = host_key(\"foo\");\n\n        pool.pooled(c(key.clone()), Uniq(41));\n        pool.pooled(c(key.clone()), Uniq(5));\n        pool.pooled(c(key.clone()), Uniq(99));\n\n        assert_eq!(\n            pool.locked().idle.get(&key).map(|entries| entries.len()),\n            Some(3)\n        );\n\n        // Let the timer tick passed the expiration...\n        tokio::time::sleep(Duration::from_millis(30)).await;\n\n        // But minimum interval is higher, so nothing should have been reaped\n        assert_eq!(\n            pool.locked().idle.get(&key).map(|entries| entries.len()),\n            Some(3)\n        );\n\n        // Now wait passed the minimum interval more\n        tokio::time::sleep(Duration::from_millis(70)).await;\n\n        assert!(pool.locked().idle.get(&key).is_none());\n    }\n\n    #[tokio::test]\n    async fn 
test_pool_checkout_task_unparked() {\n        use futures_util::{FutureExt, future::join};\n\n        let pool = pool_no_timer();\n        let key = host_key(\"foo\");\n        let pooled = pool.pooled(c(key.clone()), Uniq(41));\n\n        let checkout = join(pool.checkout(key), async {\n            // the checkout future will park first,\n            // and then this lazy future will be polled, which will insert\n            // the pooled back into the pool\n            //\n            // this test makes sure that doing so will unpark the checkout\n            drop(pooled);\n        })\n        .map(|(entry, _)| entry);\n\n        assert_eq!(*checkout.await.unwrap(), Uniq(41));\n    }\n\n    #[tokio::test]\n    async fn test_pool_checkout_drop_cleans_up_waiters() {\n        let pool = pool_no_timer::<Uniq<i32>, KeyImpl>();\n        let key = host_key(\"foo\");\n\n        let mut checkout1 = pool.checkout(key.clone());\n        let mut checkout2 = pool.checkout(key.clone());\n\n        let poll_once1 = PollOnce(&mut checkout1);\n        let poll_once2 = PollOnce(&mut checkout2);\n\n        // first poll needed to get into Pool's parked\n        poll_once1.await;\n        assert_eq!(pool.locked().waiters.get(&key).unwrap().len(), 1);\n        poll_once2.await;\n        assert_eq!(pool.locked().waiters.get(&key).unwrap().len(), 2);\n\n        // on drop, clean up Pool\n        drop(checkout1);\n        assert_eq!(pool.locked().waiters.get(&key).unwrap().len(), 1);\n\n        drop(checkout2);\n        assert!(!pool.locked().waiters.contains_key(&key));\n    }\n\n    #[derive(Debug)]\n    struct CanClose {\n        #[allow(unused)]\n        val: i32,\n        closed: bool,\n    }\n\n    impl Poolable for CanClose {\n        fn is_open(&self) -> bool {\n            !self.closed\n        }\n\n        fn reserve(self) -> Reservation<Self> {\n            Reservation::Unique(self)\n        }\n\n        fn can_share(&self) -> bool {\n            false\n        }\n    }\n\n   
 #[test]\n    fn pooled_drop_if_closed_doesnt_reinsert() {\n        let pool = pool_no_timer();\n        let key = host_key(\"foo\");\n        pool.pooled(\n            c(key.clone()),\n            CanClose {\n                val: 57,\n                closed: true,\n            },\n        );\n\n        assert!(pool.locked().idle.get(&key).is_none());\n    }\n\n    #[tokio::test]\n    async fn test_pool_size_limit() {\n        let pool = Pool::new(\n            super::Config {\n                idle_timeout: Some(Duration::from_millis(100)),\n                max_idle_per_host: usize::MAX,\n                max_pool_size: Some(NonZero::new(2).expect(\"max pool size\")),\n            },\n            TokioExecutor::new(),\n            Time::Empty,\n        );\n        let key1 = host_key(\"foo\");\n        let key2 = host_key(\"bar\");\n        let key3 = host_key(\"baz\");\n\n        pool.pooled(c(key1.clone()), Uniq(41));\n        pool.pooled(c(key2.clone()), Uniq(5));\n        pool.pooled(c(key3.clone()), Uniq(99));\n\n        assert!(pool.locked().idle.get(&key1).is_none());\n        assert!(pool.locked().idle.get(&key2).is_some());\n        assert!(pool.locked().idle.get(&key3).is_some());\n    }\n}\n"
  },
  {
    "path": "src/client/layer/client.rs",
    "content": "//! Much of this codebase is adapted and refined from [hyper](https://github.com/hyperium/hyper-util),\n\nmod exec;\nmod lazy;\nmod pool;\n\nuse std::{\n    error::Error as StdError,\n    fmt,\n    future::Future,\n    num::NonZeroUsize,\n    pin::Pin,\n    sync::Arc,\n    task::{self, Poll},\n    time::Duration,\n};\n\nuse bytes::Bytes;\nuse futures_util::future::{self, BoxFuture, Either, FutureExt, TryFutureExt};\nuse http::{\n    HeaderValue, Method, Request, Response, Uri, Version,\n    header::{HOST, PROXY_AUTHORIZATION},\n    uri::{Authority, PathAndQuery, Scheme},\n};\nuse http_body::Body;\nuse pool::Ver;\nuse tokio::io::{AsyncRead, AsyncWrite};\nuse tower::{BoxError, util::Oneshot};\n#[cfg(feature = \"cookies\")]\nuse {\n    crate::cookie::{CookieStore, Cookies},\n    http::header::COOKIE,\n};\n\nuse self::{\n    exec::Exec,\n    lazy::{Started as Lazy, lazy},\n};\n#[cfg(feature = \"socks\")]\nuse crate::client::conn::socks;\nuse crate::{\n    client::{\n        conn::{\n            Connected, Connection,\n            descriptor::{ConnectionDescriptor, ConnectionId},\n            tunnel,\n        },\n        core::{\n            self,\n            body::Incoming,\n            conn,\n            dispatch::TrySendError as ConnTrySendError,\n            http1::Http1Options,\n            http2::Http2Options,\n            rt::{Executor, Time, Timer},\n        },\n        layer::config::RequestOptions,\n    },\n    config::RequestConfig,\n    error::ProxyConnect,\n};\n\ntype BoxSendFuture = Pin<Box<dyn Future<Output = ()> + Send>>;\n\n/// A HttpClient to make outgoing HTTP requests.\n///\n/// `HttpClient` is cheap to clone and cloning is the recommended way to share a `HttpClient`. 
The\n/// underlying connection pool will be reused.\n#[must_use]\npub(crate) struct HttpClient<C, B> {\n    config: Config,\n    connector: C,\n    exec: Exec,\n    h1_builder: conn::http1::Builder,\n    h2_builder: conn::http2::Builder<Exec>,\n    pool: pool::Pool<PoolClient<B>, ConnectionId>,\n    #[cfg(feature = \"cookies\")]\n    cookie_store: RequestConfig<Arc<dyn CookieStore>>,\n}\n\n#[derive(Clone, Copy)]\nstruct Config {\n    retry_canceled_requests: bool,\n    set_host: bool,\n    ver: Ver,\n}\n\n#[derive(Debug)]\npub struct Error {\n    kind: ErrorKind,\n    source: Option<BoxError>,\n    #[allow(unused)]\n    connect_info: Option<Connected>,\n}\n\n#[derive(Debug)]\nenum ErrorKind {\n    Canceled,\n    ChannelClosed,\n    Connect,\n    ProxyConnect,\n    UserUnsupportedRequestMethod,\n    UserUnsupportedVersion,\n    UserAbsoluteUriRequired,\n    SendRequest,\n}\n\nenum ClientConnectError {\n    Normal(Error),\n    CheckoutIsClosed(pool::Error),\n}\n\n#[allow(clippy::large_enum_variant)]\nenum TrySendError<B> {\n    Retryable {\n        error: Error,\n        req: Request<B>,\n        connection_reused: bool,\n    },\n    Nope(Error),\n}\n\nmacro_rules! 
e {\n    ($kind:ident) => {\n        Error {\n            kind: ErrorKind::$kind,\n            source: None,\n            connect_info: None,\n        }\n    };\n    ($kind:ident, $src:expr) => {\n        Error {\n            kind: ErrorKind::$kind,\n            source: Some($src.into()),\n            connect_info: None,\n        }\n    };\n}\n\n// ===== impl HttpClient =====\n\nimpl HttpClient<(), ()> {\n    /// Create a builder to configure a new [`HttpClient`].\n    #[inline]\n    pub fn builder<E>(executor: E) -> Builder\n    where\n        E: Executor<BoxSendFuture> + Send + Sync + Clone + 'static,\n    {\n        Builder::new(executor)\n    }\n}\n\nimpl<C, B> HttpClient<C, B>\nwhere\n    C: tower::Service<ConnectionDescriptor> + Clone + Send + Sync + 'static,\n    C::Response: AsyncRead + AsyncWrite + Connection + Unpin + Send + 'static,\n    C::Error: Into<BoxError>,\n    C::Future: Unpin + Send + 'static,\n    B: Body + Send + 'static + Unpin,\n    B::Data: Send,\n    B::Error: Into<BoxError>,\n{\n    fn request(\n        &self,\n        mut req: Request<B>,\n    ) -> BoxFuture<'static, Result<Response<Incoming>, BoxError>> {\n        let is_http_connect = req.method() == Method::CONNECT;\n        // Validate HTTP version early\n        match req.version() {\n            Version::HTTP_10 if is_http_connect => {\n                warn!(\"CONNECT is not allowed for HTTP/1.0\");\n                return Box::pin(future::err(e!(UserUnsupportedRequestMethod).into()));\n            }\n            Version::HTTP_10 | Version::HTTP_11 | Version::HTTP_2 => {}\n            // completely unsupported HTTP version (like HTTP/0.9)!\n            _unsupported => {\n                warn!(\"Request has unsupported version: {:?}\", _unsupported);\n                return Box::pin(future::err(e!(UserUnsupportedVersion).into()));\n            }\n        };\n\n        // Extract and normalize URI\n        let uri = match normalize_uri(&mut req, is_http_connect) {\n            
Ok(uri) => uri,\n            Err(err) => {\n                return Box::pin(future::err(e!(UserAbsoluteUriRequired, err).into()));\n            }\n        };\n\n        let mut this = self.clone();\n\n        // Extract per-request options from the request extensions and apply them to the client.\n        let descriptor = {\n            let RequestOptions {\n                group,\n                proxy,\n                version,\n                tls_options,\n                http1_options,\n                http2_options,\n                socket_bind_options,\n            } = RequestConfig::<RequestOptions>::remove(req.extensions_mut()).unwrap_or_default();\n\n            if let Some(opts) = http1_options {\n                this.h1_builder.options(opts);\n            }\n            if let Some(opts) = http2_options {\n                this.h2_builder.options(opts);\n            }\n            ConnectionDescriptor::new(uri, group, proxy, version, tls_options, socket_bind_options)\n        };\n\n        Box::pin(this.send_request(req, descriptor).map_err(Into::into))\n    }\n\n    async fn send_request(\n        self,\n        mut req: Request<B>,\n        descriptor: ConnectionDescriptor,\n    ) -> Result<Response<Incoming>, Error> {\n        let uri = req.uri().clone();\n\n        loop {\n            req = match self.try_send_request(req, descriptor.clone()).await {\n                Ok(resp) => return Ok(resp),\n                Err(TrySendError::Nope(err)) => return Err(err),\n                Err(TrySendError::Retryable {\n                    mut req,\n                    error,\n                    connection_reused,\n                }) => {\n                    if !self.config.retry_canceled_requests || !connection_reused {\n                        // if client disabled, don't retry\n                        // a fresh connection means we definitely can't retry\n                        return Err(error);\n                    }\n\n                    trace!(\n       
                 \"unstarted request canceled, trying again (reason={:?})\",\n                        error\n                    );\n                    *req.uri_mut() = uri.clone();\n                    req\n                }\n            }\n        }\n    }\n\n    async fn try_send_request(\n        &self,\n        mut req: Request<B>,\n        descriptor: ConnectionDescriptor,\n    ) -> Result<Response<Incoming>, TrySendError<B>> {\n        let mut pooled = self\n            .connection_for(descriptor)\n            .await\n            // `connection_for` already retries checkout errors, so if\n            // it returns an error, there's not much else to retry\n            .map_err(TrySendError::Nope)?;\n\n        let uri = req.uri().clone();\n\n        if pooled.is_http1() {\n            if req.version() == Version::HTTP_2 {\n                warn!(\"Connection is HTTP/1, but request requires HTTP/2\");\n                return Err(TrySendError::Nope(\n                    e!(UserUnsupportedVersion).with_connect_info(pooled.conn_info.clone()),\n                ));\n            }\n\n            if self.config.set_host {\n                req.headers_mut()\n                    .entry(HOST)\n                    .or_insert_with(|| generate_host_header(&uri));\n            }\n\n            // CONNECT always sends authority-form, so check it first...\n            if req.method() == Method::CONNECT {\n                authority_form(req.uri_mut());\n            } else if pooled.conn_info.is_proxied() {\n                if let Some(auth) = pooled.conn_info.proxy_auth() {\n                    req.headers_mut()\n                        .entry(PROXY_AUTHORIZATION)\n                        .or_insert_with(|| auth.clone());\n                }\n\n                if let Some(headers) = pooled.conn_info.proxy_headers() {\n                    crate::util::replace_headers(req.headers_mut(), headers.clone());\n                }\n\n                absolute_form(req.uri_mut());\n         
   } else {\n                origin_form(req.uri_mut());\n            }\n        } else if req.method() == Method::CONNECT && !pooled.is_http2() {\n            authority_form(req.uri_mut());\n        }\n\n        #[cfg(feature = \"cookies\")]\n        let cookie_store = self.cookie_store.fetch(req.extensions()).cloned();\n\n        #[cfg(feature = \"cookies\")]\n        if let Some(ref cookie_store) = cookie_store {\n            let headers = req.headers_mut();\n\n            if !headers.contains_key(COOKIE) {\n                let version = if pooled.is_http2() {\n                    Version::HTTP_2\n                } else {\n                    Version::HTTP_11\n                };\n\n                match cookie_store.cookies(&uri, version) {\n                    Cookies::Compressed(value) => {\n                        headers.insert(COOKIE, value);\n                    }\n                    Cookies::Uncompressed(values) => {\n                        for value in values {\n                            headers.append(COOKIE, value);\n                        }\n                    }\n                    Cookies::Empty => (),\n                }\n            }\n        }\n\n        let mut res = match pooled.try_send_request(req).await {\n            Ok(res) => res,\n            Err(mut err) => {\n                return if let Some(req) = err.take_message() {\n                    Err(TrySendError::Retryable {\n                        connection_reused: pooled.is_reused(),\n                        error: Error::new(ErrorKind::Canceled, err.into_error())\n                            .with_connect_info(pooled.conn_info.clone()),\n                        req,\n                    })\n                } else {\n                    Err(TrySendError::Nope(\n                        Error::new(ErrorKind::SendRequest, err.into_error())\n                            .with_connect_info(pooled.conn_info.clone()),\n                    ))\n                };\n            }\n        
};\n\n        #[cfg(feature = \"cookies\")]\n        if let Some(cookie_store) = cookie_store {\n            let mut cookies = res\n                .headers()\n                .get_all(http::header::SET_COOKIE)\n                .iter()\n                .peekable();\n            if cookies.peek().is_some() {\n                cookie_store.set_cookies(&mut cookies, &uri);\n            }\n        }\n\n        // If the Connector included 'extra' info, add to Response...\n        pooled.conn_info.set_extras(res.extensions_mut());\n\n        // If the Connector included connection info, add to Response...\n        res.extensions_mut().insert(pooled.conn_info.clone());\n\n        // If pooled is HTTP/2, we can toss this reference immediately.\n        //\n        // when pooled is dropped, it will try to insert back into the\n        // pool. To delay that, spawn a future that completes once the\n        // sender is ready again.\n        //\n        // This *should* only be once the related `Connection` has polled\n        // for a new request to start.\n        //\n        // It won't be ready if there is a body to stream.\n        if pooled.is_http2() || !pooled.is_pool_enabled() || pooled.is_ready() {\n            drop(pooled);\n        } else {\n            let on_idle = std::future::poll_fn(move |cx| pooled.poll_ready(cx)).map(|_| ());\n            self.exec.execute(on_idle);\n        }\n\n        Ok(res)\n    }\n\n    async fn connection_for(\n        &self,\n        descriptor: ConnectionDescriptor,\n    ) -> Result<pool::Pooled<PoolClient<B>, ConnectionId>, Error> {\n        loop {\n            match self.one_connection_for(descriptor.clone()).await {\n                Ok(pooled) => return Ok(pooled),\n                Err(ClientConnectError::Normal(err)) => return Err(err),\n                Err(ClientConnectError::CheckoutIsClosed(reason)) => {\n                    if !self.config.retry_canceled_requests {\n                        return 
Err(Error::new(ErrorKind::Connect, reason));\n                    }\n\n                    trace!(\n                        \"unstarted request canceled, trying again (reason={:?})\",\n                        reason,\n                    );\n                    continue;\n                }\n            };\n        }\n    }\n\n    async fn one_connection_for(\n        &self,\n        descriptor: ConnectionDescriptor,\n    ) -> Result<pool::Pooled<PoolClient<B>, ConnectionId>, ClientConnectError> {\n        // Return a single connection if pooling is not enabled\n        if !self.pool.is_enabled() {\n            return self\n                .connect_to(descriptor)\n                .await\n                .map_err(ClientConnectError::Normal);\n        }\n\n        // This actually races 2 different futures to try to get a ready\n        // connection the fastest, and to reduce connection churn.\n        //\n        // - If the pool has an idle connection waiting, that's used immediately.\n        // - Otherwise, the Connector is asked to start connecting to the destination Uri.\n        // - Meanwhile, the pool Checkout is watching to see if any other request finishes and tries\n        //   to insert an idle connection.\n        // - If a new connection is started, but the Checkout wins after (an idle connection became\n        //   available first), the started connection future is spawned into the runtime to\n        //   complete, and then be inserted into the pool as an idle connection.\n        let checkout = self.pool.checkout(descriptor.id());\n        let connect = self.connect_to(descriptor);\n        let is_ver_h2 = self.config.ver == Ver::Http2;\n\n        // The order of the `select` is depended on below...\n\n        match futures_util::future::select(checkout, connect).await {\n            // Checkout won, connect future may have been started or not.\n            //\n            // If it has, let it finish and insert back into the pool,\n            // 
so as to not waste the socket...\n            Either::Left((Ok(checked_out), connecting)) => {\n                // This depends on the `select` above having the correct\n                // order, such that if the checkout future were ready\n                // immediately, the connect future will never have been\n                // started.\n                //\n                // If it *wasn't* ready yet, then the connect future will\n                // have been started...\n                if connecting.started() {\n                    let bg = connecting\n                        .map_err(|_err| {\n                            trace!(\"background connect error: {}\", _err);\n                        })\n                        .map(|_pooled| {\n                            // dropping here should just place it in\n                            // the Pool for us...\n                        });\n                    // An execute error here isn't important, we're just trying\n                    // to prevent a waste of a socket...\n                    self.exec.execute(bg);\n                }\n                Ok(checked_out)\n            }\n            // Connect won, checkout can just be dropped.\n            Either::Right((Ok(connected), _checkout)) => Ok(connected),\n            // Either checkout or connect could get canceled:\n            //\n            // 1. Connect is canceled if this is HTTP/2 and there is an outstanding HTTP/2\n            //    connecting task.\n            // 2. 
Checkout is canceled if the pool cannot deliver an idle connection reliably.\n            //\n            // In both cases, we should just wait for the other future.\n            Either::Left((Err(err), connecting)) => {\n                if err.is_canceled() {\n                    connecting.await.map_err(ClientConnectError::Normal)\n                } else {\n                    Err(ClientConnectError::Normal(Error::new(\n                        ErrorKind::Connect,\n                        err,\n                    )))\n                }\n            }\n            Either::Right((Err(err), checkout)) => {\n                if err.is_canceled() {\n                    checkout.await.map_err(move |err| {\n                        if is_ver_h2 && err.is_canceled() {\n                            ClientConnectError::CheckoutIsClosed(err)\n                        } else {\n                            ClientConnectError::Normal(Error::new(ErrorKind::Connect, err))\n                        }\n                    })\n                } else {\n                    Err(ClientConnectError::Normal(err))\n                }\n            }\n        }\n    }\n\n    fn connect_to(\n        &self,\n        descriptor: ConnectionDescriptor,\n    ) -> impl Lazy<Output = Result<pool::Pooled<PoolClient<B>, ConnectionId>, Error>>\n    + Send\n    + Unpin\n    + 'static {\n        let executor = self.exec.clone();\n        let pool = self.pool.clone();\n\n        let h1_builder = self.h1_builder.clone();\n        let h2_builder = self.h2_builder.clone();\n        let ver = match descriptor.version() {\n            Some(Version::HTTP_2) => Ver::Http2,\n            _ => self.config.ver,\n        };\n        let is_ver_h2 = ver == Ver::Http2;\n        let connector = self.connector.clone();\n        lazy(move || {\n            // Try to take a \"connecting lock\".\n            //\n            // If the pool_key is for HTTP/2, and there is already a\n            // connection being established, 
then this can't take a\n            // second lock. The \"connect_to\" future is Canceled.\n            let connecting = match pool.connecting(descriptor.id(), ver) {\n                Some(lock) => lock,\n                None => {\n                    // HTTP/2 connection in progress.\n                    return Either::Right(futures_util::future::err(e!(Canceled)));\n                }\n            };\n            Either::Left(\n                Oneshot::new(connector, descriptor)\n                    .map_err(|src| Error::new(ErrorKind::Connect, src))\n                    .and_then(move |io| {\n                        let connected = io.connected();\n                        // If ALPN is h2 and we aren't http2_only already,\n                        // then we need to convert our pool checkout into\n                        // a single HTTP2 one.\n                        let connecting = if connected.is_negotiated_h2() && !is_ver_h2 {\n                            match connecting.alpn_h2(&pool) {\n                                Some(lock) => {\n                                    trace!(\"ALPN negotiated h2, updating pool\");\n                                    lock\n                                }\n                                None => {\n                                    // Another connection has already upgraded,\n                                    // the pool checkout should finish up for us.\n                                    let canceled =Error::new(ErrorKind::Canceled, \"ALPN upgraded to HTTP/2\");\n                                    return Either::Right(futures_util::future::err(canceled));\n                                }\n                            }\n                        } else {\n                            connecting\n                        };\n\n                        let is_h2 = is_ver_h2 || connected.is_negotiated_h2();\n\n                        Either::Left(Box::pin(async move {\n                            let tx = if is_h2 {\n  
                             {\n                                    let (mut tx, conn) =\n                                        h2_builder.handshake(io).await.map_err(Error::tx)?;\n\n                                    trace!(\n                                        \"http2 handshake complete, spawning background dispatcher task\"\n                                    );\n                                    executor.execute(\n                                        conn.map_err(|_e| debug!(\"client connection error: {}\", _e))\n                                            .map(|_| ()),\n                                    );\n\n                                    // Wait for 'conn' to ready up before we\n                                    // declare this tx as usable\n                                    tx.ready().await.map_err(Error::tx)?;\n                                    PoolTx::Http2(tx)\n                                }\n                            } else {\n                                 {\n                                    // Perform the HTTP/1.1 handshake on the provided I/O stream. 
\n                                    // Uses the h1_builder to establish a connection, returning a sender (tx) for requests\n                                    // and a connection task (conn) that manages the connection lifecycle.\n                                    let (mut tx, conn) =\n                                        h1_builder.handshake(io).await.map_err(Error::tx)?;\n\n                                    // Log that the HTTP/1.1 handshake has completed successfully.\n                                    // This indicates the connection is established and ready for request processing.\n                                    trace!(\n                                        \"http1 handshake complete, spawning background dispatcher task\"\n                                    );\n\n                                    // Create a oneshot channel to communicate errors from the connection task.\n                                    // err_tx sends errors from the connection task, and err_rx receives them\n                                    // to correlate connection failures with request readiness errors.\n                                    let (err_tx, err_rx) = tokio::sync::oneshot::channel();\n                                    // Spawn the connection task in the background using the executor.\n                                    // The task manages the HTTP/1.1 connection, including upgrades (e.g., WebSocket).\n                                    // Errors are sent via err_tx to ensure they can be checked if the sender (tx) fails.\n                                    executor.execute(\n                                        conn.with_upgrades()\n                                                .map_err(|e| {\n                                                // Log the connection error at debug level for diagnostic purposes.\n                                                debug!(\"client connection error: {:?}\", e);\n                           
                     // Log that the error is being sent to the error channel.\n                                                trace!(\"sending connection error to error channel\");\n                                                // Send the error via the oneshot channel, ignoring send failures\n                                                // (e.g., if the receiver is dropped, which is handled later).\n                                                let _ = err_tx.send(e);\n                                            })\n                                            .map(|_| ()),\n                                    );\n\n                                    // Log that the client is waiting for the connection to be ready.\n                                    // Readiness indicates the sender (tx) can accept a request without blocking.\n                                    trace!(\"waiting for connection to be ready\");\n\n                                    // Check if the sender is ready to accept a request.\n                                    // This ensures the connection is fully established before proceeding.\n                                    // Wait for 'conn' to ready up before we\n                                    // declare this tx as usable\n                                    match tx.ready().await {\n                                        // If ready, the connection is usable for sending requests.\n                                        Ok(_) => {\n                                            // Log that the connection is ready for use.\n                                            trace!(\"connection is ready\");\n                                            // Drop the error receiver, as it’s no longer needed since the sender is ready.\n                                            // This prevents waiting for errors that won’t occur in a successful case.\n                                            drop(err_rx);\n                    
                        // Wrap the sender in PoolTx::Http1 for use in the connection pool.\n                                            PoolTx::Http1(tx)\n                                        }\n                                        // If the sender fails with a closed channel error, check for a specific connection error.\n                                        // This distinguishes between a vague ChannelClosed error and an actual connection failure.\n                                        Err(e) if e.is_closed() => {\n                                            // Log that the channel is closed, indicating a potential connection issue.\n                                            trace!(\"connection channel closed, checking for connection error\");\n                                            // Check the oneshot channel for a specific error from the connection task.\n                                            match err_rx.await {\n                                                // If an error was received, it’s a specific connection failure.\n                                                Ok(err) => {\n                                                     // Log the specific connection error for diagnostics.\n                                                    trace!(\"received connection error: {:?}\", err);\n                                                    // Return the error wrapped in Error::tx to propagate it.\n                                                    return Err(Error::tx(err));\n                                                }\n                                                // If the error channel is closed, no specific error was sent.\n                                                // Fall back to the vague ChannelClosed error.\n                                                Err(_) => {\n                                                    // Log that the error channel is closed, indicating no specific error.\n                    
                                trace!(\"error channel closed, returning the vague ChannelClosed error\");\n                                                    // Return the original error wrapped in Error::tx.\n                                                    return Err(Error::tx(e));\n                                                }\n                                            }\n                                        }\n                                        // For other errors (e.g., timeout, I/O issues), propagate them directly.\n                                        // These are not ChannelClosed errors and don’t require error channel checks.\n                                        Err(e) => {\n                                            // Log the specific readiness failure for diagnostics.\n                                            trace!(\"connection readiness failed: {:?}\", e);\n                                            // Return the error wrapped in Error::tx to propagate it.\n                                            return Err(Error::tx(e));\n                                        }\n                                    }\n                                }\n                            };\n\n                            Ok(pool.pooled(\n                                connecting,\n                                PoolClient {\n                                    conn_info: connected,\n                                    tx,\n                                },\n                            ))\n                        }))\n                    }),\n            )\n        })\n    }\n}\n\nimpl<C, B> tower::Service<Request<B>> for HttpClient<C, B>\nwhere\n    C: tower::Service<ConnectionDescriptor> + Clone + Send + Sync + 'static,\n    C::Response: AsyncRead + AsyncWrite + Connection + Unpin + Send + 'static,\n    C::Error: Into<BoxError>,\n    C::Future: Unpin + Send + 'static,\n    B: Body + Send + 'static + Unpin,\n    B::Data: Send,\n    
B::Error: Into<BoxError>,\n{\n    type Response = Response<Incoming>;\n    type Error = BoxError;\n    type Future = BoxFuture<'static, Result<Response<Incoming>, Self::Error>>;\n\n    fn poll_ready(&mut self, _: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {\n        Poll::Ready(Ok(()))\n    }\n\n    fn call(&mut self, req: Request<B>) -> Self::Future {\n        self.request(req)\n    }\n}\n\nimpl<C: Clone, B> Clone for HttpClient<C, B> {\n    fn clone(&self) -> HttpClient<C, B> {\n        HttpClient {\n            config: self.config,\n            exec: self.exec.clone(),\n            h1_builder: self.h1_builder.clone(),\n            h2_builder: self.h2_builder.clone(),\n            connector: self.connector.clone(),\n            pool: self.pool.clone(),\n            #[cfg(feature = \"cookies\")]\n            cookie_store: self.cookie_store.clone(),\n        }\n    }\n}\n\n/// A pooled HTTP connection that can send requests\nstruct PoolClient<B> {\n    conn_info: Connected,\n    tx: PoolTx<B>,\n}\n\nenum PoolTx<B> {\n    Http1(conn::http1::SendRequest<B>),\n    Http2(conn::http2::SendRequest<B>),\n}\n\n// ===== impl PoolClient =====\n\nimpl<B> PoolClient<B> {\n    #[inline]\n    fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<Result<(), Error>> {\n        match self.tx {\n            PoolTx::Http1(ref mut tx) => tx.poll_ready(cx).map_err(Error::closed),\n            PoolTx::Http2(_) => Poll::Ready(Ok(())),\n        }\n    }\n\n    #[inline]\n    fn is_http1(&self) -> bool {\n        !self.is_http2()\n    }\n\n    #[inline]\n    fn is_http2(&self) -> bool {\n        match self.tx {\n            PoolTx::Http1(_) => false,\n            PoolTx::Http2(_) => true,\n        }\n    }\n\n    #[inline]\n    fn is_poisoned(&self) -> bool {\n        self.conn_info.poisoned()\n    }\n\n    #[inline]\n    fn is_ready(&self) -> bool {\n        match self.tx {\n            PoolTx::Http1(ref tx) => tx.is_ready(),\n            PoolTx::Http2(ref tx) => 
tx.is_ready(),\n        }\n    }\n}\n\nimpl<B: Body + 'static> PoolClient<B> {\n    #[inline]\n    fn try_send_request(\n        &mut self,\n        req: Request<B>,\n    ) -> impl Future<Output = Result<Response<Incoming>, ConnTrySendError<Request<B>>>>\n    where\n        B: Send,\n    {\n        match self.tx {\n            PoolTx::Http1(ref mut tx) => Either::Left(tx.try_send_request(req)),\n            PoolTx::Http2(ref mut tx) => Either::Right(tx.try_send_request(req)),\n        }\n    }\n}\n\nimpl<B> pool::Poolable for PoolClient<B>\nwhere\n    B: Send + 'static,\n{\n    #[inline]\n    fn is_open(&self) -> bool {\n        !self.is_poisoned() && self.is_ready()\n    }\n\n    fn reserve(self) -> pool::Reservation<Self> {\n        match self.tx {\n            PoolTx::Http1(tx) => pool::Reservation::Unique(PoolClient {\n                conn_info: self.conn_info,\n                tx: PoolTx::Http1(tx),\n            }),\n\n            PoolTx::Http2(tx) => {\n                let b = PoolClient {\n                    conn_info: self.conn_info.clone(),\n                    tx: PoolTx::Http2(tx.clone()),\n                };\n                let a = PoolClient {\n                    conn_info: self.conn_info,\n                    tx: PoolTx::Http2(tx),\n                };\n                pool::Reservation::Shared(a, b)\n            }\n        }\n    }\n\n    #[inline]\n    fn can_share(&self) -> bool {\n        self.is_http2()\n    }\n}\n\n/// A builder to configure a new [`HttpClient`].\n#[derive(Clone)]\npub struct Builder {\n    config: Config,\n    exec: Exec,\n    h1_builder: conn::http1::Builder,\n    h2_builder: conn::http2::Builder<Exec>,\n    pool_config: pool::Config,\n    pool_timer: Time,\n    #[cfg(feature = \"cookies\")]\n    cookie_store: Option<Arc<dyn CookieStore>>,\n}\n\n// ===== impl Builder =====\n\nimpl Builder {\n    /// Construct a new Builder.\n    pub fn new<E>(executor: E) -> Self\n    where\n        E: Executor<BoxSendFuture> + Send + Sync + 
Clone + 'static,\n    {\n        let exec = Exec::new(executor);\n        Self {\n            config: Config {\n                retry_canceled_requests: true,\n                set_host: true,\n                ver: Ver::Auto,\n            },\n            exec: exec.clone(),\n            h1_builder: conn::http1::Builder::new(),\n            h2_builder: conn::http2::Builder::new(exec),\n            pool_config: pool::Config {\n                idle_timeout: Some(Duration::from_secs(90)),\n                max_idle_per_host: usize::MAX,\n                max_pool_size: None,\n            },\n            pool_timer: Time::Empty,\n            #[cfg(feature = \"cookies\")]\n            cookie_store: None,\n        }\n    }\n    /// Set an optional timeout for idle sockets being kept-alive.\n    /// A `Timer` is required for this to take effect. See `Builder::pool_timer`\n    ///\n    /// Pass `None` to disable timeout.\n    ///\n    /// Default is 90 seconds.\n    #[inline]\n    pub fn pool_idle_timeout<D>(mut self, val: D) -> Self\n    where\n        D: Into<Option<Duration>>,\n    {\n        self.pool_config.idle_timeout = val.into();\n        self\n    }\n\n    /// Sets the maximum idle connection per host allowed in the pool.\n    ///\n    /// Default is `usize::MAX` (no limit).\n    #[inline]\n    pub fn pool_max_idle_per_host(mut self, max_idle: usize) -> Self {\n        self.pool_config.max_idle_per_host = max_idle;\n        self\n    }\n\n    /// Sets the maximum number of connections in the pool.\n    ///\n    /// Default is `None` (no limit).\n    #[inline]\n    pub fn pool_max_size(mut self, max_size: impl Into<Option<NonZeroUsize>>) -> Self {\n        self.pool_config.max_pool_size = max_size.into();\n        self\n    }\n\n    /// Set whether the connection **must** use HTTP/2.\n    ///\n    /// The destination must either allow HTTP2 Prior Knowledge, or the\n    /// `Connect` should be configured to do use ALPN to upgrade to `h2`\n    /// as part of the 
connection process. This will not make the `HttpClient`\n    /// utilize ALPN by itself.\n    ///\n    /// Note that setting this to true prevents HTTP/1 from being allowed.\n    ///\n    /// Default is false.\n    #[inline]\n    pub fn http2_only(mut self, val: bool) -> Self {\n        self.config.ver = if val { Ver::Http2 } else { Ver::Auto };\n        self\n    }\n\n    /// Provide a timer to be used for http2\n    ///\n    /// See the documentation of [`http2::client::Builder::timer`] for more\n    /// details.\n    ///\n    /// [`http2::client::Builder::timer`]: https://docs.rs/http2/latest/http2/client/struct.Builder.html#method.timer\n    #[inline]\n    pub fn http2_timer<M>(mut self, timer: M) -> Self\n    where\n        M: Timer + Send + Sync + 'static,\n    {\n        self.h2_builder.timer(timer);\n        self\n    }\n\n    /// Provide a configuration for HTTP/1.\n    #[inline]\n    pub fn http1_options<O>(mut self, opts: O) -> Self\n    where\n        O: Into<Option<Http1Options>>,\n    {\n        if let Some(opts) = opts.into() {\n            self.h1_builder.options(opts);\n        }\n\n        self\n    }\n\n    /// Provide a configuration for HTTP/2.\n    #[inline]\n    pub fn http2_options<O>(mut self, opts: O) -> Self\n    where\n        O: Into<Option<Http2Options>>,\n    {\n        if let Some(opts) = opts.into() {\n            self.h2_builder.options(opts);\n        }\n        self\n    }\n\n    /// Provide a timer to be used for timeouts and intervals in connection pools.\n    #[inline]\n    pub fn pool_timer<M>(mut self, timer: M) -> Self\n    where\n        M: Timer + Clone + Send + Sync + 'static,\n    {\n        self.pool_timer = Time::Timer(Arc::new(timer));\n        self\n    }\n\n    /// Provide a cookie store for automatic cookie management.\n    #[inline]\n    #[cfg(feature = \"cookies\")]\n    pub fn cookie_store(mut self, cookie_store: Option<Arc<dyn CookieStore>>) -> Self {\n        self.cookie_store = cookie_store;\n        self\n  
  }\n\n    /// Combine the configuration of this builder with a connector to create a `HttpClient`.\n    pub fn build<C, B>(self, connector: C) -> HttpClient<C, B>\n    where\n        C: tower::Service<ConnectionDescriptor> + Clone + Send + Sync + 'static,\n        C::Response: AsyncRead + AsyncWrite + Connection + Unpin + Send + 'static,\n        C::Error: Into<BoxError>,\n        C::Future: Unpin + Send + 'static,\n        B: Body + Send,\n        B::Data: Send,\n    {\n        let exec = self.exec.clone();\n        let timer = self.pool_timer.clone();\n        HttpClient {\n            config: self.config,\n            exec: exec.clone(),\n            connector,\n            h1_builder: self.h1_builder,\n            h2_builder: self.h2_builder,\n            pool: pool::Pool::new(self.pool_config, exec, timer),\n            #[cfg(feature = \"cookies\")]\n            cookie_store: RequestConfig::new(self.cookie_store),\n        }\n    }\n}\n\n// ==== impl Error ====\n\nimpl fmt::Display for Error {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        write!(f, \"client error ({:?})\", self.kind)\n    }\n}\n\nimpl StdError for Error {\n    fn source(&self) -> Option<&(dyn StdError + 'static)> {\n        self.source.as_ref().map(|e| &**e as _)\n    }\n}\n\nimpl Error {\n    fn new<E>(kind: ErrorKind, error: E) -> Self\n    where\n        E: Into<BoxError>,\n    {\n        let error = error.into();\n        let kind = if error.is::<tunnel::TunnelError>() || error.is::<ProxyConnect>() || {\n            #[cfg(feature = \"socks\")]\n            {\n                error.is::<socks::SocksError>()\n            }\n            #[cfg(not(feature = \"socks\"))]\n            {\n                false\n            }\n        } {\n            ErrorKind::ProxyConnect\n        } else {\n            kind\n        };\n\n        Self {\n            kind,\n            source: Some(error),\n            connect_info: None,\n        }\n    }\n\n    /// Returns true if 
this was an error from [`ErrorKind::Connect`].\n    #[inline]\n    pub fn is_connect(&self) -> bool {\n        matches!(self.kind, ErrorKind::Connect)\n    }\n\n    /// Returns true if this was an error from [`ErrorKind::ProxyConnect`].\n    #[inline]\n    pub fn is_proxy_connect(&self) -> bool {\n        matches!(self.kind, ErrorKind::ProxyConnect)\n    }\n\n    #[inline]\n    fn with_connect_info(self, connect_info: Connected) -> Self {\n        Self {\n            connect_info: Some(connect_info),\n            ..self\n        }\n    }\n\n    #[inline]\n    fn is_canceled(&self) -> bool {\n        matches!(self.kind, ErrorKind::Canceled)\n    }\n\n    #[inline]\n    fn tx(src: core::Error) -> Self {\n        Self::new(ErrorKind::SendRequest, src)\n    }\n\n    #[inline]\n    fn closed(src: core::Error) -> Self {\n        Self::new(ErrorKind::ChannelClosed, src)\n    }\n}\n\nfn origin_form(uri: &mut Uri) {\n    let path = match uri.path_and_query() {\n        Some(path) if path.as_str() != \"/\" => {\n            let mut parts = ::http::uri::Parts::default();\n            parts.path_and_query.replace(path.clone());\n            Uri::from_parts(parts).expect(\"path is valid uri\")\n        }\n        _none_or_just_slash => {\n            debug_assert!(Uri::default() == \"/\");\n            Uri::default()\n        }\n    };\n    *uri = path\n}\n\nfn absolute_form(uri: &mut Uri) {\n    debug_assert!(uri.scheme().is_some(), \"absolute_form needs a scheme\");\n    debug_assert!(\n        uri.authority().is_some(),\n        \"absolute_form needs an authority\"\n    );\n}\n\nfn authority_form(uri: &mut Uri) {\n    if let Some(path) = uri.path_and_query() {\n        // `https://hyper.rs` would parse with `/` path, don't\n        // annoy people about that...\n        if path != \"/\" {\n            warn!(\"HTTP/1.1 CONNECT request stripping path: {:?}\", path);\n        }\n    }\n    *uri = match uri.authority() {\n        Some(auth) => {\n            let mut parts = 
::http::uri::Parts::default();\n            parts.authority = Some(auth.clone());\n            Uri::from_parts(parts).expect(\"authority is valid\")\n        }\n        None => {\n            unreachable!(\"authority_form with relative uri\");\n        }\n    };\n}\n\nfn normalize_uri<B>(req: &mut Request<B>, is_http_connect: bool) -> Result<Uri, Error> {\n    let uri = req.uri().clone();\n\n    let build_base_uri = |scheme: Scheme, authority: Authority| {\n        Uri::builder()\n            .scheme(scheme)\n            .authority(authority)\n            .path_and_query(PathAndQuery::from_static(\"/\"))\n            .build()\n            .expect(\"valid base URI\")\n    };\n\n    match (uri.scheme(), uri.authority()) {\n        (Some(scheme), Some(auth)) => Ok(build_base_uri(scheme.clone(), auth.clone())),\n        (None, Some(auth)) if is_http_connect => {\n            let scheme = match auth.port_u16() {\n                Some(443) => Scheme::HTTPS,\n                _ => Scheme::HTTP,\n            };\n            set_scheme(req.uri_mut(), scheme.clone());\n            Ok(build_base_uri(scheme, auth.clone()))\n        }\n        _ => {\n            debug!(\"Client requires absolute-form URIs, received: {:?}\", uri);\n            Err(e!(UserAbsoluteUriRequired))\n        }\n    }\n}\n\nfn generate_host_header(uri: &Uri) -> HeaderValue {\n    let hostname = uri.host().expect(\"authority implies host\");\n    let port = match (uri.port().map(|p| p.as_u16()), is_schema_secure(uri)) {\n        (Some(443), true) | (Some(80), false) => None,\n        _ => uri.port(),\n    };\n    if let Some(port) = port {\n        let host = format!(\"{hostname}:{port}\");\n        HeaderValue::from_maybe_shared(Bytes::from(host))\n    } else {\n        HeaderValue::from_str(hostname)\n    }\n    .expect(\"uri host is valid header value\")\n}\n\nfn set_scheme(uri: &mut Uri, scheme: Scheme) {\n    debug_assert!(\n        uri.scheme().is_none(),\n        \"set_scheme expects no existing 
scheme\"\n    );\n    let old = std::mem::take(uri);\n    let mut parts: ::http::uri::Parts = old.into();\n    parts.scheme = Some(scheme);\n    parts.path_and_query = Some(PathAndQuery::from_static(\"/\"));\n    *uri = Uri::from_parts(parts).expect(\"scheme is valid\");\n}\n\nfn is_schema_secure(uri: &Uri) -> bool {\n    uri.scheme_str()\n        .map(|scheme_str| matches!(scheme_str, \"wss\" | \"https\"))\n        .unwrap_or_default()\n}\n"
  },
  {
    "path": "src/client/layer/config.rs",
    "content": "use std::{\n    sync::Arc,\n    task::{Context, Poll},\n};\n\nuse futures_util::future::{self, Either, Ready};\nuse http::{HeaderMap, Request, Response, Version};\nuse tower::{Layer, Service};\n\nuse crate::{\n    Error,\n    client::{\n        conn::SocketBindOptions,\n        core::{http1::Http1Options, http2::Http2Options},\n        group::Group,\n    },\n    config::RequestConfig,\n    ext::UriExt,\n    header::OrigHeaderMap,\n    proxy::Matcher,\n    tls::TlsOptions,\n};\n\n/// A marker type for the default headers configuration value.\n#[derive(Clone, Copy)]\npub(crate) struct DefaultHeaders;\n\n/// Per-request configuration for proxy, protocol, and transport options.\n/// Overrides client defaults for a single request.\n#[derive(Debug, Default, Clone)]\n#[non_exhaustive]\npub(crate) struct RequestOptions {\n    pub group: Group,\n    pub proxy: Option<Matcher>,\n    pub version: Option<Version>,\n    pub tls_options: Option<TlsOptions>,\n    pub http1_options: Option<Http1Options>,\n    pub http2_options: Option<Http2Options>,\n    pub socket_bind_options: Option<SocketBindOptions>,\n}\n\n/// Configuration for the [`ConfigService`].\nstruct Config {\n    https_only: bool,\n    headers: HeaderMap,\n    orig_headers: RequestConfig<OrigHeaderMap>,\n    default_headers: RequestConfig<DefaultHeaders>,\n}\n\n/// Middleware layer to use [`ConfigService`].\npub struct ConfigServiceLayer {\n    config: Arc<Config>,\n}\n\n/// Middleware service to use [`Config`].\n#[derive(Clone)]\npub struct ConfigService<S> {\n    inner: S,\n    config: Arc<Config>,\n}\n\n// ===== impl DefaultHeaders =====\n\nimpl_request_config_value!(DefaultHeaders, bool);\n\n// ===== impl RequestOptions =====\n\nimpl_request_config_value!(RequestOptions);\n\n// ===== impl ConfigServiceLayer =====\n\nimpl ConfigServiceLayer {\n    /// Creates a new [`ConfigServiceLayer`].\n    pub fn new(https_only: bool, headers: HeaderMap, orig_headers: OrigHeaderMap) -> Self {\n        let 
org_headers = (!orig_headers.is_empty()).then_some(orig_headers);\n        ConfigServiceLayer {\n            config: Arc::new(Config {\n                https_only,\n                headers,\n                orig_headers: RequestConfig::new(org_headers),\n                default_headers: RequestConfig::new(Some(true)),\n            }),\n        }\n    }\n}\n\nimpl<S> Layer<S> for ConfigServiceLayer {\n    type Service = ConfigService<S>;\n\n    #[inline(always)]\n    fn layer(&self, inner: S) -> Self::Service {\n        ConfigService {\n            inner,\n            config: self.config.clone(),\n        }\n    }\n}\n\n// ===== impl ConfigService =====\n\nimpl<ReqBody, ResBody, S> Service<Request<ReqBody>> for ConfigService<S>\nwhere\n    S: Service<Request<ReqBody>, Response = Response<ResBody>>,\n    S::Error: From<Error>,\n{\n    type Response = S::Response;\n    type Error = S::Error;\n    type Future = Either<S::Future, Ready<Result<Self::Response, Self::Error>>>;\n\n    #[inline(always)]\n    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {\n        self.inner.poll_ready(cx)\n    }\n\n    fn call(&mut self, mut req: Request<ReqBody>) -> Self::Future {\n        let uri = req.uri().clone();\n\n        // check if the request URI scheme is valid.\n        if !(uri.is_http() || uri.is_https()) || (self.config.https_only && !uri.is_https()) {\n            return Either::Right(future::err(Error::uri_bad_scheme(uri.clone()).into()));\n        }\n\n        // check if the request ignores the default headers.\n        if self\n            .config\n            .default_headers\n            .fetch(req.extensions())\n            .copied()\n            .unwrap_or_default()\n        {\n            // insert default headers in the request headers\n            // without overwriting already appended headers.\n            let mut dest = self.config.headers.clone();\n            crate::util::replace_headers(&mut dest, 
std::mem::take(req.headers_mut()));\n            std::mem::swap(req.headers_mut(), &mut dest);\n        }\n\n        // store the original headers in request extensions\n        self.config.orig_headers.store(req.extensions_mut());\n\n        Either::Left(self.inner.call(req))\n    }\n}\n"
  },
  {
    "path": "src/client/layer/decoder.rs",
    "content": "//! Middleware for decoding\n\nuse std::task::{Context, Poll};\n\nuse http::{Request, Response};\nuse http_body::Body;\nuse tower::{Layer, Service};\nuse tower_http::decompression::{self, DecompressionBody, ResponseFuture};\n\nuse crate::config::RequestConfig;\n\n/// Configuration for supported content-encoding algorithms.\n///\n/// `AcceptEncoding` controls which compression formats are enabled for decoding\n/// response bodies. Each field corresponds to a specific algorithm and is only\n/// available if the corresponding feature is enabled.\n#[derive(Clone)]\npub(crate) struct AcceptEncoding {\n    #[cfg(feature = \"gzip\")]\n    pub(crate) gzip: bool,\n    #[cfg(feature = \"brotli\")]\n    pub(crate) brotli: bool,\n    #[cfg(feature = \"zstd\")]\n    pub(crate) zstd: bool,\n    #[cfg(feature = \"deflate\")]\n    pub(crate) deflate: bool,\n}\n\n/// Layer that adds response body decompression to a service.\n#[derive(Clone)]\npub struct DecompressionLayer {\n    accept: AcceptEncoding,\n}\n\n/// Service that decompresses response bodies based on the [`AcceptEncoding`] configuration.\n#[derive(Clone)]\npub struct Decompression<S>(Option<decompression::Decompression<S>>);\n\n// ===== AcceptEncoding =====\n\nimpl Default for AcceptEncoding {\n    fn default() -> AcceptEncoding {\n        AcceptEncoding {\n            #[cfg(feature = \"gzip\")]\n            gzip: true,\n            #[cfg(feature = \"brotli\")]\n            brotli: true,\n            #[cfg(feature = \"zstd\")]\n            zstd: true,\n            #[cfg(feature = \"deflate\")]\n            deflate: true,\n        }\n    }\n}\n\nimpl_request_config_value!(AcceptEncoding);\n\n// ===== impl DecompressionLayer =====\n\nimpl DecompressionLayer {\n    /// Creates a new [`DecompressionLayer`] with the specified [`AcceptEncoding`].\n    #[inline(always)]\n    pub fn new(accept: AcceptEncoding) -> Self {\n        Self { accept }\n    }\n}\n\nimpl<S> Layer<S> for DecompressionLayer {\n    type 
Service = Decompression<S>;\n\n    #[inline(always)]\n    fn layer(&self, service: S) -> Self::Service {\n        Decompression(Some(Decompression::<S>::accept_in_place(\n            decompression::Decompression::new(service),\n            &self.accept,\n        )))\n    }\n}\n\n// ===== impl Decompression =====\n\nimpl<S> Decompression<S> {\n    const BUG_MSG: &str = \"[BUG] Decompression service not initialized; bug in setup\";\n\n    fn accept_in_place(\n        mut decoder: decompression::Decompression<S>,\n        accept: &AcceptEncoding,\n    ) -> decompression::Decompression<S> {\n        #[cfg(feature = \"gzip\")]\n        {\n            decoder = decoder.gzip(accept.gzip);\n        }\n\n        #[cfg(feature = \"deflate\")]\n        {\n            decoder = decoder.deflate(accept.deflate);\n        }\n\n        #[cfg(feature = \"brotli\")]\n        {\n            decoder = decoder.br(accept.brotli);\n        }\n\n        #[cfg(feature = \"zstd\")]\n        {\n            decoder = decoder.zstd(accept.zstd);\n        }\n\n        decoder\n    }\n}\n\nimpl<S, ReqBody, ResBody> Service<Request<ReqBody>> for Decompression<S>\nwhere\n    S: Service<Request<ReqBody>, Response = Response<ResBody>>,\n    ReqBody: Body,\n    ResBody: Body,\n{\n    type Response = Response<DecompressionBody<ResBody>>;\n    type Error = S::Error;\n    type Future = ResponseFuture<S::Future>;\n\n    #[inline(always)]\n    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {\n        self.0.as_mut().expect(Self::BUG_MSG).poll_ready(cx)\n    }\n\n    fn call(&mut self, req: Request<ReqBody>) -> Self::Future {\n        if let Some(accept) = RequestConfig::<AcceptEncoding>::get(req.extensions()) {\n            if let Some(decoder) = self.0.take() {\n                self.0\n                    .replace(Decompression::accept_in_place(decoder, accept));\n            }\n            debug_assert!(self.0.is_some());\n        }\n\n        
self.0.as_mut().expect(Self::BUG_MSG).call(req)\n    }\n}\n"
  },
  {
    "path": "src/client/layer/redirect/future.rs",
    "content": "use std::{\n    future::Future,\n    pin::Pin,\n    str,\n    task::{Context, Poll, ready},\n};\n\nuse futures_util::future::Either;\nuse http::{\n    HeaderMap, Method, Request, Response, StatusCode, Uri,\n    header::{CONTENT_ENCODING, CONTENT_LENGTH, CONTENT_TYPE, LOCATION, TRANSFER_ENCODING},\n    request::Parts,\n};\nuse http_body::Body;\nuse pin_project_lite::pin_project;\nuse tower::{BoxError, Service, util::Oneshot};\nuse url::Url;\n\nuse super::{\n    BodyRepr,\n    policy::{Action, Attempt, Policy},\n};\nuse crate::{Error, ext::RequestUri, into_uri::IntoUriSealed};\n\n/// Pending future state for handling redirects.\npub struct Pending<ReqBody, Response> {\n    future: Pin<Box<dyn Future<Output = Action> + Send>>,\n    location: Uri,\n    body: ReqBody,\n    res: Response,\n}\n\npin_project! {\n    /// Response future for [`FollowRedirect`].\n    #[project = ResponseFutureProj]\n    pub enum ResponseFuture<S, B, P>\n    where\n        S: Service<Request<B>>,\n    {\n        Redirect {\n            #[pin]\n            future: Either<S::Future, Oneshot<S, Request<B>>>,\n            pending_future: Option<Pending<B, S::Response>>,\n            service: S,\n            policy: P,\n            parts: Parts,\n            body_repr: BodyRepr<B>,\n        },\n\n        Direct {\n            #[pin]\n            future: S::Future,\n        },\n    }\n}\n\nimpl<S, ReqBody, ResBody, P> Future for ResponseFuture<S, ReqBody, P>\nwhere\n    S: Service<Request<ReqBody>, Response = Response<ResBody>> + Clone,\n    S::Error: From<BoxError>,\n    P: Policy<ReqBody, S::Error>,\n    ReqBody: Body + Default,\n{\n    type Output = Result<Response<ResBody>, S::Error>;\n\n    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {\n        match self.project() {\n            ResponseFutureProj::Direct { mut future } => future.as_mut().poll(cx),\n            ResponseFutureProj::Redirect {\n                mut future,\n                
pending_future,\n                service,\n                policy,\n                parts,\n                body_repr,\n            } => {\n                // Check if we have a pending action to resolve\n                if let Some(mut state) = pending_future.take() {\n                    let action = match state.future.as_mut().poll(cx) {\n                        Poll::Ready(action) => action,\n                        Poll::Pending => {\n                            *pending_future = Some(state);\n                            return Poll::Pending;\n                        }\n                    };\n\n                    return handle_action(\n                        cx,\n                        RedirectAction {\n                            action,\n                            future: &mut future,\n                            service,\n                            policy,\n                            parts,\n                            body: state.body,\n                            body_repr,\n                            res: state.res,\n                            location: state.location,\n                        },\n                    );\n                }\n\n                // Poll the current future to get the response\n                let mut res = {\n                    let mut res = ready!(future.as_mut().poll(cx)?);\n                    res.extensions_mut().insert(RequestUri(parts.uri.clone()));\n                    res\n                };\n\n                // Determine if the response is a redirect\n                match res.status() {\n                    StatusCode::MOVED_PERMANENTLY | StatusCode::FOUND => {\n                        // User agents MAY change the request method from POST to GET\n                        // (RFC 7231 section 6.4.2. 
and 6.4.3.).\n                        if parts.method == Method::POST {\n                            parts.method = Method::GET;\n                            *body_repr = BodyRepr::Empty;\n                            drop_payload_headers(&mut parts.headers);\n                        }\n                    }\n                    StatusCode::SEE_OTHER => {\n                        // A user agent can perform a GET or HEAD request (RFC 7231 section 6.4.4.).\n                        if parts.method != Method::HEAD {\n                            parts.method = Method::GET;\n                        }\n                        *body_repr = BodyRepr::Empty;\n                        drop_payload_headers(&mut parts.headers);\n                    }\n                    StatusCode::TEMPORARY_REDIRECT | StatusCode::PERMANENT_REDIRECT => {}\n                    _ => {\n                        // Not a redirect status code, return the response as is.\n                        policy.on_response(&mut res);\n                        return Poll::Ready(Ok(res));\n                    }\n                };\n\n                // Extract the request body for potential reuse\n                let Some(body) = body_repr.take() else {\n                    return Poll::Ready(Ok(res));\n                };\n\n                // Get and resolve the Location header\n                let Some(location) = res\n                    .headers()\n                    .get(LOCATION)\n                    .and_then(|loc| loc.to_str().ok())\n                    .and_then(|loc| resolve_uri(loc, &parts.uri))\n                else {\n                    return Poll::Ready(Ok(res));\n                };\n\n                // Prepare the attempt for the policy decision\n                let attempt = Attempt {\n                    status: res.status(),\n                    headers: res.headers(),\n                    location: &location,\n                    previous: &parts.uri,\n                };\n\n                
// Resolve the action, awaiting if it's pending\n                let action = match policy.redirect(attempt)? {\n                    Action::Pending(future) => {\n                        // Save the task and necessary state for next poll\n                        *pending_future = Some(Pending {\n                            future,\n                            location,\n                            body,\n                            res,\n                        });\n                        cx.waker().wake_by_ref();\n                        return Poll::Pending;\n                    }\n                    action => action,\n                };\n\n                handle_action(\n                    cx,\n                    RedirectAction {\n                        action,\n                        future: &mut future,\n                        service,\n                        policy,\n                        parts,\n                        body,\n                        body_repr,\n                        res,\n                        location,\n                    },\n                )\n            }\n        }\n    }\n}\n\n/// Try to resolve a URI reference `relative` against a base URI `base`.\nfn resolve_uri(relative: &str, base: &Uri) -> Option<Uri> {\n    Url::parse(&base.to_string())\n        .ok()?\n        .join(relative)\n        .map(String::from)\n        .ok()?\n        .into_uri()\n        .ok()\n}\n\n/// Handle the response based on its status code\nfn drop_payload_headers(headers: &mut HeaderMap) {\n    for header in &[\n        CONTENT_TYPE,\n        CONTENT_LENGTH,\n        CONTENT_ENCODING,\n        TRANSFER_ENCODING,\n    ] {\n        headers.remove(header);\n    }\n}\n\ntype RedirectFuturePin<'a, S, ReqBody> =\n    Pin<&'a mut Either<<S as Service<Request<ReqBody>>>::Future, Oneshot<S, Request<ReqBody>>>>;\n\nstruct RedirectAction<'a, S, ReqBody, ResBody, P>\nwhere\n    S: Service<Request<ReqBody>, Response = Response<ResBody>> + Clone,\n    P: 
Policy<ReqBody, S::Error>,\n{\n    action: Action,\n    future: &'a mut RedirectFuturePin<'a, S, ReqBody>,\n    service: &'a S,\n    policy: &'a mut P,\n    parts: &'a mut Parts,\n    body: ReqBody,\n    body_repr: &'a mut BodyRepr<ReqBody>,\n    res: Response<ResBody>,\n    location: Uri,\n}\n\nfn handle_action<S, ReqBody, ResBody, P>(\n    cx: &mut Context<'_>,\n    redirect: RedirectAction<'_, S, ReqBody, ResBody, P>,\n) -> Poll<Result<Response<ResBody>, S::Error>>\nwhere\n    S: Service<Request<ReqBody>, Response = Response<ResBody>> + Clone,\n    S::Error: From<BoxError>,\n    P: Policy<ReqBody, S::Error>,\n    ReqBody: Body + Default,\n{\n    match redirect.action {\n        Action::Follow => {\n            redirect.parts.uri = redirect.location;\n            redirect\n                .body_repr\n                .try_clone_from(&redirect.body, redirect.policy);\n\n            let mut req = Request::from_parts(redirect.parts.clone(), redirect.body);\n            redirect.policy.on_request(&mut req);\n            redirect\n                .future\n                .set(Either::Right(Oneshot::new(redirect.service.clone(), req)));\n\n            cx.waker().wake_by_ref();\n            Poll::Pending\n        }\n        Action::Stop => Poll::Ready(Ok(redirect.res)),\n        Action::Pending(_) => Poll::Ready(Err(S::Error::from(\n            Error::redirect(\n                \"Nested pending Action is not supported in redirect policy\",\n                redirect.parts.uri.clone(),\n            )\n            .into(),\n        ))),\n        Action::Error(err) => Poll::Ready(Err(err.into())),\n    }\n}\n"
  },
  {
    "path": "src/client/layer/redirect/policy.rs",
    "content": "//! Tools for customizing the behavior of a [`FollowRedirect`][super::FollowRedirect] middleware.\n\nuse std::{fmt, pin::Pin};\n\nuse http::{HeaderMap, Request, Response, StatusCode, Uri};\n\nuse crate::error::BoxError;\n\n/// Trait for the policy on handling redirection responses.\npub trait Policy<B, E> {\n    /// Invoked when the service received a response with a redirection status code (`3xx`).\n    ///\n    /// This method returns an [`Action`] which indicates whether the service should follow\n    /// the redirection.\n    fn redirect(&mut self, attempt: Attempt<'_>) -> Result<Action, E>;\n\n    /// Returns whether redirection is currently permitted by this policy.\n    ///\n    /// This method is called to determine whether the client should follow redirects at all.\n    /// It allows policies to enable or disable redirection behavior based on the [`Request`].\n    fn follow_redirects(&mut self, _request: &mut Request<B>) -> bool;\n\n    /// Invoked right before the service makes a [`Request`].\n    fn on_request(&mut self, _request: &mut Request<B>);\n\n    /// Invoked right after the service received a [`Response`].\n    fn on_response<Body>(&mut self, _response: &mut Response<Body>);\n\n    /// Try to clone a request body before the service makes a redirected request.\n    fn clone_body(&self, _body: &B) -> Option<B>;\n}\n\n/// A type that holds information on a redirection attempt.\npub struct Attempt<'a> {\n    pub(crate) status: StatusCode,\n    pub(crate) headers: &'a HeaderMap,\n    pub(crate) location: &'a Uri,\n    pub(crate) previous: &'a Uri,\n}\n\n/// A value returned by [`Policy::redirect`] which indicates the action\n/// [`FollowRedirect`][super::FollowRedirect] should take for a redirection response.\npub enum Action {\n    /// Follow the redirection.\n    Follow,\n    /// Do not follow the redirection, and return the redirection response as-is.\n    Stop,\n    /// Pending async decision. 
The async task will be awaited to determine the final action.\n    Pending(Pin<Box<dyn Future<Output = Action> + Send>>),\n    /// An error occurred while determining the redirection action.\n    Error(BoxError),\n}\n\nimpl fmt::Debug for Action {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        match self {\n            Action::Follow => f.debug_tuple(\"Follow\").finish(),\n            Action::Stop => f.debug_tuple(\"Stop\").finish(),\n            Action::Pending(_) => f.debug_tuple(\"Pending\").finish(),\n            Action::Error(_) => f.debug_tuple(\"Error\").finish(),\n        }\n    }\n}\n"
  },
  {
    "path": "src/client/layer/redirect.rs",
    "content": "//! Middleware for following redirections.\n\nmod future;\nmod policy;\n\nuse std::{\n    mem,\n    task::{Context, Poll},\n};\n\nuse futures_util::future::Either;\nuse http::{Request, Response};\nuse http_body::Body;\nuse tower::{BoxError, Layer, Service};\n\nuse self::future::ResponseFuture;\npub use self::policy::{Action, Attempt, Policy};\n\nenum BodyRepr<B> {\n    Some(B),\n    Empty,\n    None,\n}\n\nimpl<B> BodyRepr<B>\nwhere\n    B: Body + Default,\n{\n    fn take(&mut self) -> Option<B> {\n        match mem::replace(self, BodyRepr::None) {\n            BodyRepr::Some(body) => Some(body),\n            BodyRepr::Empty => {\n                *self = BodyRepr::Empty;\n                Some(B::default())\n            }\n            BodyRepr::None => None,\n        }\n    }\n\n    fn try_clone_from<P, E>(&mut self, body: &B, policy: &P)\n    where\n        P: Policy<B, E>,\n    {\n        match self {\n            BodyRepr::Some(_) | BodyRepr::Empty => {}\n            BodyRepr::None => {\n                if body.size_hint().exact() == Some(0) {\n                    *self = BodyRepr::Some(B::default());\n                } else if let Some(cloned) = policy.clone_body(body) {\n                    *self = BodyRepr::Some(cloned);\n                }\n            }\n        }\n    }\n}\n\n/// [`Layer`] for retrying requests with a [`Service`] to follow redirection responses.\n#[derive(Clone, Copy, Default)]\npub struct FollowRedirectLayer<P> {\n    policy: P,\n}\n\nimpl<P> FollowRedirectLayer<P> {\n    /// Create a new [`FollowRedirectLayer`] with the given redirection [`Policy`].\n    #[inline(always)]\n    pub fn with_policy(policy: P) -> Self {\n        FollowRedirectLayer { policy }\n    }\n}\n\nimpl<S, P> Layer<S> for FollowRedirectLayer<P>\nwhere\n    S: Clone,\n    P: Clone,\n{\n    type Service = FollowRedirect<S, P>;\n\n    #[inline(always)]\n    fn layer(&self, inner: S) -> Self::Service {\n        FollowRedirect::with_policy(inner, 
self.policy.clone())\n    }\n}\n\n/// Middleware that retries requests with a [`Service`] to follow redirection responses.\n#[derive(Clone, Copy)]\npub struct FollowRedirect<S, P> {\n    inner: S,\n    policy: P,\n}\n\nimpl<S, P> FollowRedirect<S, P>\nwhere\n    P: Clone,\n{\n    /// Create a new [`FollowRedirect`] with the given redirection [`Policy`].\n    #[inline(always)]\n    pub fn with_policy(inner: S, policy: P) -> Self {\n        FollowRedirect { inner, policy }\n    }\n}\n\nimpl<ReqBody, ResBody, S, P> Service<Request<ReqBody>> for FollowRedirect<S, P>\nwhere\n    S: Service<Request<ReqBody>, Response = Response<ResBody>> + Clone,\n    S::Error: From<BoxError>,\n    P: Policy<ReqBody, S::Error> + Clone,\n    ReqBody: Body + Default,\n{\n    type Response = Response<ResBody>;\n    type Error = S::Error;\n    type Future = ResponseFuture<S, ReqBody, P>;\n\n    #[inline(always)]\n    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {\n        self.inner.poll_ready(cx)\n    }\n\n    fn call(&mut self, mut req: Request<ReqBody>) -> Self::Future {\n        if self.policy.follow_redirects(&mut req) {\n            let service = self.inner.clone();\n            let mut service = mem::replace(&mut self.inner, service);\n            let mut policy = self.policy.clone();\n\n            let mut body_repr = BodyRepr::None;\n            body_repr.try_clone_from(req.body(), &policy);\n            policy.on_request(&mut req);\n\n            let (parts, body) = req.into_parts();\n            let req = Request::from_parts(parts.clone(), body);\n            ResponseFuture::Redirect {\n                future: Either::Left(service.call(req)),\n                pending_future: None,\n                service,\n                policy,\n                parts,\n                body_repr,\n            }\n        } else {\n            ResponseFuture::Direct {\n                future: self.inner.call(req),\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "src/client/layer/retry/classify.rs",
    "content": "use std::{error::Error as StdError, sync::Arc};\n\nuse http::{Method, StatusCode, Uri};\n\nuse super::{Req, Res};\nuse crate::error::BoxError;\n\npub trait Classify: Send + Sync + 'static {\n    fn classify(&self, req_rep: ReqRep<'_>) -> Action;\n}\n\n// For Future Whoever: making a blanket impl for any closure sounds nice,\n// but it causes inference issues at the call site. Every closure would\n// need to include `: ReqRep` in the arguments.\n//\n// An alternative is to make things like `ClassifyFn`. Slightly more\n// annoying, but also more forwards-compatible. :shrug:\npub struct ClassifyFn<F>(pub(crate) F);\n\nimpl<F> Classify for ClassifyFn<F>\nwhere\n    F: Fn(ReqRep<'_>) -> Action + Send + Sync + 'static,\n{\n    fn classify(&self, req_rep: ReqRep<'_>) -> Action {\n        (self.0)(req_rep)\n    }\n}\n\n/// Represents a request-response pair for classification purposes.\n#[derive(Debug)]\npub struct ReqRep<'a>(&'a Req, Result<StatusCode, &'a BoxError>);\n\nimpl ReqRep<'_> {\n    /// Returns the HTTP method of the request.\n    pub fn method(&self) -> &Method {\n        self.0.method()\n    }\n\n    /// Returns the URI of the request.\n    pub fn uri(&self) -> &Uri {\n        self.0.uri()\n    }\n\n    /// Returns the HTTP status code if the response was successful.\n    pub fn status(&self) -> Option<StatusCode> {\n        self.1.ok()\n    }\n\n    /// Returns the error if the request failed.\n    pub fn error(&self) -> Option<&(dyn StdError + 'static)> {\n        self.1.as_ref().err().map(|&e| &**e as _)\n    }\n\n    /// Returns a retryable action.\n    pub fn retryable(self) -> Action {\n        Action::Retryable\n    }\n\n    /// Returns a success action.\n    pub fn success(self) -> Action {\n        Action::Success\n    }\n}\n\n/// The action to take after classifying a request/response pair.\n#[must_use]\npub enum Action {\n    /// The request was successful and should not be retried.\n    Success,\n    /// The request failed but can 
be retried.\n    Retryable,\n}\n\n/// Determines whether a request should be retried based on the response or error.\n#[derive(Clone)]\npub(crate) enum Classifier {\n    /// Never retry any requests.\n    Never,\n    /// Retry protocol-level errors (connection issues, timeouts, etc.).\n    ProtocolNacks,\n    /// Use custom classification logic.\n    Dyn(Arc<dyn Classify>),\n}\n\nimpl Classifier {\n    /// Classifies a request/response pair to determine the appropriate retry action.\n    pub(super) fn classify(&mut self, req: &Req, res: &Result<Res, BoxError>) -> Action {\n        let req_rep = ReqRep(req, res.as_ref().map(|r| r.status()));\n        match self {\n            Classifier::Never => Action::Success,\n            Classifier::ProtocolNacks => {\n                let is_protocol_nack = req_rep\n                    .error()\n                    .map(super::is_retryable_error)\n                    .unwrap_or(false);\n                if is_protocol_nack {\n                    Action::Retryable\n                } else {\n                    Action::Success\n                }\n            }\n            Classifier::Dyn(c) => c.classify(req_rep),\n        }\n    }\n}\n"
  },
  {
    "path": "src/client/layer/retry/scope.rs",
    "content": "use std::sync::Arc;\n\nuse super::Req;\n\npub trait Scope: Send + Sync + 'static {\n    fn applies_to(&self, req: &super::Req) -> bool;\n}\n\n// I think scopes likely make the most sense being to hosts.\n// If that's the case, then it should probably be easiest to check for\n// the host. Perhaps also considering the ability to add more things\n// to scope off in the future...\n\n// For Future Whoever: making a blanket impl for any closure sounds nice,\n// but it causes inference issues at the call site. Every closure would\n// need to include `: ReqRep` in the arguments.\n//\n// An alternative is to make things like `ScopeFn`. Slightly more annoying,\n// but also more forwards-compatible. :shrug:\n\npub struct ScopeFn<F>(pub(crate) F);\n\nimpl<F> Scope for ScopeFn<F>\nwhere\n    F: Fn(&Req) -> bool + Send + Sync + 'static,\n{\n    fn applies_to(&self, req: &Req) -> bool {\n        (self.0)(req)\n    }\n}\n\n/// Defines the scope of requests that are eligible for retry.\n#[derive(Clone)]\npub(crate) enum Scoped {\n    /// All requests are eligible for retry regardless of their properties.\n    Unscoped,\n    /// Use custom logic to determine if a request is eligible for retry.\n    Dyn(Arc<dyn Scope>),\n}\n\nimpl Scoped {\n    /// Checks if the given request falls within the retry scope.\n    pub(super) fn applies_to(&self, req: &super::Req) -> bool {\n        let ret = match self {\n            Scoped::Unscoped => true,\n            Scoped::Dyn(s) => s.applies_to(req),\n        };\n        trace!(\"retry in scope: {ret}\");\n        ret\n    }\n}\n"
  },
  {
    "path": "src/client/layer/retry.rs",
    "content": "//! Middleware for retrying requests.\n\nmod classify;\nmod scope;\n\nuse std::{error::Error as StdError, future::Ready, sync::Arc, time::Duration};\n\nuse http::{Request, Response};\nuse tower::{\n    BoxError,\n    retry::{\n        Policy,\n        budget::{Budget, TpsBudget},\n    },\n};\n\npub(crate) use self::{\n    classify::{Action, Classifier, ClassifyFn, ReqRep},\n    scope::{ScopeFn, Scoped},\n};\nuse super::super::core::body::Incoming;\nuse crate::{Body, retry};\n\n/// A retry policy for HTTP requests.\n#[derive(Clone)]\npub struct RetryPolicy {\n    budget: Option<Arc<TpsBudget>>,\n    classifier: Classifier,\n    max_retries_per_request: u32,\n    retry_cnt: u32,\n    scope: Scoped,\n}\n\nimpl RetryPolicy {\n    /// Create a new `RetryPolicy`.\n    #[inline]\n    pub fn new(policy: retry::Policy) -> Self {\n        Self {\n            budget: policy\n                .budget\n                .map(|budget| Arc::new(TpsBudget::new(Duration::from_secs(10), 10, budget))),\n            classifier: policy.classifier,\n            max_retries_per_request: policy.max_retries_per_request,\n            retry_cnt: 0,\n            scope: policy.scope,\n        }\n    }\n}\n\ntype Req = Request<Body>;\n\ntype Res = Response<Incoming>;\n\nimpl Policy<Req, Res, BoxError> for RetryPolicy {\n    type Future = Ready<()>;\n\n    fn retry(&mut self, req: &mut Req, result: &mut Result<Res, BoxError>) -> Option<Self::Future> {\n        match self.classifier.classify(req, result) {\n            Action::Success => {\n                trace!(\n                    \"Request successful, no retry needed: {} {}\",\n                    req.method(),\n                    req.uri()\n                );\n\n                if let Some(ref budget) = self.budget {\n                    budget.deposit();\n                    trace!(\"Token deposited back to retry budget\");\n                }\n                None\n            }\n            Action::Retryable => {\n           
     if self.budget.as_ref().map(|b| b.withdraw()).unwrap_or(true) {\n                    self.retry_cnt += 1;\n\n                    trace!(\n                        \"Retrying request ({}/{} attempts): {} {} - {}\",\n                        self.retry_cnt,\n                        self.max_retries_per_request,\n                        req.method(),\n                        req.uri(),\n                        match result {\n                            Ok(res) => format!(\"HTTP {}\", res.status()),\n                            Err(e) => format!(\"Error: {}\", e),\n                        }\n                    );\n\n                    Some(std::future::ready(()))\n                } else {\n                    debug!(\n                        \"Request is retryable but retry budget exhausted: {} {}\",\n                        req.method(),\n                        req.uri()\n                    );\n                    None\n                }\n            }\n        }\n    }\n\n    fn clone_request(&mut self, req: &Req) -> Option<Req> {\n        if self.retry_cnt > 0 && !self.scope.applies_to(req) {\n            trace!(\"not in scope, not retrying\");\n            return None;\n        }\n\n        if self.retry_cnt >= self.max_retries_per_request {\n            trace!(\"max_retries_per_request hit\");\n            return None;\n        }\n\n        let body = req.body().try_clone()?;\n        let mut new = http::Request::new(body);\n        *new.method_mut() = req.method().clone();\n        *new.uri_mut() = req.uri().clone();\n        *new.version_mut() = req.version();\n        *new.headers_mut() = req.headers().clone();\n        *new.extensions_mut() = req.extensions().clone();\n\n        Some(new)\n    }\n}\n\n/// Determines whether the given error is considered retryable for HTTP/2 requests.\n///\n/// Returns `true` if the error type or content indicates that the request can be retried,\n/// otherwise returns `false`.\nfn is_retryable_error(err: &(dyn StdError 
+ 'static)) -> bool {\n    let err = if let Some(err) = err.source() {\n        err\n    } else {\n        return false;\n    };\n\n    if let Some(cause) = err.source() {\n        if let Some(err) = cause.downcast_ref::<http2::Error>() {\n            // They sent us a graceful shutdown, try with a new connection!\n            if err.is_go_away() && err.is_remote() && err.reason() == Some(http2::Reason::NO_ERROR)\n            {\n                return true;\n            }\n\n            // REFUSED_STREAM was sent from the server, which is safe to retry.\n            // https://www.rfc-editor.org/rfc/rfc9113.html#section-8.7-3.2\n            if err.is_reset()\n                && err.is_remote()\n                && err.reason() == Some(http2::Reason::REFUSED_STREAM)\n            {\n                return true;\n            }\n        }\n    }\n    false\n}\n"
  },
  {
    "path": "src/client/layer/timeout/body.rs",
    "content": "use std::{\n    future::Future,\n    pin::Pin,\n    task::{Context, Poll, ready},\n    time::Duration,\n};\n\nuse http_body::Body;\nuse pin_project_lite::pin_project;\n\nuse crate::{\n    Error,\n    client::core::rt::{Sleep, Time, Timer},\n    error::{BoxError, TimedOut},\n};\n\npin_project! {\n    /// A wrapper body that applies timeout strategies to an inner HTTP body.\n    #[project = TimeoutBodyProj]\n    pub enum TimeoutBody<B> {\n        Plain {\n            #[pin]\n            body: B,\n        },\n        TotalTimeout {\n            #[pin]\n            body: TotalTimeoutBody<B>,\n        },\n        ReadTimeout {\n            #[pin]\n            body: ReadTimeoutBody<B>\n        },\n        CombinedTimeout {\n            #[pin]\n            body: TotalTimeoutBody<ReadTimeoutBody<B>>,\n        }\n    }\n}\n\npin_project! {\n    /// A body wrapper that enforces a total timeout for the entire stream.\n    ///\n    /// The timeout applies to the whole body: if the deadline is reached before\n    /// the body is fully read, an error is returned. The timer does **not** reset\n    /// between chunks.\n    pub struct TotalTimeoutBody<B> {\n        #[pin]\n        body: B,\n        timeout: Pin<Box<dyn Sleep>>,\n    }\n}\n\npin_project! {\n    /// A body wrapper that enforces a timeout for each read operation.\n    ///\n    /// The timeout resets after every successful read. 
If a single read\n    /// takes longer than the specified duration, an error is returned.\n    pub struct ReadTimeoutBody<B> {\n        timeout: Duration,\n        #[pin]\n        sleep: Option<Pin<Box<dyn Sleep>>>,\n        #[pin]\n        body: B,\n        timer: Time,\n    }\n}\n\n/// ==== impl TimeoutBody ====\nimpl<B> TimeoutBody<B> {\n    /// Creates a new [`TimeoutBody`] with no timeout.\n    pub fn new(\n        timer: Time,\n        deadline: Option<Duration>,\n        read_timeout: Option<Duration>,\n        body: B,\n    ) -> Self {\n        let deadline = deadline.map(|deadline| timer.sleep(deadline));\n        match (deadline, read_timeout) {\n            (Some(total_timeout), Some(read_timeout)) => TimeoutBody::CombinedTimeout {\n                body: TotalTimeoutBody {\n                    timeout: total_timeout,\n                    body: ReadTimeoutBody {\n                        timeout: read_timeout,\n                        sleep: None,\n                        body,\n                        timer,\n                    },\n                },\n            },\n            (Some(timeout), None) => TimeoutBody::TotalTimeout {\n                body: TotalTimeoutBody { body, timeout },\n            },\n            (None, Some(timeout)) => TimeoutBody::ReadTimeout {\n                body: ReadTimeoutBody {\n                    timeout,\n                    sleep: None,\n                    body,\n                    timer,\n                },\n            },\n            (None, None) => TimeoutBody::Plain { body },\n        }\n    }\n}\n\nimpl<B> Body for TimeoutBody<B>\nwhere\n    B: Body,\n    B::Error: Into<BoxError>,\n{\n    type Data = B::Data;\n    type Error = BoxError;\n\n    #[inline(always)]\n    fn poll_frame(\n        self: Pin<&mut Self>,\n        cx: &mut Context<'_>,\n    ) -> Poll<Option<Result<http_body::Frame<Self::Data>, Self::Error>>> {\n        match self.project() {\n            TimeoutBodyProj::TotalTimeout { body } => 
body.poll_frame(cx),\n            TimeoutBodyProj::ReadTimeout { body } => body.poll_frame(cx),\n            TimeoutBodyProj::CombinedTimeout { body } => body.poll_frame(cx),\n            TimeoutBodyProj::Plain { body } => poll_and_map_body(body, cx),\n        }\n    }\n\n    #[inline(always)]\n    fn size_hint(&self) -> http_body::SizeHint {\n        match self {\n            TimeoutBody::TotalTimeout { body } => body.size_hint(),\n            TimeoutBody::ReadTimeout { body } => body.size_hint(),\n            TimeoutBody::CombinedTimeout { body } => body.size_hint(),\n            TimeoutBody::Plain { body } => body.size_hint(),\n        }\n    }\n\n    #[inline(always)]\n    fn is_end_stream(&self) -> bool {\n        match self {\n            TimeoutBody::TotalTimeout { body } => body.is_end_stream(),\n            TimeoutBody::ReadTimeout { body } => body.is_end_stream(),\n            TimeoutBody::CombinedTimeout { body } => body.is_end_stream(),\n            TimeoutBody::Plain { body } => body.is_end_stream(),\n        }\n    }\n}\n\n#[inline(always)]\nfn poll_and_map_body<B>(\n    body: Pin<&mut B>,\n    cx: &mut Context<'_>,\n) -> Poll<Option<Result<http_body::Frame<B::Data>, BoxError>>>\nwhere\n    B: Body,\n    B::Error: Into<BoxError>,\n{\n    Poll::Ready(\n        ready!(body.poll_frame(cx)).map(|opt| opt.map_err(Error::decode).map_err(Into::into)),\n    )\n}\n\n// ==== impl TotalTimeoutBody ====\nimpl<B> Body for TotalTimeoutBody<B>\nwhere\n    B: Body,\n    B::Error: Into<BoxError>,\n{\n    type Data = B::Data;\n    type Error = BoxError;\n\n    fn poll_frame(\n        self: Pin<&mut Self>,\n        cx: &mut Context,\n    ) -> Poll<Option<Result<http_body::Frame<Self::Data>, Self::Error>>> {\n        let this = self.project();\n        if let Poll::Ready(()) = this.timeout.as_mut().poll(cx) {\n            return Poll::Ready(Some(Err(Error::body(TimedOut).into())));\n        }\n        poll_and_map_body(this.body, cx)\n    }\n\n    #[inline(always)]\n    
fn size_hint(&self) -> http_body::SizeHint {\n        self.body.size_hint()\n    }\n\n    #[inline(always)]\n    fn is_end_stream(&self) -> bool {\n        self.body.is_end_stream()\n    }\n}\n\n/// ==== impl ReadTimeoutBody ====\nimpl<B> Body for ReadTimeoutBody<B>\nwhere\n    B: Body,\n    B::Error: Into<BoxError>,\n{\n    type Data = B::Data;\n    type Error = BoxError;\n\n    fn poll_frame(\n        self: Pin<&mut Self>,\n        cx: &mut Context<'_>,\n    ) -> Poll<Option<Result<http_body::Frame<Self::Data>, Self::Error>>> {\n        let mut this = self.project();\n\n        // Error if the timeout has expired.\n        if this.sleep.is_none() {\n            this.sleep.set(Some(this.timer.sleep(*this.timeout)));\n        }\n\n        // Error if the timeout has expired.\n        if let Some(sleep) = this.sleep.as_mut().as_pin_mut() {\n            if sleep.poll(cx).is_ready() {\n                return Poll::Ready(Some(Err(Box::new(TimedOut))));\n            }\n        }\n\n        // Poll the actual body\n        match ready!(this.body.poll_frame(cx)) {\n            Some(Ok(frame)) => {\n                // Reset timeout on successful read\n                this.sleep.set(None);\n                Poll::Ready(Some(Ok(frame)))\n            }\n            Some(Err(err)) => Poll::Ready(Some(Err(err.into()))),\n            None => Poll::Ready(None),\n        }\n    }\n\n    #[inline(always)]\n    fn size_hint(&self) -> http_body::SizeHint {\n        self.body.size_hint()\n    }\n\n    #[inline(always)]\n    fn is_end_stream(&self) -> bool {\n        self.body.is_end_stream()\n    }\n}\n"
  },
  {
    "path": "src/client/layer/timeout/future.rs",
    "content": "use std::{\n    future::Future,\n    pin::Pin,\n    task::{Context, Poll, ready},\n    time::Duration,\n};\n\nuse http::Response;\nuse pin_project_lite::pin_project;\nuse tokio::time::Sleep;\n\nuse super::body::TimeoutBody;\nuse crate::{\n    client::core::rt::Time,\n    error::{BoxError, Error, TimedOut},\n};\n\npin_project! {\n    /// [`Timeout`] response future\n    pub struct ResponseFuture<F> {\n        #[pin]\n        pub(crate) response: F,\n        #[pin]\n        pub(crate) total_timeout: Option<Sleep>,\n        #[pin]\n        pub(crate) read_timeout: Option<Sleep>,\n    }\n}\n\nimpl<F, T, E> Future for ResponseFuture<F>\nwhere\n    F: Future<Output = Result<T, E>>,\n    E: Into<BoxError>,\n{\n    type Output = Result<T, BoxError>;\n\n    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {\n        let mut this = self.project();\n\n        // First, try polling the future\n        match this.response.poll(cx) {\n            Poll::Ready(v) => return Poll::Ready(v.map_err(Into::into)),\n            Poll::Pending => {}\n        }\n\n        // Helper closure for polling a timeout and returning a TimedOut error\n        let mut check_timeout = |sleep: Option<Pin<&mut Sleep>>| {\n            if let Some(sleep) = sleep {\n                if sleep.poll(cx).is_ready() {\n                    return Some(Poll::Ready(Err(Error::request(TimedOut).into())));\n                }\n            }\n            None\n        };\n\n        // Check total timeout first\n        if let Some(poll) = check_timeout(this.total_timeout.as_mut().as_pin_mut()) {\n            return poll;\n        }\n\n        // Check read timeout\n        if let Some(poll) = check_timeout(this.read_timeout.as_mut().as_pin_mut()) {\n            return poll;\n        }\n\n        Poll::Pending\n    }\n}\n\npin_project! 
{\n    /// Response future for [`ResponseBodyTimeout`].\n    pub struct ResponseBodyTimeoutFuture<Fut> {\n        #[pin]\n        pub(super) inner: Fut,\n        pub(super) total_timeout: Option<Duration>,\n        pub(super) read_timeout: Option<Duration>,\n        pub(super) timer: Time,\n    }\n}\n\nimpl<Fut, ResBody, E> Future for ResponseBodyTimeoutFuture<Fut>\nwhere\n    Fut: Future<Output = Result<Response<ResBody>, E>>,\n{\n    type Output = Result<Response<TimeoutBody<ResBody>>, E>;\n\n    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {\n        let timer = self.timer.clone();\n        let total_timeout = self.total_timeout;\n        let read_timeout = self.read_timeout;\n        let res = ready!(self.project().inner.poll(cx))?\n            .map(|body| TimeoutBody::new(timer, total_timeout, read_timeout, body));\n        Poll::Ready(Ok(res))\n    }\n}\n"
  },
  {
    "path": "src/client/layer/timeout.rs",
    "content": "//! Middleware for setting a timeout on the response.\n\nmod body;\nmod future;\n\nuse std::{\n    sync::Arc,\n    task::{Context, Poll},\n    time::Duration,\n};\n\nuse http::{Request, Response};\nuse tower::{BoxError, Layer, Service};\n\npub use self::body::TimeoutBody;\nuse self::future::{ResponseBodyTimeoutFuture, ResponseFuture};\nuse crate::{\n    client::core::rt::{Time, Timer},\n    config::RequestConfig,\n};\n\n/// Options for configuring timeouts.\n#[derive(Clone, Copy, Default)]\npub struct TimeoutOptions {\n    total_timeout: Option<Duration>,\n    read_timeout: Option<Duration>,\n}\n\nimpl TimeoutOptions {\n    /// Sets the read timeout for the options.\n    #[inline]\n    pub fn read_timeout(&mut self, read_timeout: Duration) -> &mut Self {\n        self.read_timeout = Some(read_timeout);\n        self\n    }\n\n    /// Sets the total timeout for the options.\n    #[inline]\n    pub fn total_timeout(&mut self, total_timeout: Duration) -> &mut Self {\n        self.total_timeout = Some(total_timeout);\n        self\n    }\n}\n\nimpl_request_config_value!(TimeoutOptions);\n\n/// [`Layer`] that applies a [`Timeout`] middleware to a service.\n// This layer allows you to set a total timeout and a read timeout for requests.\n#[derive(Clone)]\npub struct TimeoutLayer {\n    timeout: RequestConfig<TimeoutOptions>,\n}\n\nimpl TimeoutLayer {\n    /// Create a new [`TimeoutLayer`].\n    pub fn new(options: TimeoutOptions) -> Self {\n        TimeoutLayer {\n            timeout: RequestConfig::new(Some(options)),\n        }\n    }\n}\n\nimpl<S> Layer<S> for TimeoutLayer {\n    type Service = Timeout<S>;\n\n    #[inline(always)]\n    fn layer(&self, service: S) -> Self::Service {\n        Timeout {\n            inner: service,\n            timeout: self.timeout,\n        }\n    }\n}\n\n/// Middleware that applies total and per-read timeouts to a [`Service`] response body.\n#[derive(Clone)]\npub struct Timeout<T> {\n    inner: T,\n    timeout: 
RequestConfig<TimeoutOptions>,\n}\n\nimpl<ReqBody, ResBody, S> Service<Request<ReqBody>> for Timeout<S>\nwhere\n    S: Service<Request<ReqBody>, Response = Response<ResBody>, Error = BoxError>,\n{\n    type Response = S::Response;\n    type Error = BoxError;\n    type Future = ResponseFuture<S::Future>;\n\n    #[inline(always)]\n    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {\n        self.inner.poll_ready(cx)\n    }\n\n    #[inline(always)]\n    fn call(&mut self, req: Request<ReqBody>) -> Self::Future {\n        let (total_timeout, read_timeout) = fetch_timeout_options(&self.timeout, req.extensions());\n        ResponseFuture {\n            response: self.inner.call(req),\n            total_timeout: total_timeout.map(tokio::time::sleep),\n            read_timeout: read_timeout.map(tokio::time::sleep),\n        }\n    }\n}\n\n/// [`Layer`] that applies a [`ResponseBodyTimeout`] middleware to a service.\n// This layer allows you to set a total timeout and a read timeout for the response body.\n#[derive(Clone)]\npub struct ResponseBodyTimeoutLayer {\n    timer: Time,\n    timeout: RequestConfig<TimeoutOptions>,\n}\n\nimpl ResponseBodyTimeoutLayer {\n    /// Creates a new [`ResponseBodyTimeoutLayer`].\n    pub fn new<M>(timer: M, options: TimeoutOptions) -> Self\n    where\n        M: Timer + Send + Sync + 'static,\n    {\n        Self {\n            timer: Time::Timer(Arc::new(timer)),\n            timeout: RequestConfig::new(Some(options)),\n        }\n    }\n}\n\nimpl<S> Layer<S> for ResponseBodyTimeoutLayer {\n    type Service = ResponseBodyTimeout<S>;\n\n    #[inline(always)]\n    fn layer(&self, inner: S) -> Self::Service {\n        ResponseBodyTimeout {\n            inner,\n            timeout: self.timeout,\n            timer: self.timer.clone(),\n        }\n    }\n}\n\n/// Middleware that timeouts the response body of a request with a [`Service`] to a total timeout\n/// and a read timeout.\n#[derive(Clone)]\npub struct 
ResponseBodyTimeout<S> {\n    inner: S,\n    timeout: RequestConfig<TimeoutOptions>,\n    timer: Time,\n}\n\nimpl<S, ReqBody, ResBody> Service<Request<ReqBody>> for ResponseBodyTimeout<S>\nwhere\n    S: Service<Request<ReqBody>, Response = Response<ResBody>>,\n{\n    type Response = Response<TimeoutBody<ResBody>>;\n    type Error = S::Error;\n    type Future = ResponseBodyTimeoutFuture<S::Future>;\n\n    #[inline(always)]\n    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {\n        self.inner.poll_ready(cx)\n    }\n\n    #[inline(always)]\n    fn call(&mut self, req: Request<ReqBody>) -> Self::Future {\n        let (total_timeout, read_timeout) = fetch_timeout_options(&self.timeout, req.extensions());\n        ResponseBodyTimeoutFuture {\n            inner: self.inner.call(req),\n            total_timeout,\n            read_timeout,\n            timer: self.timer.clone(),\n        }\n    }\n}\n\nfn fetch_timeout_options(\n    opts: &RequestConfig<TimeoutOptions>,\n    extensions: &http::Extensions,\n) -> (Option<Duration>, Option<Duration>) {\n    match (opts.as_ref(), opts.fetch(extensions)) {\n        (Some(opts), Some(request_opts)) => (\n            request_opts.total_timeout.or(opts.total_timeout),\n            request_opts.read_timeout.or(opts.read_timeout),\n        ),\n        (Some(opts), None) => (opts.total_timeout, opts.read_timeout),\n        (None, Some(opts)) => (opts.total_timeout, opts.read_timeout),\n        (None, None) => (None, None),\n    }\n}\n"
  },
  {
    "path": "src/client/layer.rs",
    "content": "//! Middleware for the client.\n\npub mod client;\npub mod config;\n#[cfg(any(\n    feature = \"gzip\",\n    feature = \"zstd\",\n    feature = \"brotli\",\n    feature = \"deflate\",\n))]\npub mod decoder;\npub mod redirect;\npub mod retry;\npub mod timeout;\n"
  },
  {
    "path": "src/client/multipart.rs",
    "content": "//! multipart/form-data\n\nuse std::{borrow::Cow, pin::Pin};\n\nuse bytes::Bytes;\nuse futures_util::{Stream, StreamExt, future, stream};\nuse http::header::HeaderMap;\nuse http_body_util::BodyExt;\nuse mime_guess::Mime;\nuse percent_encoding::{self, AsciiSet, NON_ALPHANUMERIC};\n#[cfg(feature = \"stream\")]\nuse {std::io, std::path::Path, tokio::fs::File};\n\nuse super::Body;\n\n/// An async multipart/form-data request.\n#[derive(Debug)]\npub struct Form {\n    boundary: Cow<'static, str>,\n    computed_headers: Vec<Vec<u8>>,\n    fields: Vec<(Cow<'static, str>, Part)>,\n    percent_encoding: PercentEncoding,\n}\n\n/// A field in a multipart form.\n#[derive(Debug)]\npub struct Part {\n    meta: PartMetadata,\n    value: Body,\n    body_length: Option<u64>,\n}\n\n#[derive(Debug)]\nstruct PartMetadata {\n    mime: Option<Mime>,\n    file_name: Option<Cow<'static, str>>,\n    headers: HeaderMap,\n}\n\n// ===== impl Form =====\n\nimpl Default for Form {\n    fn default() -> Self {\n        Self::new()\n    }\n}\n\nimpl Form {\n    /// Creates a new async Form without any content.\n    pub fn new() -> Form {\n        Form::with_boundary(gen_boundary())\n    }\n\n    /// Creates a new async Form with a custom boundary.\n    ///\n    /// **Setting a custom boundary incurs significant risk of generating\n    /// corrupted bodies.** Only use this if you need it and you understand the\n    /// risk!\n    pub fn with_boundary<S>(boundary: S) -> Form\n    where\n        S: Into<Cow<'static, str>>,\n    {\n        Form {\n            boundary: boundary.into(),\n            computed_headers: Vec::new(),\n            fields: Vec::new(),\n            percent_encoding: PercentEncoding::PathSegment,\n        }\n    }\n\n    /// Get the boundary that this form will use.\n    pub fn boundary(&self) -> &str {\n        &self.boundary\n    }\n\n    /// Add a data field with supplied name and value.\n    ///\n    /// # Examples\n    ///\n    /// ```\n    /// let form = 
wreq::multipart::Form::new()\n    ///     .text(\"username\", \"seanmonstar\")\n    ///     .text(\"password\", \"secret\");\n    /// ```\n    pub fn text<T, U>(self, name: T, value: U) -> Form\n    where\n        T: Into<Cow<'static, str>>,\n        U: Into<Cow<'static, str>>,\n    {\n        self.part(name, Part::text(value))\n    }\n\n    /// Adds a file field.\n    ///\n    /// The path will be used to try to guess the filename and mime.\n    ///\n    /// # Examples\n    ///\n    /// ```no_run\n    /// # async fn run() -> std::io::Result<()> {\n    /// let form = wreq::multipart::Form::new()\n    ///     .file(\"key\", \"/path/to/file\")\n    ///     .await?;\n    /// # Ok(())\n    /// # }\n    /// ```\n    ///\n    /// # Errors\n    ///\n    /// Errors when the file cannot be opened.\n    #[cfg(feature = \"stream\")]\n    #[cfg_attr(docsrs, doc(cfg(feature = \"stream\")))]\n    pub async fn file<T, U>(self, name: T, path: U) -> io::Result<Form>\n    where\n        T: Into<Cow<'static, str>>,\n        U: AsRef<Path>,\n    {\n        Ok(self.part(name, Part::file(path).await?))\n    }\n\n    /// Adds a customized Part.\n    pub fn part<T>(mut self, name: T, part: Part) -> Form\n    where\n        T: Into<Cow<'static, str>>,\n    {\n        self.fields.push((name.into(), part));\n        self\n    }\n\n    /// Configure this `Form` to percent-encode using the `path-segment` rules.\n    pub fn percent_encode_path_segment(mut self) -> Form {\n        self.percent_encoding = PercentEncoding::PathSegment;\n        self\n    }\n\n    /// Configure this `Form` to percent-encode using the `attr-char` rules.\n    pub fn percent_encode_attr_chars(mut self) -> Form {\n        self.percent_encoding = PercentEncoding::AttrChar;\n        self\n    }\n\n    /// Configure this `Form` to skip percent-encoding\n    pub fn percent_encode_noop(mut self) -> Form {\n        self.percent_encoding = PercentEncoding::NoOp;\n        self\n    }\n\n    /// Consume this instance and 
transform into an instance of Body for use in a request.\n    pub(crate) fn stream(self) -> Body {\n        if self.fields.is_empty() {\n            return Body::empty();\n        }\n\n        Body::stream(self.into_stream())\n    }\n\n    /// Produce a stream of the bytes in this `Form`, consuming it.\n    pub fn into_stream(mut self) -> impl Stream<Item = Result<Bytes, crate::Error>> + Send + Sync {\n        if self.fields.is_empty() {\n            let empty_stream: Pin<\n                Box<dyn Stream<Item = Result<Bytes, crate::Error>> + Send + Sync>,\n            > = Box::pin(futures_util::stream::empty());\n            return empty_stream;\n        }\n\n        // create initial part to init reduce chain\n        let (name, part) = self.fields.remove(0);\n        let start = Box::pin(self.part_stream(name, part))\n            as Pin<Box<dyn Stream<Item = crate::Result<Bytes>> + Send + Sync>>;\n\n        let fields = self.take_fields();\n        // for each field, chain an additional stream\n        let stream = fields.into_iter().fold(start, |memo, (name, part)| {\n            let part_stream = self.part_stream(name, part);\n            Box::pin(memo.chain(part_stream))\n                as Pin<Box<dyn Stream<Item = crate::Result<Bytes>> + Send + Sync>>\n        });\n        // append special ending boundary\n        let last = stream::once(future::ready(Ok(\n            format!(\"--{}--\\r\\n\", self.boundary).into()\n        )));\n        Box::pin(stream.chain(last))\n    }\n\n    /// Generate a crate::core::Body stream for a single Part instance of a Form request.\n    pub(crate) fn part_stream<T>(\n        &mut self,\n        name: T,\n        part: Part,\n    ) -> impl Stream<Item = Result<Bytes, crate::Error>> + use<T>\n    where\n        T: Into<Cow<'static, str>>,\n    {\n        // start with boundary\n        let boundary = stream::once(future::ready(Ok(format!(\"--{}\\r\\n\", self.boundary).into())));\n        // append headers\n        let header = 
stream::once(future::ready(Ok({\n            let mut h = self\n                .percent_encoding\n                .encode_headers(&name.into(), &part.meta);\n            h.extend_from_slice(b\"\\r\\n\\r\\n\");\n            h.into()\n        })));\n        // then append form data followed by terminating CRLF\n        boundary\n            .chain(header)\n            .chain(part.value.into_data_stream())\n            .chain(stream::once(future::ready(Ok(\"\\r\\n\".into()))))\n    }\n\n    // If predictable, computes the length the request will have\n    // The length should be predictable if only String and file fields have been added,\n    // but not if a generic reader has been added;\n    pub(crate) fn compute_length(&mut self) -> Option<u64> {\n        let mut length = 0u64;\n        for (name, field) in self.fields.iter() {\n            match field.value_len() {\n                Some(value_length) => {\n                    // We are constructing the header just to get its length. To not have to\n                    // construct it again when the request is sent we cache these headers.\n                    let header = self.percent_encoding.encode_headers(name, field.metadata());\n                    let header_length = header.len();\n                    self.computed_headers.push(header);\n                    // The additions mimic the format string out of which the field is constructed\n                    // in Reader. 
Not the cleanest solution because if that format string is\n                    // ever changed then this formula needs to be changed too which is not an\n                    // obvious dependency in the code.\n                    length += 2\n                        + self.boundary.len() as u64\n                        + 2\n                        + header_length as u64\n                        + 4\n                        + value_length\n                        + 2\n                }\n                _ => return None,\n            }\n        }\n        // If there is at least one field there is a special boundary for the very last field.\n        if !self.fields.is_empty() {\n            length += 2 + self.boundary.len() as u64 + 4\n        }\n        Some(length)\n    }\n\n    /// Take the fields vector of this instance, replacing with an empty vector.\n    fn take_fields(&mut self) -> Vec<(Cow<'static, str>, Part)> {\n        std::mem::take(&mut self.fields)\n    }\n}\n\n// ===== impl Part =====\n\nimpl Part {\n    /// Makes a text parameter.\n    pub fn text<T>(value: T) -> Part\n    where\n        T: Into<Cow<'static, str>>,\n    {\n        let body = match value.into() {\n            Cow::Borrowed(slice) => Body::from(slice),\n            Cow::Owned(string) => Body::from(string),\n        };\n        Part::new(body, None)\n    }\n\n    /// Makes a new parameter from arbitrary bytes.\n    pub fn bytes<T>(value: T) -> Part\n    where\n        T: Into<Cow<'static, [u8]>>,\n    {\n        let body = match value.into() {\n            Cow::Borrowed(slice) => Body::from(slice),\n            Cow::Owned(vec) => Body::from(vec),\n        };\n        Part::new(body, None)\n    }\n\n    /// Makes a new parameter from an arbitrary stream.\n    pub fn stream<T: Into<Body>>(value: T) -> Part {\n        Part::new(value.into(), None)\n    }\n\n    /// Makes a new parameter from an arbitrary stream with a known length. 
This is particularly\n    /// useful when adding something like file contents as a stream, where you can know the content\n    /// length beforehand.\n    pub fn stream_with_length<T: Into<Body>>(value: T, length: u64) -> Part {\n        Part::new(value.into(), Some(length))\n    }\n\n    /// Makes a file parameter.\n    ///\n    /// # Errors\n    ///\n    /// Errors when the file cannot be opened.\n    #[cfg(feature = \"stream\")]\n    #[cfg_attr(docsrs, doc(cfg(feature = \"stream\")))]\n    pub async fn file<T: AsRef<Path>>(path: T) -> io::Result<Part> {\n        let path = path.as_ref();\n        let file_name = path\n            .file_name()\n            .map(|filename| filename.to_string_lossy().into_owned());\n        let ext = path.extension().and_then(|ext| ext.to_str()).unwrap_or(\"\");\n        let mime = mime_guess::from_ext(ext).first_or_octet_stream();\n        let file = File::open(path).await?;\n        let len = file.metadata().await.map(|m| m.len()).ok();\n        let field = match len {\n            Some(len) => Part::stream_with_length(file, len),\n            None => Part::stream(file),\n        }\n        .mime(mime);\n\n        Ok(if let Some(file_name) = file_name {\n            field.file_name(file_name)\n        } else {\n            field\n        })\n    }\n\n    fn new(value: Body, body_length: Option<u64>) -> Part {\n        Part {\n            meta: PartMetadata::new(),\n            value,\n            body_length,\n        }\n    }\n\n    /// Tries to set the mime of this part.\n    pub fn mime_str(self, mime: &str) -> crate::Result<Part> {\n        Ok(self.mime(mime.parse().map_err(crate::Error::builder)?))\n    }\n\n    // Re-export when mime 0.4 is available, with split MediaType/MediaRange.\n    fn mime(self, mime: Mime) -> Part {\n        self.with_inner(move |inner| inner.mime(mime))\n    }\n\n    /// Sets the filename, builder style.\n    pub fn file_name<T>(self, filename: T) -> Part\n    where\n        T: Into<Cow<'static, 
str>>,\n    {\n        self.with_inner(move |inner| inner.file_name(filename))\n    }\n\n    /// Sets custom headers for the part.\n    pub fn headers(self, headers: HeaderMap) -> Part {\n        self.with_inner(move |inner| inner.headers(headers))\n    }\n\n    fn value_len(&self) -> Option<u64> {\n        if self.body_length.is_some() {\n            self.body_length\n        } else {\n            self.value.content_length()\n        }\n    }\n\n    fn metadata(&self) -> &PartMetadata {\n        &self.meta\n    }\n\n    fn with_inner<F>(self, func: F) -> Self\n    where\n        F: FnOnce(PartMetadata) -> PartMetadata,\n    {\n        Part {\n            meta: func(self.meta),\n            ..self\n        }\n    }\n}\n\n// ===== impl PartMetadata =====\n\nimpl PartMetadata {\n    fn new() -> Self {\n        PartMetadata {\n            mime: None,\n            file_name: None,\n            headers: HeaderMap::default(),\n        }\n    }\n\n    fn mime(mut self, mime: Mime) -> Self {\n        self.mime = Some(mime);\n        self\n    }\n\n    fn file_name<T>(mut self, filename: T) -> Self\n    where\n        T: Into<Cow<'static, str>>,\n    {\n        self.file_name = Some(filename.into());\n        self\n    }\n\n    fn headers<T>(mut self, headers: T) -> Self\n    where\n        T: Into<HeaderMap>,\n    {\n        self.headers = headers.into();\n        self\n    }\n}\n\n// https://url.spec.whatwg.org/#fragment-percent-encode-set\nconst FRAGMENT_ENCODE_SET: &AsciiSet = &percent_encoding::CONTROLS\n    .add(b' ')\n    .add(b'\"')\n    .add(b'<')\n    .add(b'>')\n    .add(b'`');\n\n// https://url.spec.whatwg.org/#path-percent-encode-set\nconst PATH_ENCODE_SET: &AsciiSet = &FRAGMENT_ENCODE_SET.add(b'#').add(b'?').add(b'{').add(b'}');\n\nconst PATH_SEGMENT_ENCODE_SET: &AsciiSet = &PATH_ENCODE_SET.add(b'/').add(b'%');\n\n// https://tools.ietf.org/html/rfc8187#section-3.2.1\nconst ATTR_CHAR_ENCODE_SET: &AsciiSet = &NON_ALPHANUMERIC\n    .remove(b'!')\n    
.remove(b'#')\n    .remove(b'$')\n    .remove(b'&')\n    .remove(b'+')\n    .remove(b'-')\n    .remove(b'.')\n    .remove(b'^')\n    .remove(b'_')\n    .remove(b'`')\n    .remove(b'|')\n    .remove(b'~');\n\n#[derive(Debug)]\nenum PercentEncoding {\n    PathSegment,\n    AttrChar,\n    NoOp,\n}\n\nimpl PercentEncoding {\n    fn encode_headers(&self, name: &str, field: &PartMetadata) -> Vec<u8> {\n        let mut buf = Vec::new();\n        buf.extend_from_slice(b\"Content-Disposition: form-data; \");\n\n        match self.percent_encode(name) {\n            Cow::Borrowed(value) => {\n                // nothing has been percent encoded\n                buf.extend_from_slice(b\"name=\\\"\");\n                buf.extend_from_slice(value.as_bytes());\n                buf.extend_from_slice(b\"\\\"\");\n            }\n            Cow::Owned(value) => {\n                // something has been percent encoded\n                buf.extend_from_slice(b\"name*=utf-8''\");\n                buf.extend_from_slice(value.as_bytes());\n            }\n        }\n\n        // According to RFC7578 Section 4.2, `filename*=` syntax is invalid.\n        // See https://github.com/seanmonstar/reqwest/issues/419.\n        if let Some(filename) = &field.file_name {\n            buf.extend_from_slice(b\"; filename=\\\"\");\n            let legal_filename = filename\n                .replace('\\\\', \"\\\\\\\\\")\n                .replace('\"', \"\\\\\\\"\")\n                .replace('\\r', \"\\\\\\r\")\n                .replace('\\n', \"\\\\\\n\");\n            buf.extend_from_slice(legal_filename.as_bytes());\n            buf.extend_from_slice(b\"\\\"\");\n        }\n\n        if let Some(mime) = &field.mime {\n            buf.extend_from_slice(b\"\\r\\nContent-Type: \");\n            buf.extend_from_slice(mime.as_ref().as_bytes());\n        }\n\n        for (k, v) in field.headers.iter() {\n            buf.extend_from_slice(b\"\\r\\n\");\n            
buf.extend_from_slice(k.as_str().as_bytes());\n            buf.extend_from_slice(b\": \");\n            buf.extend_from_slice(v.as_bytes());\n        }\n        buf\n    }\n\n    fn percent_encode<'a>(&self, value: &'a str) -> Cow<'a, str> {\n        use percent_encoding::utf8_percent_encode as percent_encode;\n\n        match self {\n            Self::PathSegment => percent_encode(value, PATH_SEGMENT_ENCODE_SET).into(),\n            Self::AttrChar => percent_encode(value, ATTR_CHAR_ENCODE_SET).into(),\n            Self::NoOp => value.into(),\n        }\n    }\n}\n\n/// See chromium's implementation: https://source.chromium.org/chromium/chromium/src/+/main:third_party/blink/renderer/platform/network/form_data_encoder.cc\nfn gen_boundary() -> String {\n    use crate::util::fast_random as random;\n\n    const PREFIX: &[u8; 22] = b\"----WebKitFormBoundary\";\n\n    // The RFC 2046 spec says the alphanumeric characters plus the\n    // following characters are legal for boundaries:  '()+_,-./:=?\n    // However the following characters, though legal, cause some sites\n    // to fail: (),./:=+\n    // Note that our algorithm makes it twice as much likely for 'A' or 'B'\n    // to appear in the boundary string, because 0x41 and 0x42 are present in\n    // the below array twice.\n    const ALPHA_NUMERIC_ENCODING_MAP: [u8; 64] = [\n        0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F,\n        0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x61, 0x62, 0x63, 0x64,\n        0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70, 0x71, 0x72, 0x73,\n        0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,\n        0x38, 0x39, 0x41, 0x42,\n    ];\n\n    // Pre-allocate a buffer for the boundary string. 
The final length will be 22 (prefix) + 16\n    // (random chars) = 38.\n    let mut boundary = Vec::with_capacity(38);\n    // Start with an informative prefix.\n    boundary.extend_from_slice(PREFIX);\n\n    // Append 16 random 7bit ascii AlphaNumeric characters.\n    for _ in 0..2 {\n        let mut randomness = random();\n        for _ in 0..8 {\n            let index = (randomness & 0x3F) as usize;\n            boundary.push(ALPHA_NUMERIC_ENCODING_MAP[index]);\n            randomness >>= 6;\n        }\n    }\n\n    assert_eq!(boundary.len(), 38);\n    String::from_utf8(boundary).expect(\"Invalid UTF-8 generated\")\n}\n\n#[cfg(test)]\nmod tests {\n    use std::future;\n\n    use futures_util::{TryStreamExt, stream};\n    use tokio::{self, runtime};\n\n    use super::*;\n\n    #[test]\n    fn form_empty() {\n        let form = Form::new();\n\n        let rt = runtime::Builder::new_current_thread()\n            .enable_all()\n            .build()\n            .expect(\"new rt\");\n        let body = form.stream().into_data_stream();\n        let s = body.map_ok(|try_c| try_c.to_vec()).try_concat();\n\n        let out = rt.block_on(s);\n        assert!(out.unwrap().is_empty());\n    }\n\n    #[test]\n    fn stream_to_end() {\n        let mut form = Form::new()\n            .part(\n                \"reader1\",\n                Part::stream(Body::stream(stream::once(future::ready::<\n                    Result<String, crate::Error>,\n                >(Ok(\n                    \"part1\".to_owned()\n                ))))),\n            )\n            .part(\"key1\", Part::text(\"value1\"))\n            .part(\n                \"key2\",\n                Part::text(\"value2\").mime(mime_guess::mime::IMAGE_BMP),\n            )\n            .part(\n                \"reader2\",\n                Part::stream(Body::stream(stream::once(future::ready::<\n                    Result<String, crate::Error>,\n                >(Ok(\n                    \"part2\".to_owned()\n           
     ))))),\n            )\n            .part(\"key3\", Part::text(\"value3\").file_name(\"filename\"));\n        form.boundary = \"boundary\".into();\n        let expected = \"--boundary\\r\\n\\\n             Content-Disposition: form-data; name=\\\"reader1\\\"\\r\\n\\r\\n\\\n             part1\\r\\n\\\n             --boundary\\r\\n\\\n             Content-Disposition: form-data; name=\\\"key1\\\"\\r\\n\\r\\n\\\n             value1\\r\\n\\\n             --boundary\\r\\n\\\n             Content-Disposition: form-data; name=\\\"key2\\\"\\r\\n\\\n             Content-Type: image/bmp\\r\\n\\r\\n\\\n             value2\\r\\n\\\n             --boundary\\r\\n\\\n             Content-Disposition: form-data; name=\\\"reader2\\\"\\r\\n\\r\\n\\\n             part2\\r\\n\\\n             --boundary\\r\\n\\\n             Content-Disposition: form-data; name=\\\"key3\\\"; filename=\\\"filename\\\"\\r\\n\\r\\n\\\n             value3\\r\\n--boundary--\\r\\n\";\n        let rt = runtime::Builder::new_current_thread()\n            .enable_all()\n            .build()\n            .expect(\"new rt\");\n        let body = form.stream().into_data_stream();\n        let s = body.map(|try_c| try_c.map(|r| r.to_vec())).try_concat();\n\n        let out = rt.block_on(s).unwrap();\n        // These prints are for debug purposes in case the test fails\n        println!(\n            \"START REAL\\n{}\\nEND REAL\",\n            std::str::from_utf8(&out).unwrap()\n        );\n        println!(\"START EXPECTED\\n{expected}\\nEND EXPECTED\");\n        assert_eq!(std::str::from_utf8(&out).unwrap(), expected);\n    }\n\n    #[test]\n    fn stream_to_end_with_header() {\n        let mut part = Part::text(\"value2\").mime(mime_guess::mime::IMAGE_BMP);\n        let mut headers = HeaderMap::new();\n        headers.insert(\"Hdr3\", \"/a/b/c\".parse().unwrap());\n        part = part.headers(headers);\n        let mut form = Form::new().part(\"key2\", part);\n        form.boundary = \"boundary\".into();\n  
      let expected = \"--boundary\\r\\n\\\n                        Content-Disposition: form-data; name=\\\"key2\\\"\\r\\n\\\n                        Content-Type: image/bmp\\r\\n\\\n                        hdr3: /a/b/c\\r\\n\\\n                        \\r\\n\\\n                        value2\\r\\n\\\n                        --boundary--\\r\\n\";\n        let rt = runtime::Builder::new_current_thread()\n            .enable_all()\n            .build()\n            .expect(\"new rt\");\n        let body = form.stream().into_data_stream();\n        let s = body.map(|try_c| try_c.map(|r| r.to_vec())).try_concat();\n\n        let out = rt.block_on(s).unwrap();\n        // These prints are for debug purposes in case the test fails\n        println!(\n            \"START REAL\\n{}\\nEND REAL\",\n            std::str::from_utf8(&out).unwrap()\n        );\n        println!(\"START EXPECTED\\n{expected}\\nEND EXPECTED\");\n        assert_eq!(std::str::from_utf8(&out).unwrap(), expected);\n    }\n\n    #[test]\n    fn correct_content_length() {\n        // Setup an arbitrary data stream\n        let stream_data = b\"just some stream data\";\n        let stream_len = stream_data.len();\n        let stream_data = stream_data\n            .chunks(3)\n            .map(|c| Ok::<_, std::io::Error>(Bytes::from(c)));\n        let the_stream = futures_util::stream::iter(stream_data);\n\n        let bytes_data = b\"some bytes data\".to_vec();\n        let bytes_len = bytes_data.len();\n\n        let stream_part = Part::stream_with_length(Body::stream(the_stream), stream_len as u64);\n        let body_part = Part::bytes(bytes_data);\n\n        // A simple check to make sure we get the configured body length\n        assert_eq!(stream_part.value_len().unwrap(), stream_len as u64);\n\n        // Make sure it delegates to the underlying body if length is not specified\n        assert_eq!(body_part.value_len().unwrap(), bytes_len as u64);\n    }\n\n    #[test]\n    fn 
header_percent_encoding() {\n        let name = \"start%'\\\"\\r\\nßend\";\n        let field = Part::text(\"\");\n\n        assert_eq!(\n            PercentEncoding::PathSegment.encode_headers(name, &field.meta),\n            &b\"Content-Disposition: form-data; name*=utf-8''start%25'%22%0D%0A%C3%9Fend\"[..]\n        );\n\n        assert_eq!(\n            PercentEncoding::AttrChar.encode_headers(name, &field.meta),\n            &b\"Content-Disposition: form-data; name*=utf-8''start%25%27%22%0D%0A%C3%9Fend\"[..]\n        );\n    }\n\n    #[test]\n    fn custom_boundary_is_applied() {\n        let form = Form::with_boundary(\"----WebKitFormBoundary0123456789\");\n\n        assert_eq!(form.boundary(), \"----WebKitFormBoundary0123456789\");\n    }\n}\n"
  },
  {
    "path": "src/client/request.rs",
    "content": "use std::{\n    convert::TryFrom,\n    fmt,\n    future::Future,\n    net::{IpAddr, Ipv4Addr, Ipv6Addr},\n    time::Duration,\n};\n\n#[cfg(any(feature = \"form\", feature = \"json\", feature = \"multipart\"))]\nuse http::header::CONTENT_TYPE;\nuse http::{Extensions, Uri, Version};\n#[cfg(any(feature = \"query\", feature = \"form\", feature = \"json\"))]\nuse serde::Serialize;\n#[cfg(feature = \"multipart\")]\nuse {super::multipart, bytes::Bytes, http::header::CONTENT_LENGTH};\n#[cfg(feature = \"cookies\")]\nuse {\n    crate::cookie::{CookieStore, IntoCookieStore},\n    std::sync::Arc,\n};\n\n#[cfg(any(\n    feature = \"gzip\",\n    feature = \"zstd\",\n    feature = \"brotli\",\n    feature = \"deflate\",\n))]\nuse super::layer::decoder::AcceptEncoding;\nuse super::{\n    Body, Client, IntoEmulation, Response,\n    future::Pending,\n    group::Group,\n    layer::{\n        config::{DefaultHeaders, RequestOptions},\n        timeout::TimeoutOptions,\n    },\n};\nuse crate::{\n    Error, Method, Proxy,\n    config::{RequestConfig, RequestConfigValue},\n    ext::UriExt,\n    header::{AUTHORIZATION, HeaderMap, HeaderName, HeaderValue, OrigHeaderMap},\n    redirect,\n};\n\n/// A request which can be executed with [`Client::execute()`].\n#[derive(Debug)]\npub struct Request(http::Request<Option<Body>>);\n\n/// A builder to construct the properties of a [`Request`].\n///\n/// To construct a [`RequestBuilder`], refer to the [`Client`] documentation.\n#[must_use = \"RequestBuilder does nothing until you 'send' it\"]\npub struct RequestBuilder {\n    client: Client,\n    request: crate::Result<Request>,\n}\n\nimpl Request {\n    /// Constructs a new [`Request`].\n    pub fn new(method: Method, uri: Uri) -> Self {\n        let mut request = http::Request::new(None);\n        *request.method_mut() = method;\n        *request.uri_mut() = uri;\n        Request(request)\n    }\n\n    /// Get the method.\n    #[inline]\n    pub fn method(&self) -> &Method {\n        
self.0.method()\n    }\n\n    /// Get a mutable reference to the method.\n    #[inline]\n    pub fn method_mut(&mut self) -> &mut Method {\n        self.0.method_mut()\n    }\n\n    /// Get the uri.\n    #[inline]\n    pub fn uri(&self) -> &Uri {\n        self.0.uri()\n    }\n\n    /// Get a mutable reference to the uri.\n    #[inline]\n    pub fn uri_mut(&mut self) -> &mut Uri {\n        self.0.uri_mut()\n    }\n\n    /// Get the headers.\n    #[inline]\n    pub fn headers(&self) -> &HeaderMap {\n        self.0.headers()\n    }\n\n    /// Get a mutable reference to the headers.\n    #[inline]\n    pub fn headers_mut(&mut self) -> &mut HeaderMap {\n        self.0.headers_mut()\n    }\n\n    /// Get the body.\n    #[inline]\n    pub fn body(&self) -> Option<&Body> {\n        self.0.body().as_ref()\n    }\n\n    /// Get a mutable reference to the body.\n    #[inline]\n    pub fn body_mut(&mut self) -> &mut Option<Body> {\n        self.0.body_mut()\n    }\n\n    /// Get the http version.\n    #[inline]\n    pub fn version(&self) -> Option<Version> {\n        self.config::<RequestOptions>()\n            .and_then(|opts| opts.version)\n    }\n\n    /// Get a mutable reference to the http version.\n    #[inline]\n    pub fn version_mut(&mut self) -> &mut Option<Version> {\n        &mut self\n            .config_mut::<RequestOptions>()\n            .get_or_insert_default()\n            .version\n    }\n\n    /// Returns a reference to the associated extensions.\n    ///\n    /// # Examples\n    ///\n    /// ```\n    /// # use wreq;\n    /// let request = wreq::get(\"http://httpbin.org/get\")\n    ///     .build()\n    ///     .expect(\"failed to build request\");\n    /// assert!(request.extensions().get::<i32>().is_none());\n    /// ```\n    #[inline]\n    pub fn extensions(&self) -> &Extensions {\n        self.0.extensions()\n    }\n\n    /// Returns a mutable reference to the associated extensions.\n    ///\n    /// # Examples\n    ///\n    /// ```\n    /// # use 
wreq;\n    /// let mut request = wreq::get(\"http://httpbin.org/get\")\n    ///     .build()\n    ///     .expect(\"failed to build request\");\n    /// request.extensions_mut().insert(\"hello\");\n    /// assert_eq!(request.extensions().get(), Some(&\"hello\"));\n    /// ```\n    #[inline]\n    pub fn extensions_mut(&mut self) -> &mut Extensions {\n        self.0.extensions_mut()\n    }\n\n    /// Attempt to clone the request.\n    ///\n    /// `None` is returned if the request can not be cloned, i.e. if the body is a stream.\n    pub fn try_clone(&self) -> Option<Request> {\n        let body = match self.body() {\n            Some(body) => Some(body.try_clone()?),\n            None => None,\n        };\n        let mut req = Request::new(self.method().clone(), self.uri().clone());\n        *req.headers_mut() = self.headers().clone();\n        *req.version_mut() = self.version();\n        *req.extensions_mut() = self.extensions().clone();\n        *req.body_mut() = body;\n        Some(req)\n    }\n\n    #[inline]\n    pub(crate) fn config<T>(&self) -> Option<&T::Value>\n    where\n        T: RequestConfigValue,\n    {\n        RequestConfig::<T>::get(self.extensions())\n    }\n\n    #[inline]\n    pub(crate) fn config_mut<T>(&mut self) -> &mut Option<T::Value>\n    where\n        T: RequestConfigValue,\n    {\n        RequestConfig::<T>::get_mut(self.extensions_mut())\n    }\n}\n\nimpl RequestBuilder {\n    pub(super) fn new(client: Client, request: crate::Result<Request>) -> RequestBuilder {\n        let mut builder = RequestBuilder { client, request };\n\n        let auth = builder\n            .request\n            .as_mut()\n            .ok()\n            .and_then(|req| extract_authority(req.uri_mut()));\n\n        if let Some((username, password)) = auth {\n            builder.basic_auth(username, password)\n        } else {\n            builder\n        }\n    }\n\n    /// Assemble a builder starting from an existing `Client` and a `Request`.\n    pub fn 
from_parts(client: Client, request: Request) -> RequestBuilder {\n        RequestBuilder {\n            client,\n            request: crate::Result::Ok(request),\n        }\n    }\n\n    /// Add a `Header` to this Request with ability to define if `header_value` is sensitive.\n    fn header_sensitive<K, V>(mut self, key: K, value: V, sensitive: bool) -> RequestBuilder\n    where\n        HeaderName: TryFrom<K>,\n        <HeaderName as TryFrom<K>>::Error: Into<http::Error>,\n        HeaderValue: TryFrom<V>,\n        <HeaderValue as TryFrom<V>>::Error: Into<http::Error>,\n    {\n        let mut error = None;\n        if let Ok(ref mut req) = self.request {\n            match <HeaderName as TryFrom<K>>::try_from(key) {\n                Ok(key) => match <HeaderValue as TryFrom<V>>::try_from(value) {\n                    Ok(mut value) => {\n                        // We want to potentially make an non-sensitive header\n                        // to be sensitive, not the reverse. So, don't turn off\n                        // a previously sensitive header.\n                        if sensitive {\n                            value.set_sensitive(true);\n                        }\n                        req.headers_mut().append(key, value);\n                    }\n                    Err(e) => error = Some(Error::builder(e.into())),\n                },\n                Err(e) => error = Some(Error::builder(e.into())),\n            };\n        }\n        if let Some(err) = error {\n            self.request = Err(err);\n        }\n        self\n    }\n\n    /// Add a `Header` to this Request.\n    ///\n    /// If the header is already present, the value will be replaced.\n    #[inline]\n    pub fn header<K, V>(self, key: K, value: V) -> RequestBuilder\n    where\n        HeaderName: TryFrom<K>,\n        <HeaderName as TryFrom<K>>::Error: Into<http::Error>,\n        HeaderValue: TryFrom<V>,\n        <HeaderValue as TryFrom<V>>::Error: Into<http::Error>,\n    {\n        
self.header_sensitive(key, value, false)\n    }\n\n    /// Add a set of Headers to the existing ones on this Request.\n    ///\n    /// The headers will be merged in to any already set.\n    pub fn headers(mut self, headers: HeaderMap) -> RequestBuilder {\n        if let Ok(ref mut req) = self.request {\n            crate::util::replace_headers(req.headers_mut(), headers);\n        }\n        self\n    }\n\n    /// Set the original headers for this request.\n    pub fn orig_headers(mut self, orig_headers: OrigHeaderMap) -> RequestBuilder {\n        if let Ok(ref mut req) = self.request {\n            req.config_mut::<OrigHeaderMap>().replace(orig_headers);\n        }\n        self\n    }\n\n    /// Enable or disable client default headers for this request.\n    ///\n    /// By default, client default headers are included. Set to `false` to skip them.\n    pub fn default_headers(mut self, enable: bool) -> RequestBuilder {\n        if let Ok(ref mut req) = self.request {\n            req.config_mut::<DefaultHeaders>().replace(enable);\n        }\n        self\n    }\n\n    /// Enable HTTP authentication.\n    ///\n    /// ```rust\n    /// # use wreq::Error;\n    /// #\n    /// # async fn run() -> Result<(), Error> {\n    /// let client = wreq::Client::new();\n    /// let resp = client\n    ///     .get(\"http://httpbin.org/get\")\n    ///     .auth(\"your_token_here\")\n    ///     .send()\n    ///     .await?;\n    /// # Ok(())\n    /// # }\n    /// ```\n    pub fn auth<V>(self, token: V) -> RequestBuilder\n    where\n        HeaderValue: TryFrom<V>,\n        <HeaderValue as TryFrom<V>>::Error: Into<http::Error>,\n    {\n        self.header_sensitive(AUTHORIZATION, token, true)\n    }\n\n    /// Enable HTTP basic authentication.\n    ///\n    /// ```rust\n    /// # use wreq::Error;\n    ///\n    /// # async fn run() -> Result<(), Error> {\n    /// let client = wreq::Client::new();\n    /// let resp = client\n    ///     .delete(\"http://httpbin.org/delete\")\n    
///     .basic_auth(\"admin\", Some(\"good password\"))\n    ///     .send()\n    ///     .await?;\n    /// # Ok(())\n    /// # }\n    /// ```\n    pub fn basic_auth<U, P>(self, username: U, password: Option<P>) -> RequestBuilder\n    where\n        U: fmt::Display,\n        P: fmt::Display,\n    {\n        let header_value = crate::util::basic_auth(username, password);\n        self.header_sensitive(AUTHORIZATION, header_value, true)\n    }\n\n    /// Enable HTTP bearer authentication.\n    ///\n    /// ```rust\n    /// # use wreq::Error;\n    /// #\n    /// # async fn run() -> Result<(), Error> {\n    /// let client = wreq::Client::new();\n    /// let resp = client\n    ///     .get(\"http://httpbin.org/get\")\n    ///     .bearer_auth(\"your_token_here\")\n    ///     .send()\n    ///     .await?;\n    /// # Ok(())\n    /// # }\n    /// ```\n    pub fn bearer_auth<T: fmt::Display>(self, token: T) -> RequestBuilder {\n        let header_value = format!(\"Bearer {token}\");\n        self.header_sensitive(AUTHORIZATION, header_value, true)\n    }\n\n    /// Enables a request timeout.\n    ///\n    /// The timeout is applied from when the request starts connecting until the\n    /// response body has finished. It affects only this request and overrides\n    /// the timeout configured using `ClientBuilder::timeout()`.\n    pub fn timeout(mut self, timeout: Duration) -> RequestBuilder {\n        if let Ok(ref mut req) = self.request {\n            req.config_mut::<TimeoutOptions>()\n                .get_or_insert_default()\n                .total_timeout(timeout);\n        }\n        self\n    }\n\n    /// Enables a read timeout.\n    ///\n    /// The read timeout is applied from when the response body starts being read\n    /// until the response body has finished. 
It affects only this request and\n    /// overrides the read timeout configured using `ClientBuilder::read_timeout()`.\n    pub fn read_timeout(mut self, timeout: Duration) -> RequestBuilder {\n        if let Ok(ref mut req) = self.request {\n            req.config_mut::<TimeoutOptions>()\n                .get_or_insert_default()\n                .read_timeout(timeout);\n        }\n        self\n    }\n\n    /// Modify the query string of the URI.\n    ///\n    /// Modifies the URI of this request, adding the parameters provided.\n    /// This method appends and does not overwrite. This means that it can\n    /// be called multiple times and that existing query parameters are not\n    /// overwritten if the same key is used. The key will simply show up\n    /// twice in the query string.\n    /// Calling `.query(&[(\"foo\", \"a\"), (\"foo\", \"b\")])` gives `\"foo=a&foo=b\"`.\n    ///\n    /// # Note\n    /// This method does not support serializing a single key-value\n    /// pair. Instead of using `.query((\"key\", \"val\"))`, use a sequence, such\n    /// as `.query(&[(\"key\", \"val\")])`. 
It's also possible to serialize structs\n    /// and maps into a key-value pair.\n    ///\n    /// # Errors\n    /// This method will fail if the object you provide cannot be serialized\n    /// into a query string.\n    #[cfg(feature = \"query\")]\n    #[cfg_attr(docsrs, doc(cfg(feature = \"query\")))]\n    pub fn query<T: Serialize + ?Sized>(mut self, query: &T) -> RequestBuilder {\n        let mut error = None;\n        if let Ok(ref mut req) = self.request {\n            match serde_html_form::to_string(query) {\n                Ok(serializer) => {\n                    let uri = req.uri_mut();\n                    uri.set_query(serializer);\n                }\n                Err(err) => error = Some(Error::builder(err)),\n            }\n        }\n        if let Some(err) = error {\n            self.request = Err(err);\n        }\n        self\n    }\n\n    /// Send a form body.\n    ///\n    /// Sets the body to the uri encoded serialization of the passed value,\n    /// and also sets the `Content-Type: application/x-www-form-urlencoded`\n    /// header.\n    ///\n    /// ```rust\n    /// # use wreq::Error;\n    /// # use std::collections::HashMap;\n    /// #\n    /// # async fn run() -> Result<(), Error> {\n    /// let mut params = HashMap::new();\n    /// params.insert(\"lang\", \"rust\");\n    ///\n    /// let client = wreq::Client::new();\n    /// let res = client\n    ///     .post(\"http://httpbin.org\")\n    ///     .form(&params)\n    ///     .send()\n    ///     .await?;\n    /// # Ok(())\n    /// # }\n    /// ```\n    ///\n    /// # Errors\n    ///\n    /// This method fails if the passed value cannot be serialized into\n    /// uri encoded format\n    #[cfg(feature = \"form\")]\n    #[cfg_attr(docsrs, doc(cfg(feature = \"form\")))]\n    pub fn form<T: Serialize + ?Sized>(mut self, form: &T) -> RequestBuilder {\n        if let Ok(ref mut req) = self.request {\n            match serde_html_form::to_string(form) {\n                Ok(body) => {\n      
              const HEADER_VALUE: HeaderValue =\n                        HeaderValue::from_static(\"application/x-www-form-urlencoded\");\n\n                    req.headers_mut()\n                        .entry(CONTENT_TYPE)\n                        .or_insert(HEADER_VALUE);\n                    req.body_mut().replace(body.into());\n                }\n                Err(err) => self.request = Err(Error::builder(err)),\n            }\n        }\n        self\n    }\n\n    /// Send a JSON body.\n    ///\n    /// # Optional\n    ///\n    /// This requires the optional `json` feature enabled.\n    ///\n    /// # Errors\n    ///\n    /// Serialization can fail if `T`'s implementation of `Serialize` decides to\n    /// fail, or if `T` contains a map with non-string keys.\n    #[cfg(feature = \"json\")]\n    #[cfg_attr(docsrs, doc(cfg(feature = \"json\")))]\n    pub fn json<T: Serialize + ?Sized>(mut self, json: &T) -> RequestBuilder {\n        if let Ok(ref mut req) = self.request {\n            match serde_json::to_vec(json) {\n                Ok(body) => {\n                    const HEADER_VALUE: HeaderValue = HeaderValue::from_static(\"application/json\");\n\n                    req.headers_mut()\n                        .entry(CONTENT_TYPE)\n                        .or_insert(HEADER_VALUE);\n                    req.body_mut().replace(body.into());\n                }\n                Err(err) => self.request = Err(Error::builder(err)),\n            }\n        }\n\n        self\n    }\n\n    /// Set the request body.\n    pub fn body<T: Into<Body>>(mut self, body: T) -> RequestBuilder {\n        if let Ok(ref mut req) = self.request {\n            *req.body_mut() = Some(body.into());\n        }\n        self\n    }\n\n    /// Sends a multipart/form-data body.\n    ///\n    /// ```\n    /// # use wreq::Error;\n    ///\n    /// # async fn run() -> Result<(), Error> {\n    /// let client = wreq::Client::new();\n    /// let form = wreq::multipart::Form::new()\n    ///     
.text(\"key3\", \"value3\")\n    ///     .text(\"key4\", \"value4\");\n    ///\n    /// let response = client.post(\"your uri\").multipart(form).send().await?;\n    /// # Ok(())\n    /// # }\n    /// ```\n    #[cfg(feature = \"multipart\")]\n    #[cfg_attr(docsrs, doc(cfg(feature = \"multipart\")))]\n    pub fn multipart(mut self, mut multipart: multipart::Form) -> RequestBuilder {\n        if let Ok(ref mut req) = self.request {\n            match HeaderValue::from_maybe_shared(Bytes::from(format!(\n                \"multipart/form-data; boundary={}\",\n                multipart.boundary()\n            ))) {\n                Ok(content_type) => {\n                    req.headers_mut()\n                        .entry(CONTENT_TYPE)\n                        .or_insert(content_type);\n\n                    if let Some(length) = multipart.compute_length() {\n                        req.headers_mut()\n                            .entry(CONTENT_LENGTH)\n                            .or_insert_with(|| HeaderValue::from(length));\n                    }\n\n                    *req.body_mut() = Some(multipart.stream())\n                }\n                Err(err) => {\n                    self.request = Err(Error::builder(err));\n                }\n            };\n        }\n\n        self\n    }\n\n    /// Set HTTP version\n    pub fn version(mut self, version: Version) -> RequestBuilder {\n        if let Ok(ref mut req) = self.request {\n            req.version_mut().replace(version);\n            req.config_mut::<RequestOptions>()\n                .get_or_insert_default()\n                .version = Some(version);\n        }\n        self\n    }\n\n    /// Set the redirect policy for this request.\n    pub fn redirect(mut self, policy: redirect::Policy) -> RequestBuilder {\n        if let Ok(ref mut req) = self.request {\n            req.config_mut::<redirect::Policy>().replace(policy);\n        }\n        self\n    }\n\n    /// Set the persistent cookie store for the 
request.\n    #[cfg(feature = \"cookies\")]\n    #[cfg_attr(docsrs, doc(cfg(feature = \"cookies\")))]\n    pub fn cookie_provider<C: IntoCookieStore>(mut self, cookie_store: C) -> RequestBuilder {\n        if let Ok(ref mut req) = self.request {\n            req.config_mut::<Arc<dyn CookieStore>>()\n                .replace(cookie_store.into_shared());\n        }\n        self\n    }\n\n    /// Sets if this request will announce that it accepts gzip encoding.\n    #[cfg(feature = \"gzip\")]\n    #[cfg_attr(docsrs, doc(cfg(feature = \"gzip\")))]\n    pub fn gzip(mut self, gzip: bool) -> RequestBuilder {\n        if let Ok(ref mut req) = self.request {\n            req.config_mut::<AcceptEncoding>()\n                .get_or_insert_default()\n                .gzip = gzip;\n        }\n        self\n    }\n\n    /// Sets if this request will announce that it accepts brotli encoding.\n    #[cfg(feature = \"brotli\")]\n    #[cfg_attr(docsrs, doc(cfg(feature = \"brotli\")))]\n    pub fn brotli(mut self, brotli: bool) -> RequestBuilder {\n        if let Ok(ref mut req) = self.request {\n            req.config_mut::<AcceptEncoding>()\n                .get_or_insert_default()\n                .brotli = brotli;\n        }\n        self\n    }\n\n    /// Sets if this request will announce that it accepts deflate encoding.\n    #[cfg(feature = \"deflate\")]\n    #[cfg_attr(docsrs, doc(cfg(feature = \"deflate\")))]\n    pub fn deflate(mut self, deflate: bool) -> RequestBuilder {\n        if let Ok(ref mut req) = self.request {\n            req.config_mut::<AcceptEncoding>()\n                .get_or_insert_default()\n                .deflate = deflate;\n        }\n        self\n    }\n\n    /// Sets if this request will announce that it accepts zstd encoding.\n    #[cfg(feature = \"zstd\")]\n    #[cfg_attr(docsrs, doc(cfg(feature = \"zstd\")))]\n    pub fn zstd(mut self, zstd: bool) -> RequestBuilder {\n        if let Ok(ref mut req) = self.request {\n            
req.config_mut::<AcceptEncoding>()\n                .get_or_insert_default()\n                .zstd = zstd;\n        }\n        self\n    }\n\n    /// Set the proxy for this request.\n    pub fn proxy(mut self, proxy: Proxy) -> RequestBuilder {\n        if let Ok(ref mut req) = self.request {\n            req.config_mut::<RequestOptions>()\n                .get_or_insert_default()\n                .proxy = Some(proxy.into_matcher());\n        }\n        self\n    }\n\n    /// Set the local address for this request.\n    pub fn local_address<V>(mut self, local_address: V) -> RequestBuilder\n    where\n        V: Into<Option<IpAddr>>,\n    {\n        if let Ok(ref mut req) = self.request {\n            req.config_mut::<RequestOptions>()\n                .get_or_insert_default()\n                .socket_bind_options\n                .get_or_insert_default()\n                .set_local_address(local_address);\n        }\n        self\n    }\n\n    /// Set the local addresses for this request.\n    pub fn local_addresses<V4, V6>(mut self, ipv4_address: V4, ipv6_address: V6) -> RequestBuilder\n    where\n        V4: Into<Option<Ipv4Addr>>,\n        V6: Into<Option<Ipv6Addr>>,\n    {\n        if let Ok(ref mut req) = self.request {\n            req.config_mut::<RequestOptions>()\n                .get_or_insert_default()\n                .socket_bind_options\n                .get_or_insert_default()\n                .set_local_addresses(ipv4_address, ipv6_address);\n        }\n        self\n    }\n\n    /// Bind connections only on the specified network interface.\n    ///\n    /// This option is only available on the following operating systems:\n    ///\n    /// - Android\n    /// - Fuchsia\n    /// - Linux,\n    /// - macOS and macOS-like systems (iOS, tvOS, watchOS and visionOS)\n    /// - Solaris and illumos\n    ///\n    /// On Android, Linux, and Fuchsia, this uses the\n    /// [`SO_BINDTODEVICE`][man-7-socket] socket option. 
On macOS and macOS-like\n    /// systems, Solaris, and illumos, this instead uses the [`IP_BOUND_IF` and\n    /// `IPV6_BOUND_IF`][man-7p-ip] socket options (as appropriate).\n    ///\n    /// Note that connections will fail if the provided interface name is not a\n    /// network interface that currently exists when a connection is established.\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// # fn doc() -> Result<(), wreq::Error> {\n    /// let interface = \"lo\";\n    /// let client = wreq::Client::builder()\n    ///     .interface(interface)\n    ///     .build()?;\n    /// # Ok(())\n    /// # }\n    /// ```\n    ///\n    /// [man-7-socket]: https://man7.org/linux/man-pages/man7/socket.7.html\n    /// [man-7p-ip]: https://docs.oracle.com/cd/E86824_01/html/E54777/ip-7p.html\n    #[cfg(any(\n        target_os = \"android\",\n        target_os = \"fuchsia\",\n        target_os = \"illumos\",\n        target_os = \"ios\",\n        target_os = \"linux\",\n        target_os = \"macos\",\n        target_os = \"solaris\",\n        target_os = \"tvos\",\n        target_os = \"visionos\",\n        target_os = \"watchos\",\n    ))]\n    #[cfg_attr(\n        docsrs,\n        doc(cfg(any(\n            target_os = \"android\",\n            target_os = \"fuchsia\",\n            target_os = \"illumos\",\n            target_os = \"ios\",\n            target_os = \"linux\",\n            target_os = \"macos\",\n            target_os = \"solaris\",\n            target_os = \"tvos\",\n            target_os = \"visionos\",\n            target_os = \"watchos\",\n        )))\n    )]\n    pub fn interface<I>(mut self, interface: I) -> RequestBuilder\n    where\n        I: Into<std::borrow::Cow<'static, str>>,\n    {\n        if let Ok(ref mut req) = self.request {\n            req.config_mut::<RequestOptions>()\n                .get_or_insert_default()\n                .socket_bind_options\n                .get_or_insert_default()\n                .set_interface(interface);\n  
      }\n        self\n    }\n\n    /// Sets the request builder to emulate the specified HTTP context.\n    ///\n    /// This method sets the necessary headers, HTTP/1 and HTTP/2 options configurations, and TLS\n    /// options config to use the specified HTTP context. It allows the client to mimic the\n    /// behavior of different versions or setups, which can be useful for testing or ensuring\n    /// compatibility with various environments.\n    ///\n    /// # Note\n    /// This will overwrite the existing configuration.\n    /// You must set emulation before you can perform subsequent HTTP1/HTTP2/TLS fine-tuning.\n    pub fn emulation<T: IntoEmulation>(mut self, emulation: T) -> RequestBuilder {\n        if let Ok(ref mut req) = self.request {\n            let emulation = emulation.into_emulation();\n            let opts = req.config_mut::<RequestOptions>().get_or_insert_default();\n            opts.group.emulate(emulation.group);\n            opts.tls_options = emulation.tls_options;\n            opts.http1_options = emulation.http1_options;\n            opts.http2_options = emulation.http2_options;\n            return self\n                .headers(emulation.headers)\n                .orig_headers(emulation.orig_headers);\n        }\n\n        self\n    }\n\n    /// Assigns a logical group to this request.\n    ///\n    /// Groups define the request's identity and execution context.\n    /// Requests in different groups are logically partitioned to ensure\n    /// resource isolation and prevent metadata leakage.\n    pub fn group(mut self, group: Group) -> RequestBuilder {\n        if let Ok(ref mut req) = self.request {\n            req.config_mut::<RequestOptions>()\n                .get_or_insert_default()\n                .group\n                .request(group);\n        }\n        self\n    }\n\n    /// Build a `Request`, which can be inspected, modified and executed with\n    /// [`Client::execute()`].\n    #[inline]\n    pub fn build(self) -> 
crate::Result<Request> {\n        self.request\n    }\n\n    /// Build a `Request`, which can be inspected, modified and executed with\n    /// [`Client::execute()`].\n    ///\n    /// This is similar to [`RequestBuilder::build()`], but also returns the\n    /// embedded [`Client`].\n    #[inline]\n    pub fn build_split(self) -> (Client, crate::Result<Request>) {\n        (self.client, self.request)\n    }\n\n    /// Constructs the Request and sends it to the target URI, returning a\n    /// future Response.\n    ///\n    /// # Errors\n    ///\n    /// This method fails if there was an error while sending request,\n    /// redirect loop was detected or redirect limit was exhausted.\n    ///\n    /// # Example\n    ///\n    /// ```no_run\n    /// # use wreq::Error;\n    /// #\n    /// # async fn run() -> Result<(), Error> {\n    /// let response = wreq::Client::new().get(\"https://hyper.rs\").send().await?;\n    /// # Ok(())\n    /// # }\n    /// ```\n    pub fn send(self) -> impl Future<Output = crate::Result<Response>> {\n        match self.request {\n            Ok(req) => self.client.execute(req),\n            Err(err) => Pending::Error { error: Some(err) },\n        }\n    }\n\n    /// Attempt to clone the RequestBuilder.\n    ///\n    /// `None` is returned if the RequestBuilder can not be cloned,\n    /// i.e. 
if the request body is a stream.\n    ///\n    /// # Examples\n    ///\n    /// ```\n    /// # use wreq::Error;\n    /// #\n    /// # fn run() -> Result<(), Error> {\n    /// let client = wreq::Client::new();\n    /// let builder = client.post(\"http://httpbin.org/post\").body(\"from a &str!\");\n    /// let clone = builder.try_clone();\n    /// assert!(clone.is_some());\n    /// # Ok(())\n    /// # }\n    /// ```\n    pub fn try_clone(&self) -> Option<RequestBuilder> {\n        self.request\n            .as_ref()\n            .ok()\n            .and_then(|req| req.try_clone())\n            .map(|req| RequestBuilder {\n                client: self.client.clone(),\n                request: Ok(req),\n            })\n    }\n}\n\n/// Check the request URI for a \"username:password\" type authority, and if\n/// found, remove it from the URI and return it.\nfn extract_authority(uri: &mut Uri) -> Option<(String, Option<String>)> {\n    use percent_encoding::percent_decode;\n\n    let (username, password) = uri.userinfo();\n\n    let username: String = percent_decode(username?.as_bytes())\n        .decode_utf8()\n        .ok()?\n        .into();\n    let password = password.and_then(|pass| {\n        percent_decode(pass.as_bytes())\n            .decode_utf8()\n            .ok()\n            .map(String::from)\n    });\n\n    if !username.is_empty() || password.is_some() {\n        uri.set_userinfo(\"\", None);\n        return Some((username, password));\n    }\n\n    None\n}\n\nimpl<T: Into<Body>> From<http::Request<T>> for Request {\n    #[inline]\n    fn from(req: http::Request<T>) -> Request {\n        Request(req.map(Into::into).map(Some))\n    }\n}\n\nimpl From<Request> for http::Request<Body> {\n    #[inline]\n    fn from(req: Request) -> http::Request<Body> {\n        req.0.map(|body| body.unwrap_or_else(Body::empty))\n    }\n}\n"
  },
  {
    "path": "src/client/response.rs",
    "content": "use std::{\n    net::SocketAddr,\n    pin::Pin,\n    task::{Context, Poll},\n};\n\nuse bytes::Bytes;\n#[cfg(feature = \"charset\")]\nuse encoding_rs::{Encoding, UTF_8};\n#[cfg(feature = \"stream\")]\nuse futures_util::Stream;\nuse http::{HeaderMap, StatusCode, Uri, Version};\nuse http_body::{Body as HttpBody, Frame};\nuse http_body_util::{BodyExt, Collected};\n#[cfg(feature = \"charset\")]\nuse mime::Mime;\n#[cfg(feature = \"json\")]\nuse serde::de::DeserializeOwned;\n\nuse super::{\n    conn::HttpInfo,\n    core::{http1::ext::ReasonPhrase, upgrade},\n};\n#[cfg(feature = \"cookies\")]\nuse crate::cookie;\nuse crate::{Body, Error, Upgraded, client::Connected, error::BoxError, ext::RequestUri};\n\n/// A Response to a submitted [`crate::Request`].\n#[derive(Debug)]\npub struct Response {\n    uri: Uri,\n    res: http::Response<Body>,\n}\n\nimpl Response {\n    #[inline]\n    pub(super) fn new<B>(mut res: http::Response<B>, uri: Uri) -> Response\n    where\n        B: HttpBody + Send + Sync + 'static,\n        B::Data: Into<Bytes>,\n        B::Error: Into<BoxError>,\n    {\n        Response {\n            uri: res\n                .extensions_mut()\n                .remove::<RequestUri>()\n                .map_or(uri, |request_uri| request_uri.0),\n            res: res.map(Body::wrap),\n        }\n    }\n\n    /// Get the final [`Uri`] of this [`Response`].\n    #[inline]\n    pub fn uri(&self) -> &Uri {\n        &self.uri\n    }\n\n    /// Get the [`StatusCode`] of this [`Response`].\n    #[inline]\n    pub fn status(&self) -> StatusCode {\n        self.res.status()\n    }\n\n    /// Get the HTTP [`Version`] of this [`Response`].\n    #[inline]\n    pub fn version(&self) -> Version {\n        self.res.version()\n    }\n\n    /// Get the [`HeaderMap`] of this [`Response`].\n    #[inline]\n    pub fn headers(&self) -> &HeaderMap {\n        self.res.headers()\n    }\n\n    /// Get a mutable reference to the [`HeaderMap`] of this [`Response`].\n    
#[inline]\n    pub fn headers_mut(&mut self) -> &mut HeaderMap {\n        self.res.headers_mut()\n    }\n\n    /// Get the content length of the [`Response`], if it is known.\n    ///\n    /// This value does not directly represents the value of the `Content-Length`\n    /// header, but rather the size of the response's body. To read the header's\n    /// value, please use the [`Response::headers`] method instead.\n    ///\n    /// Reasons it may not be known:\n    ///\n    /// - The response does not include a body (e.g. it responds to a `HEAD` request).\n    /// - The response is gzipped and automatically decoded (thus changing the actual decoded\n    ///   length).\n    #[inline]\n    pub fn content_length(&self) -> Option<u64> {\n        HttpBody::size_hint(self.res.body()).exact()\n    }\n\n    /// Retrieve the cookies contained in the [`Response`].\n    ///\n    /// Note that invalid 'Set-Cookie' headers will be ignored.\n    ///\n    /// # Optional\n    ///\n    /// This requires the optional `cookies` feature to be enabled.\n    #[cfg(feature = \"cookies\")]\n    pub fn cookies(&self) -> impl Iterator<Item = cookie::Cookie<'_>> {\n        self.res\n            .headers()\n            .get_all(crate::header::SET_COOKIE)\n            .iter()\n            .map(cookie::Cookie::parse)\n            .filter_map(Result::ok)\n    }\n\n    /// Get the local address used to get this [`Response`].\n    pub fn local_addr(&self) -> Option<SocketAddr> {\n        self.res\n            .extensions()\n            .get::<HttpInfo>()\n            .map(HttpInfo::local_addr)\n    }\n\n    /// Get the remote address used to get this [`Response`].\n    pub fn remote_addr(&self) -> Option<SocketAddr> {\n        self.res\n            .extensions()\n            .get::<HttpInfo>()\n            .map(HttpInfo::remote_addr)\n    }\n\n    // body methods\n\n    /// Get the full response text.\n    ///\n    /// This method decodes the response body with BOM sniffing\n    /// and with 
malformed sequences replaced with the [`char::REPLACEMENT_CHARACTER`].\n    /// Encoding is determined from the `charset` parameter of `Content-Type` header,\n    /// and defaults to `utf-8` if not presented.\n    ///\n    /// Note that the BOM is stripped from the returned String.\n    ///\n    /// # Note\n    ///\n    /// If the `charset` feature is disabled the method will only attempt to decode the\n    /// response as UTF-8, regardless of the given `Content-Type`\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// # async fn run() -> Result<(), Box<dyn std::error::Error>> {\n    /// let content = wreq::Client::new()\n    ///     .get(\"http://httpbin.org/range/26\")\n    ///     .send()\n    ///     .await?\n    ///     .text()\n    ///     .await?;\n    ///\n    /// println!(\"text: {content:?}\");\n    /// # Ok(())\n    /// # }\n    /// ```\n    pub async fn text(self) -> crate::Result<String> {\n        #[cfg(feature = \"charset\")]\n        {\n            self.text_with_charset(\"utf-8\").await\n        }\n\n        #[cfg(not(feature = \"charset\"))]\n        {\n            let full = self.bytes().await?;\n            let text = String::from_utf8_lossy(&full);\n            Ok(text.into_owned())\n        }\n    }\n\n    /// Get the full response text given a specific encoding.\n    ///\n    /// This method decodes the response body with BOM sniffing\n    /// and with malformed sequences replaced with the\n    /// [`char::REPLACEMENT_CHARACTER`].\n    /// You can provide a default encoding for decoding the raw message, while the\n    /// `charset` parameter of `Content-Type` header is still prioritized. 
For more information\n    /// about the possible encoding name, please go to [`encoding_rs`] docs.\n    ///\n    /// Note that the BOM is stripped from the returned String.\n    ///\n    /// [`encoding_rs`]: https://docs.rs/encoding_rs/0.8/encoding_rs/#relationship-with-windows-code-pages\n    ///\n    /// # Optional\n    ///\n    /// This requires the optional `encoding_rs` feature enabled.\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// # async fn run() -> Result<(), Box<dyn std::error::Error>> {\n    /// let content = wreq::Client::new()\n    ///     .get(\"http://httpbin.org/range/26\")\n    ///     .send()\n    ///     .await?\n    ///     .text_with_charset(\"utf-8\")\n    ///     .await?;\n    ///\n    /// println!(\"text: {content:?}\");\n    /// # Ok(())\n    /// # }\n    /// ```\n    #[cfg(feature = \"charset\")]\n    #[cfg_attr(docsrs, doc(cfg(feature = \"charset\")))]\n    pub async fn text_with_charset(\n        self,\n        default_encoding: impl AsRef<str>,\n    ) -> crate::Result<String> {\n        let content_type = self\n            .headers()\n            .get(crate::header::CONTENT_TYPE)\n            .and_then(|value| value.to_str().ok())\n            .and_then(|value| value.parse::<Mime>().ok());\n        let encoding_name = content_type\n            .as_ref()\n            .and_then(|mime| mime.get_param(\"charset\").map(|charset| charset.as_str()))\n            .unwrap_or(default_encoding.as_ref());\n        let encoding = Encoding::for_label(encoding_name.as_bytes()).unwrap_or(UTF_8);\n\n        let full = self.bytes().await?;\n        let (text, _, _) = encoding.decode(&full);\n        Ok(text.into_owned())\n    }\n\n    /// Try to deserialize the response body as JSON.\n    ///\n    /// # Optional\n    ///\n    /// This requires the optional `json` feature enabled.\n    ///\n    /// # Examples\n    ///\n    /// ```\n    /// # extern crate wreq;\n    /// # extern crate serde;\n    /// #\n    /// # use wreq::Error;\n    /// # use 
serde::Deserialize;\n    /// #\n    /// // This `derive` requires the `serde` dependency.\n    /// #[derive(Deserialize)]\n    /// struct Ip {\n    ///     origin: String,\n    /// }\n    ///\n    /// # async fn run() -> Result<(), Error> {\n    /// let ip = wreq::Client::new()\n    ///     .get(\"http://httpbin.org/ip\")\n    ///     .send()\n    ///     .await?\n    ///     .json::<Ip>()\n    ///     .await?;\n    ///\n    /// println!(\"ip: {}\", ip.origin);\n    /// # Ok(())\n    /// # }\n    /// #\n    /// # fn main() { }\n    /// ```\n    ///\n    /// # Errors\n    ///\n    /// This method fails whenever the response body is not in JSON format\n    /// or it cannot be properly deserialized to target type `T`. For more\n    /// details please see [`serde_json::from_reader`].\n    ///\n    /// [`serde_json::from_reader`]: https://docs.serde.rs/serde_json/fn.from_reader.html\n    #[cfg(feature = \"json\")]\n    #[cfg_attr(docsrs, doc(cfg(feature = \"json\")))]\n    pub async fn json<T: DeserializeOwned>(self) -> crate::Result<T> {\n        let full = self.bytes().await?;\n        serde_json::from_slice(&full).map_err(Error::decode)\n    }\n\n    /// Get the full response body as [`Bytes`].\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// # async fn run() -> Result<(), Box<dyn std::error::Error>> {\n    /// let bytes = wreq::Client::new()\n    ///     .get(\"http://httpbin.org/ip\")\n    ///     .send()\n    ///     .await?\n    ///     .bytes()\n    ///     .await?;\n    ///\n    /// println!(\"bytes: {bytes:?}\");\n    /// # Ok(())\n    /// # }\n    /// ```\n    #[inline]\n    pub async fn bytes(self) -> crate::Result<Bytes> {\n        BodyExt::collect(self.res.into_body())\n            .await\n            .map(Collected::<Bytes>::to_bytes)\n    }\n\n    /// Convert the response into a [`Stream`] of [`Bytes`] from the body.\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// use futures_util::StreamExt;\n    ///\n    /// # async fn run() -> 
Result<(), Box<dyn std::error::Error>> {\n    /// let mut stream = wreq::Client::new()\n    ///     .get(\"http://httpbin.org/ip\")\n    ///     .send()\n    ///     .await?\n    ///     .bytes_stream();\n    ///\n    /// while let Some(item) = stream.next().await {\n    ///     println!(\"Chunk: {:?}\", item?);\n    /// }\n    /// # Ok(())\n    /// # }\n    /// ```\n    ///\n    /// # Optional\n    ///\n    /// This requires the optional `stream` feature to be enabled.\n    #[inline]\n    #[cfg(feature = \"stream\")]\n    #[cfg_attr(docsrs, doc(cfg(feature = \"stream\")))]\n    pub fn bytes_stream(self) -> impl Stream<Item = crate::Result<Bytes>> {\n        http_body_util::BodyDataStream::new(self.res.into_body())\n    }\n\n    // extension methods\n\n    /// Returns a reference to the associated extensions.\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// # use wreq::{Client, tls::TlsInfo};\n    /// # async fn run() -> wreq::Result<()> {\n    /// // Build a client that records TLS information.\n    /// let client = Client::builder()\n    ///     .tls_info(true)\n    ///     .build()?;\n    ///\n    /// // Make a request.\n    /// let resp = client.get(\"https://www.google.com\").send().await?;\n    ///\n    /// // Take the TlsInfo extension to inspect it.\n    /// if let Some(tls_info) = resp.extensions().get::<TlsInfo>() {\n    ///     // Now you own the TlsInfo and can process it.\n    ///     println!(\"Peer certificate: {:?}\", tls_info.peer_certificate());\n    /// }\n    ///\n    /// # Ok(())\n    /// # }\n    /// ```\n    #[inline]\n    pub fn extensions(&self) -> &http::Extensions {\n        self.res.extensions()\n    }\n\n    /// Returns a mutable reference to the associated extensions.\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// # use wreq::{Client, tls::TlsInfo};\n    /// # async fn run() -> wreq::Result<()> {\n    /// // Build a client that records TLS information.\n    /// let client = Client::builder()\n    ///     
.tls_info(true)\n    ///     .build()?;\n    ///\n    /// // Make a request.\n    /// let mut resp = client.get(\"https://www.google.com\").send().await?;\n    ///\n    /// // Take the TlsInfo extension to inspect it.\n    /// if let Some(tls_info) = resp.extensions_mut().remove::<TlsInfo>() {\n    ///     // Now you own the TlsInfo and can process it.\n    ///     println!(\"Peer certificate: {:?}\", tls_info.peer_certificate());\n    /// }\n    ///\n    /// # Ok(())\n    /// # }\n    /// ```\n    #[inline]\n    pub fn extensions_mut(&mut self) -> &mut http::Extensions {\n        self.res.extensions_mut()\n    }\n\n    /// Forbids the [`Response`] connection from being recycled back into the pool.\n    ///\n    /// This marks the underlying connection as \"poisoned.\" Once marked, the connection\n    /// will be discarded instead of reused after the current request-response cycle completes.\n    ///\n    /// # Note on Lifecycle\n    /// Marking the connection does not trigger an immediate shutdown. 
For pooled\n    /// connections, the physical closure is deferred until the `Response` body\n    /// is dropped or the pool's background cleaner reclaims the resource.\n    #[inline]\n    pub fn forbid_recycle(&self) {\n        self.res\n            .extensions()\n            .get::<Connected>()\n            .map(Connected::poison);\n    }\n\n    // util methods\n\n    /// Turn a response into an error if the server returned an error.\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// # use wreq::Response;\n    /// fn on_response(res: Response) {\n    ///     match res.error_for_status() {\n    ///         Ok(_res) => (),\n    ///         Err(err) => {\n    ///             // asserting a 400 as an example\n    ///             // it could be any status between 400...599\n    ///             assert_eq!(err.status(), Some(wreq::StatusCode::BAD_REQUEST));\n    ///         }\n    ///     }\n    /// }\n    /// # fn main() {}\n    /// ```\n    pub fn error_for_status(mut self) -> crate::Result<Self> {\n        let status = self.status();\n        if status.is_client_error() || status.is_server_error() {\n            let reason = self.res.extensions_mut().remove::<ReasonPhrase>();\n            Err(Error::status_code(self.uri, status, reason))\n        } else {\n            Ok(self)\n        }\n    }\n\n    /// Turn a reference to a response into an error if the server returned an error.\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// # use wreq::Response;\n    /// fn on_response(res: &Response) {\n    ///     match res.error_for_status_ref() {\n    ///         Ok(_res) => (),\n    ///         Err(err) => {\n    ///             // asserting a 400 as an example\n    ///             // it could be any status between 400...599\n    ///             assert_eq!(err.status(), Some(wreq::StatusCode::BAD_REQUEST));\n    ///         }\n    ///     }\n    /// }\n    /// # fn main() {}\n    /// ```\n    pub fn error_for_status_ref(&self) -> crate::Result<&Self> {\n   
     let status = self.status();\n        if status.is_client_error() || status.is_server_error() {\n            let reason = self.res.extensions().get::<ReasonPhrase>().cloned();\n            Err(Error::status_code(self.uri.clone(), status, reason))\n        } else {\n            Ok(self)\n        }\n    }\n\n    /// Consumes the [`Response`] and returns a future for a possible HTTP upgrade.\n    #[inline]\n    pub async fn upgrade(self) -> crate::Result<Upgraded> {\n        upgrade::on(self.res).await.map_err(Error::upgrade)\n    }\n}\n\n/// I'm not sure this conversion is that useful... People should be encouraged\n/// to use [`http::Response`], not `wreq::Response`.\nimpl<T: Into<Body>> From<http::Response<T>> for Response {\n    fn from(r: http::Response<T>) -> Response {\n        let mut res = r.map(Into::into);\n        let uri = res\n            .extensions_mut()\n            .remove::<RequestUri>()\n            .unwrap_or_else(|| RequestUri(Uri::from_static(\"http://no.url.provided.local\")));\n        Response { res, uri: uri.0 }\n    }\n}\n\n/// A [`Response`] can be converted into a [`http::Response`].\n// It's supposed to be the inverse of the conversion above.\nimpl From<Response> for http::Response<Body> {\n    fn from(r: Response) -> http::Response<Body> {\n        let mut res = r.res.map(Body::wrap);\n        res.extensions_mut().insert(RequestUri(r.uri));\n        res\n    }\n}\n\n/// A [`Response`] can be piped as the [`Body`] of another request.\nimpl From<Response> for Body {\n    #[inline]\n    fn from(r: Response) -> Body {\n        Body::wrap(r.res.into_body())\n    }\n}\n\n/// A [`Response`] implements [`HttpBody`] to allow streaming the body.\nimpl HttpBody for Response {\n    type Data = Bytes;\n\n    type Error = Error;\n\n    #[inline]\n    fn poll_frame(\n        mut self: Pin<&mut Self>,\n        cx: &mut Context<'_>,\n    ) -> Poll<Option<Result<Frame<Self::Data>, Self::Error>>> {\n        
Pin::new(self.res.body_mut()).poll_frame(cx)\n    }\n\n    #[inline]\n    fn is_end_stream(&self) -> bool {\n        self.res.body().is_end_stream()\n    }\n\n    #[inline]\n    fn size_hint(&self) -> http_body::SizeHint {\n        self.res.body().size_hint()\n    }\n}\n"
  },
  {
    "path": "src/client/ws/json.rs",
    "content": "use serde::{Serialize, de::DeserializeOwned};\n\nuse super::{Message, Utf8Bytes};\nuse crate::Error;\n\nimpl Message {\n    /// Tries to serialize the JSON as a [`Message::Text`].\n    ///\n    /// # Optional\n    ///\n    /// This requires the optional `json` feature enabled.\n    ///\n    /// # Errors\n    ///\n    /// Serialization can fail if `T`'s implementation of `Serialize` decides to\n    /// fail, or if `T` contains a map with non-string keys.\n    #[cfg_attr(docsrs, doc(cfg(feature = \"json\")))]\n    pub fn text_from_json<T: Serialize + ?Sized>(json: &T) -> crate::Result<Self> {\n        serde_json::to_string(json)\n            .map(Utf8Bytes::from)\n            .map(Message::Text)\n            .map_err(Error::decode)\n    }\n\n    /// Tries to serialize the JSON as a [`Message::Binary`].\n    ///\n    /// # Optional\n    ///\n    /// This requires that the optional `json` feature is enabled.\n    ///\n    /// # Errors\n    ///\n    /// Serialization can fail if `T`'s implementation of `Serialize` decides to\n    /// fail, or if `T` contains a map with non-string keys.\n    #[cfg_attr(docsrs, doc(cfg(feature = \"json\")))]\n    pub fn binary_from_json<T: Serialize + ?Sized>(json: &T) -> crate::Result<Self> {\n        serde_json::to_vec(json)\n            .map(bytes::Bytes::from)\n            .map(Message::Binary)\n            .map_err(Error::decode)\n    }\n\n    /// Tries to deserialize the message body as JSON.\n    ///\n    /// # Optional\n    ///\n    /// This requires that the optional `json` feature is enabled.\n    ///\n    /// # Errors\n    ///\n    /// This method fails whenever the response body is not in `JSON` format,\n    /// or it cannot be properly deserialized to target type `T`.\n    ///\n    /// For more details please see [`serde_json::from_str`] and\n    /// [`serde_json::from_slice`].\n    #[cfg_attr(docsrs, doc(cfg(feature = \"json\")))]\n    pub fn json<T: DeserializeOwned>(&self) -> crate::Result<T> {\n        use 
serde::de::Error as _;\n        match self {\n            Self::Text(x) => serde_json::from_str(x).map_err(Error::decode),\n            Self::Binary(x) => serde_json::from_slice(x).map_err(Error::decode),\n            Self::Ping(_) | Self::Pong(_) | Self::Close { .. } => Err(Error::decode(\n                serde_json::Error::custom(\"neither text nor binary\"),\n            )),\n        }\n    }\n}\n\n#[cfg(test)]\nmod test {\n    use serde::{Deserialize, Serialize};\n\n    use super::Message;\n\n    #[derive(Default, Serialize, Deserialize)]\n    struct Content {\n        message: String,\n    }\n\n    #[test]\n    pub fn text_json() -> crate::Result<()> {\n        let content = Content::default();\n        let message = Message::text_from_json(&content)?;\n        assert!(matches!(message, Message::Text(_)));\n        let _: Content = message.json()?;\n\n        Ok(())\n    }\n\n    #[test]\n    pub fn binary_json() -> crate::Result<()> {\n        let content = Content::default();\n        let message = Message::binary_from_json(&content)?;\n        assert!(matches!(message, Message::Binary(_)));\n        let _: Content = message.json()?;\n\n        Ok(())\n    }\n}\n"
  },
  {
    "path": "src/client/ws/message.rs",
    "content": "//! WebSocket message types and utilities\n//!\n//! This module provides WebSocket message types that wrap the underlying\n//! tungstenite message implementation, offering a more ergonomic API\n//! for working with WebSocket communications.\n\nuse std::{fmt, ops::Deref};\n\nuse bytes::Bytes;\n\nuse super::tungstenite;\nuse crate::Error;\n\n/// UTF-8 wrapper for [Bytes].\n///\n/// An [Utf8Bytes] is always guaranteed to contain valid UTF-8.\n#[derive(Debug, Clone, PartialEq, Eq, Default)]\npub struct Utf8Bytes(pub(super) tungstenite::Utf8Bytes);\n\nimpl Utf8Bytes {\n    /// Creates from a static str.\n    #[inline]\n    pub const fn from_static(str: &'static str) -> Self {\n        Self(tungstenite::Utf8Bytes::from_static(str))\n    }\n\n    /// Returns as a string slice.\n    #[inline]\n    pub fn as_str(&self) -> &str {\n        self.0.as_str()\n    }\n}\n\nimpl Deref for Utf8Bytes {\n    type Target = str;\n\n    /// ```\n    /// /// Example fn that takes a str slice\n    /// fn a(s: &str) {}\n    ///\n    /// let data = wreq::Utf8Bytes::from_static(\"foo123\");\n    ///\n    /// // auto-deref as arg\n    /// a(&data);\n    ///\n    /// // deref to str methods\n    /// assert_eq!(data.len(), 6);\n    /// ```\n    #[inline]\n    fn deref(&self) -> &Self::Target {\n        self.as_str()\n    }\n}\n\nimpl fmt::Display for Utf8Bytes {\n    #[inline]\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> fmt::Result {\n        f.write_str(self.as_str())\n    }\n}\n\nimpl TryFrom<Bytes> for Utf8Bytes {\n    type Error = std::str::Utf8Error;\n\n    #[inline]\n    fn try_from(bytes: Bytes) -> Result<Self, Self::Error> {\n        Ok(Self(bytes.try_into()?))\n    }\n}\n\nimpl TryFrom<Vec<u8>> for Utf8Bytes {\n    type Error = std::str::Utf8Error;\n\n    #[inline]\n    fn try_from(v: Vec<u8>) -> Result<Self, Self::Error> {\n        Ok(Self(v.try_into()?))\n    }\n}\n\nimpl From<String> for Utf8Bytes {\n    #[inline]\n    fn from(s: String) -> Self {\n        
Self(s.into())\n    }\n}\n\nimpl From<&str> for Utf8Bytes {\n    #[inline]\n    fn from(s: &str) -> Self {\n        Self(s.into())\n    }\n}\n\nimpl From<&String> for Utf8Bytes {\n    #[inline]\n    fn from(s: &String) -> Self {\n        Self(s.into())\n    }\n}\n\nimpl From<Utf8Bytes> for Bytes {\n    #[inline]\n    fn from(Utf8Bytes(bytes): Utf8Bytes) -> Self {\n        bytes.into()\n    }\n}\n\nimpl<T> PartialEq<T> for Utf8Bytes\nwhere\n    for<'a> &'a str: PartialEq<T>,\n{\n    /// ```\n    /// let payload = wreq::Utf8Bytes::from_static(\"foo123\");\n    /// assert_eq!(payload, \"foo123\");\n    /// assert_eq!(payload, \"foo123\".to_string());\n    /// assert_eq!(payload, &\"foo123\".to_string());\n    /// assert_eq!(payload, std::borrow::Cow::from(\"foo123\"));\n    /// ```\n    #[inline]\n    fn eq(&self, other: &T) -> bool {\n        self.as_str() == *other\n    }\n}\n\n/// Status code used to indicate why an endpoint is closing the WebSocket connection.\n#[derive(Debug, Clone, Eq, PartialEq)]\npub struct CloseCode(pub(super) u16);\n\nimpl CloseCode {\n    //! Constants for [`CloseCode`]s.\n    //!\n    //! 
[`CloseCode`]: super::CloseCode\n\n    /// Indicates a normal closure, meaning that the purpose for which the connection was\n    /// established has been fulfilled.\n    pub const NORMAL: CloseCode = CloseCode(1000);\n\n    /// Indicates that an endpoint is \"going away\", such as a server going down or a browser having\n    /// navigated away from a page.\n    pub const AWAY: CloseCode = CloseCode(1001);\n\n    /// Indicates that an endpoint is terminating the connection due to a protocol error.\n    pub const PROTOCOL: CloseCode = CloseCode(1002);\n\n    /// Indicates that an endpoint is terminating the connection because it has received a type of\n    /// data that it cannot accept.\n    ///\n    /// For example, an endpoint MAY send this if it understands only text data, but receives a\n    /// binary message.\n    pub const UNSUPPORTED: CloseCode = CloseCode(1003);\n\n    /// Indicates that no status code was included in a closing frame.\n    pub const STATUS: CloseCode = CloseCode(1005);\n\n    /// Indicates an abnormal closure.\n    pub const ABNORMAL: CloseCode = CloseCode(1006);\n\n    /// Indicates that an endpoint is terminating the connection because it has received data\n    /// within a message that was not consistent with the type of the message.\n    ///\n    /// For example, an endpoint received non-UTF-8 RFC3629 data within a text message.\n    pub const INVALID: CloseCode = CloseCode(1007);\n\n    /// Indicates that an endpoint is terminating the connection because it has received a message\n    /// that violates its policy.\n    ///\n    /// This is a generic status code that can be returned when there is\n    /// no other more suitable status code (e.g., `UNSUPPORTED` or `SIZE`) or if there is a need to\n    /// hide specific details about the policy.\n    pub const POLICY: CloseCode = CloseCode(1008);\n\n    /// Indicates that an endpoint is terminating the connection because it has received a message\n    /// that is too big for it to 
process.\n    pub const SIZE: CloseCode = CloseCode(1009);\n\n    /// Indicates that an endpoint (client) is terminating the connection because the server\n    /// did not respond to extension negotiation correctly.\n    ///\n    /// Specifically, the client has expected the server to negotiate one or more extension(s),\n    /// but the server didn't return them in the response message of the WebSocket handshake.\n    /// The list of extensions that are needed should be given as the reason for closing.\n    /// Note that this status code is not used by the server,\n    /// because it can fail the WebSocket handshake instead.\n    pub const EXTENSION: CloseCode = CloseCode(1010);\n\n    /// Indicates that a server is terminating the connection because it encountered an unexpected\n    /// condition that prevented it from fulfilling the request.\n    pub const ERROR: CloseCode = CloseCode(1011);\n\n    /// Indicates that the server is restarting.\n    pub const RESTART: CloseCode = CloseCode(1012);\n\n    /// Indicates that the server is overloaded and the client should either connect to a different\n    /// IP (when multiple targets exist), or reconnect to the same IP when a user has performed an\n    /// action.\n    pub const AGAIN: CloseCode = CloseCode(1013);\n}\n\nimpl From<CloseCode> for u16 {\n    #[inline]\n    fn from(code: CloseCode) -> u16 {\n        code.0\n    }\n}\n\nimpl From<u16> for CloseCode {\n    #[inline]\n    fn from(code: u16) -> CloseCode {\n        CloseCode(code)\n    }\n}\n\n/// A struct representing the close command.\n#[derive(Debug, Clone, Eq, PartialEq)]\npub struct CloseFrame {\n    /// The reason as a code.\n    pub code: CloseCode,\n    /// The reason as text string.\n    pub reason: Utf8Bytes,\n}\n\n/// A WebSocket message.\n//\n// This code comes from https://github.com/snapview/tungstenite-rs/blob/master/src/protocol/message.rs and is under following license:\n// Copyright (c) 2017 Alexey Galakhov\n// Copyright (c) 2016 Jason 
Housley\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n// THE SOFTWARE.\n#[derive(Debug, Eq, PartialEq, Clone)]\npub enum Message {\n    /// A text WebSocket message\n    Text(Utf8Bytes),\n    /// A binary WebSocket message\n    Binary(Bytes),\n    /// A ping message with the specified payload\n    ///\n    /// The payload here must have a length less than 125 bytes.\n    ///\n    /// Ping messages will be automatically responded to by the server, so you do not have to worry\n    /// about dealing with them yourself.\n    Ping(Bytes),\n    /// A pong message with the specified payload\n    ///\n    /// The payload here must have a length less than 125 bytes.\n    ///\n    /// Pong messages will be automatically sent to the client if a ping message is received, so\n    /// you do not have to worry about constructing them yourself unless you want to implement a\n    /// [unidirectional heartbeat](https://tools.ietf.org/html/rfc6455#section-5.5.3).\n    
Pong(Bytes),\n    /// A close message with the optional close frame.\n    ///\n    /// You may \"uncleanly\" close a WebSocket connection at any time\n    /// by simply dropping the [`super::WebSocket`].\n    /// However, you may also use the graceful closing protocol, in which\n    /// 1. peer A sends a close frame, and does not send any further messages;\n    /// 2. peer B responds with a close frame, and does not send any further messages;\n    /// 3. peer A processes the remaining messages sent by peer B, before finally\n    /// 4. both peers close the connection.\n    ///\n    /// After sending a close frame,\n    /// you may still read messages,\n    /// but attempts to send another message will error.\n    /// After receiving a close frame,\n    /// wreq will automatically respond with a close frame if necessary\n    /// (you do not have to deal with this yourself).\n    /// Since no further messages will be received,\n    /// you may either do nothing\n    /// or explicitly drop the connection.\n    Close(Option<CloseFrame>),\n}\n\nimpl Message {\n    /// Converts this `Message` into a `tungstenite::Message`.\n    ///\n    /// This method transforms the current `Message` instance into its corresponding\n    /// `tungstenite::Message` representation. 
This is useful when you need to work\n    /// with the `tungstenite` library directly.\n    ///\n    /// # Returns\n    ///\n    /// A `tungstenite::Message` instance that represents the current `Message`.\n    pub(super) fn into_tungstenite(self) -> tungstenite::Message {\n        match self {\n            Self::Text(text) => tungstenite::Message::Text(text.0),\n            Self::Binary(binary) => tungstenite::Message::Binary(binary),\n            Self::Ping(ping) => tungstenite::Message::Ping(ping),\n            Self::Pong(pong) => tungstenite::Message::Pong(pong),\n            Self::Close(Some(close)) => {\n                tungstenite::Message::Close(Some(tungstenite::protocol::CloseFrame {\n                    code: tungstenite::protocol::frame::coding::CloseCode::from(close.code.0),\n                    reason: close.reason.0,\n                }))\n            }\n            Self::Close(None) => tungstenite::Message::Close(None),\n        }\n    }\n\n    /// Converts a `tungstenite::Message` into an `Option<Message>`.\n    ///\n    /// This method transforms a given `tungstenite::Message` into its corresponding\n    /// `Message` representation. 
This is useful when you need to convert messages\n    /// received from the `tungstenite` library into the `Message` type used by this\n    /// library.\n    ///\n    /// # Arguments\n    ///\n    /// * `message` - The `tungstenite::Message` to convert.\n    ///\n    /// # Returns\n    ///\n    /// An `Option<Message>` instance that represents the given `tungstenite::Message`.\n    /// Returns `None` if the message is a `Frame` frame, as recommended by the\n    /// `tungstenite` maintainers.\n    pub(super) fn from_tungstenite(message: tungstenite::Message) -> Option<Self> {\n        match message {\n            tungstenite::Message::Text(text) => Some(Self::Text(Utf8Bytes(text))),\n            tungstenite::Message::Binary(binary) => Some(Self::Binary(binary)),\n            tungstenite::Message::Ping(ping) => Some(Self::Ping(ping)),\n            tungstenite::Message::Pong(pong) => Some(Self::Pong(pong)),\n            tungstenite::Message::Close(Some(close)) => Some(Self::Close(Some(CloseFrame {\n                code: CloseCode(close.code.into()),\n                reason: Utf8Bytes(close.reason),\n            }))),\n            tungstenite::Message::Close(None) => Some(Self::Close(None)),\n            // we can ignore `Frame` frames as recommended by the tungstenite maintainers\n            // https://github.com/snapview/tungstenite-rs/issues/268\n            tungstenite::Message::Frame(_) => None,\n        }\n    }\n\n    /// Consume the WebSocket and return it as binary data.\n    pub fn into_data(self) -> Bytes {\n        match self {\n            Self::Text(string) => Bytes::from(string),\n            Self::Binary(data) | Self::Ping(data) | Self::Pong(data) => data,\n            Self::Close(None) => Bytes::new(),\n            Self::Close(Some(frame)) => Bytes::from(frame.reason),\n        }\n    }\n\n    /// Attempt to consume the WebSocket message and convert it to a Utf8Bytes.\n    pub fn into_text(self) -> crate::Result<Utf8Bytes> {\n        match self {\n    
        Self::Text(string) => Ok(string),\n            Self::Binary(data) | Self::Ping(data) | Self::Pong(data) => {\n                Utf8Bytes::try_from(data).map_err(Error::decode)\n            }\n            Self::Close(None) => Ok(Utf8Bytes::default()),\n            Self::Close(Some(frame)) => Ok(frame.reason),\n        }\n    }\n\n    /// Attempt to get a &str from the WebSocket message,\n    /// this will try to convert binary data to utf8.\n    pub fn to_text(&self) -> crate::Result<&str> {\n        match *self {\n            Self::Text(ref string) => Ok(string.as_str()),\n            Self::Binary(ref data) | Self::Ping(ref data) | Self::Pong(ref data) => {\n                std::str::from_utf8(data).map_err(Error::decode)\n            }\n            Self::Close(None) => Ok(\"\"),\n            Self::Close(Some(ref frame)) => Ok(&frame.reason),\n        }\n    }\n}\n\nimpl Message {\n    /// Create a new text WebSocket message from a stringable.\n    pub fn text<S>(string: S) -> Message\n    where\n        S: Into<Utf8Bytes>,\n    {\n        Message::Text(string.into())\n    }\n\n    /// Create a new binary WebSocket message by converting to `Bytes`.\n    pub fn binary<B>(bin: B) -> Message\n    where\n        B: Into<Bytes>,\n    {\n        Message::Binary(bin.into())\n    }\n\n    /// Create a new ping WebSocket message by converting to `Bytes`.\n    pub fn ping<B>(bin: B) -> Message\n    where\n        B: Into<Bytes>,\n    {\n        Message::Ping(bin.into())\n    }\n\n    /// Create a new pong WebSocket message by converting to `Bytes`.\n    pub fn pong<B>(bin: B) -> Message\n    where\n        B: Into<Bytes>,\n    {\n        Message::Pong(bin.into())\n    }\n\n    /// Create a new close WebSocket message with an optional close frame.\n    pub fn close<C>(close: C) -> Message\n    where\n        C: Into<Option<CloseFrame>>,\n    {\n        Message::Close(close.into())\n    }\n}\n\nimpl From<String> for Message {\n    fn from(string: String) -> Self {\n     
   Message::Text(string.into())\n    }\n}\n\nimpl<'s> From<&'s str> for Message {\n    fn from(string: &'s str) -> Self {\n        Message::Text(string.into())\n    }\n}\n\nimpl<'b> From<&'b [u8]> for Message {\n    fn from(data: &'b [u8]) -> Self {\n        Message::Binary(Bytes::copy_from_slice(data))\n    }\n}\n\nimpl From<Vec<u8>> for Message {\n    fn from(data: Vec<u8>) -> Self {\n        Message::Binary(data.into())\n    }\n}\n\nimpl From<Message> for Vec<u8> {\n    fn from(msg: Message) -> Self {\n        msg.into_data().to_vec()\n    }\n}\n"
  },
  {
    "path": "src/client/ws.rs",
    "content": "//! WebSocket Upgrade\n\n#[cfg(feature = \"json\")]\nmod json;\npub mod message;\n\nuse std::{\n    borrow::Cow,\n    fmt,\n    net::{IpAddr, Ipv4Addr, Ipv6Addr},\n    ops::{Deref, DerefMut},\n    pin::Pin,\n    task::{Context, Poll, ready},\n};\n\nuse bytes::Bytes;\nuse futures_util::{Sink, SinkExt, Stream, StreamExt, stream::FusedStream};\nuse http::{\n    HeaderMap, HeaderName, HeaderValue, Method, StatusCode, Uri, Version, header, uri::Scheme,\n};\nuse http2::ext::Protocol;\nuse pin_project_lite::pin_project;\nuse tokio_tungstenite::tungstenite::{\n    self,\n    protocol::{self, CloseFrame, WebSocketConfig},\n};\n\nuse self::message::{CloseCode, Message, Utf8Bytes};\nuse super::{emulate::IntoEmulation, request::RequestBuilder, response::Response};\nuse crate::{Error, Upgraded, header::OrigHeaderMap, proxy::Proxy};\n\n/// A WebSocket stream.\ntype WebSocketStream = tokio_tungstenite::WebSocketStream<Upgraded>;\n\n/// Wrapper for [`RequestBuilder`] that performs the\n/// websocket handshake when sent.\npub struct WebSocketRequestBuilder {\n    inner: RequestBuilder,\n    accept_key: Option<Cow<'static, str>>,\n    protocols: Option<Vec<Cow<'static, str>>>,\n    config: WebSocketConfig,\n}\n\nimpl WebSocketRequestBuilder {\n    /// Creates a new WebSocket request builder.\n    pub fn new(inner: RequestBuilder) -> Self {\n        Self {\n            inner: inner.version(Version::HTTP_11),\n            accept_key: None,\n            protocols: None,\n            config: WebSocketConfig::default(),\n        }\n    }\n\n    /// Sets a custom WebSocket accept key.\n    ///\n    /// This method allows you to set a custom WebSocket accept key for the connection.\n    ///\n    /// # Arguments\n    ///\n    /// * `key` - The custom WebSocket accept key to set.\n    ///\n    /// # Returns\n    ///\n    /// * `Self` - The modified instance with the custom WebSocket accept key.\n    #[inline]\n    pub fn accept_key<K>(mut self, key: K) -> Self\n    where\n    
    K: Into<Cow<'static, str>>,\n    {\n        self.accept_key = Some(key.into());\n        self\n    }\n\n    /// Set HTTP version\n    ///\n    /// Configures the HTTP version used for the WebSocket handshake.\n    /// Defaults to HTTP/1.1.\n    ///\n    /// # HTTP/1.1 (default)\n    ///\n    /// - Uses the standard `Upgrade: websocket` mechanism (RFC 6455)\n    /// - Sends an HTTP `GET` request with `Connection: Upgrade` and `Upgrade: websocket` headers\n    /// - Widely supported by servers\n    ///\n    /// # HTTP/2\n    ///\n    /// - Uses the Extended CONNECT Protocol (RFC 8441)\n    /// - Sends a `CONNECT` request with the `:protocol: websocket` pseudo-header instead of the\n    ///   traditional upgrade mechanism\n    /// - Requires explicit server support for HTTP/2 WebSocket connections\n    /// - Will fail if the server does not support HTTP/2 WebSocket upgrade\n    #[inline]\n    pub fn version(mut self, version: Version) -> Self {\n        self.inner = self.inner.version(version);\n        self\n    }\n\n    /// Sets the websocket subprotocols to request.\n    ///\n    /// This method allows you to specify the subprotocols that the websocket client\n    /// should request during the handshake. 
Subprotocols are used to define the type\n    /// of communication expected over the websocket connection.\n    #[inline]\n    pub fn protocols<P>(mut self, protocols: P) -> Self\n    where\n        P: IntoIterator,\n        P::Item: Into<Cow<'static, str>>,\n    {\n        let protocols = protocols.into_iter().map(Into::into).collect();\n        self.protocols = Some(protocols);\n        self\n    }\n\n    /// Sets the websocket max_frame_size configuration.\n    #[inline]\n    pub fn max_frame_size(mut self, max_frame_size: usize) -> Self {\n        self.config.max_frame_size = Some(max_frame_size);\n        self\n    }\n\n    /// Sets the websocket read_buffer_size configuration.\n    #[inline]\n    pub fn read_buffer_size(mut self, read_buffer_size: usize) -> Self {\n        self.config.read_buffer_size = read_buffer_size;\n        self\n    }\n\n    /// Sets the websocket write_buffer_size configuration.\n    #[inline]\n    pub fn write_buffer_size(mut self, write_buffer_size: usize) -> Self {\n        self.config.write_buffer_size = write_buffer_size;\n        self\n    }\n\n    /// Sets the websocket max_write_buffer_size configuration.\n    #[inline]\n    pub fn max_write_buffer_size(mut self, max_write_buffer_size: usize) -> Self {\n        self.config.max_write_buffer_size = max_write_buffer_size;\n        self\n    }\n\n    /// Sets the websocket max_message_size configuration.\n    #[inline]\n    pub fn max_message_size(mut self, max_message_size: usize) -> Self {\n        self.config.max_message_size = Some(max_message_size);\n        self\n    }\n\n    /// Sets the websocket accept_unmasked_frames configuration.\n    #[inline]\n    pub fn accept_unmasked_frames(mut self, accept_unmasked_frames: bool) -> Self {\n        self.config.accept_unmasked_frames = accept_unmasked_frames;\n        self\n    }\n\n    /// Add a `Header` to this Request.\n    ///\n    /// If the header is already present, the value will be replaced.\n    #[inline]\n    pub fn 
header<K, V>(mut self, key: K, value: V) -> Self\n    where\n        HeaderName: TryFrom<K>,\n        <HeaderName as TryFrom<K>>::Error: Into<http::Error>,\n        HeaderValue: TryFrom<V>,\n        <HeaderValue as TryFrom<V>>::Error: Into<http::Error>,\n    {\n        self.inner = self.inner.header(key, value);\n        self\n    }\n\n    /// Add a set of Headers to the existing ones on this Request.\n    ///\n    /// The headers will be merged in to any already set.\n    #[inline]\n    pub fn headers(mut self, headers: HeaderMap) -> Self {\n        self.inner = self.inner.headers(headers);\n        self\n    }\n\n    /// Set the original headers for this request.\n    #[inline]\n    pub fn orig_headers(mut self, orig_headers: OrigHeaderMap) -> Self {\n        self.inner = self.inner.orig_headers(orig_headers);\n        self\n    }\n\n    /// Enable or disable client default headers for this request.\n    ///\n    /// By default, client default headers are included. Set to `false` to skip them.\n    pub fn default_headers(mut self, enable: bool) -> Self {\n        self.inner = self.inner.default_headers(enable);\n        self\n    }\n\n    /// Enable HTTP authentication.\n    ///\n    /// ```rust\n    /// # use wreq::Error;\n    /// #\n    /// # async fn run() -> Result<(), Error> {\n    /// let client = wreq::Client::new();\n    /// let resp = client\n    ///     .websocket(\"http://httpbin.org/get\")\n    ///     .auth(\"your_token_here\")\n    ///     .send()\n    ///     .await?;\n    /// # Ok(())\n    /// # }\n    /// ```\n    #[inline]\n    pub fn auth<V>(mut self, value: V) -> Self\n    where\n        HeaderValue: TryFrom<V>,\n        <HeaderValue as TryFrom<V>>::Error: Into<http::Error>,\n    {\n        self.inner = self.inner.auth(value);\n        self\n    }\n\n    /// Enable HTTP basic authentication.\n    ///\n    /// ```rust\n    /// # use wreq::Error;\n    ///\n    /// # async fn run() -> Result<(), Error> {\n    /// let client = 
wreq::Client::new();\n    /// let resp = client\n    ///     .websocket(\"http://httpbin.org/delete\")\n    ///     .basic_auth(\"admin\", Some(\"good password\"))\n    ///     .send()\n    ///     .await?;\n    /// # Ok(())\n    /// # }\n    /// ```\n    #[inline]\n    pub fn basic_auth<U, P>(mut self, username: U, password: Option<P>) -> Self\n    where\n        U: fmt::Display,\n        P: fmt::Display,\n    {\n        self.inner = self.inner.basic_auth(username, password);\n        self\n    }\n\n    /// Enable HTTP bearer authentication.\n    ///\n    /// ```rust\n    /// # use wreq::Error;\n    /// #\n    /// # async fn run() -> Result<(), Error> {\n    /// let client = wreq::Client::new();\n    /// let resp = client\n    ///     .websocket(\"http://httpbin.org/get\")\n    ///     .bearer_auth(\"your_token_here\")\n    ///     .send()\n    ///     .await?;\n    /// # Ok(())\n    /// # }\n    /// ```\n    #[inline]\n    pub fn bearer_auth<T>(mut self, token: T) -> Self\n    where\n        T: fmt::Display,\n    {\n        self.inner = self.inner.bearer_auth(token);\n        self\n    }\n\n    /// Modify the query string of the URI.\n    ///\n    /// Modifies the URI of this request, adding the parameters provided.\n    /// This method appends and does not overwrite. This means that it can\n    /// be called multiple times and that existing query parameters are not\n    /// overwritten if the same key is used. The key will simply show up\n    /// twice in the query string.\n    /// Calling `.query(&[(\"foo\", \"a\"), (\"foo\", \"b\")])` gives `\"foo=a&foo=b\"`.\n    ///\n    /// # Note\n    /// This method does not support serializing a single key-value\n    /// pair. Instead of using `.query((\"key\", \"val\"))`, use a sequence, such\n    /// as `.query(&[(\"key\", \"val\")])`. 
It's also possible to serialize structs\n    /// and maps into a key-value pair.\n    ///\n    /// # Errors\n    /// This method will fail if the object you provide cannot be serialized\n    /// into a query string.\n    #[inline]\n    #[cfg(feature = \"query\")]\n    #[cfg_attr(docsrs, doc(cfg(feature = \"query\")))]\n    pub fn query<T: serde::Serialize + ?Sized>(mut self, query: &T) -> Self {\n        self.inner = self.inner.query(query);\n        self\n    }\n\n    /// Set the proxy for this request.\n    #[inline]\n    pub fn proxy(mut self, proxy: Proxy) -> Self {\n        self.inner = self.inner.proxy(proxy);\n        self\n    }\n\n    /// Set the local address for this request.\n    #[inline]\n    pub fn local_address<V>(mut self, local_address: V) -> Self\n    where\n        V: Into<Option<IpAddr>>,\n    {\n        self.inner = self.inner.local_address(local_address);\n        self\n    }\n\n    /// Set the local addresses for this request.\n    #[inline]\n    pub fn local_addresses<V4, V6>(mut self, ipv4: V4, ipv6: V6) -> Self\n    where\n        V4: Into<Option<Ipv4Addr>>,\n        V6: Into<Option<Ipv6Addr>>,\n    {\n        self.inner = self.inner.local_addresses(ipv4, ipv6);\n        self\n    }\n\n    /// Bind connections only on the specified network interface.\n    ///\n    /// This option is only available on the following operating systems:\n    ///\n    /// - Android\n    /// - Fuchsia\n    /// - Linux,\n    /// - macOS and macOS-like systems (iOS, tvOS, watchOS and visionOS)\n    /// - Solaris and illumos\n    ///\n    /// On Android, Linux, and Fuchsia, this uses the\n    /// [`SO_BINDTODEVICE`][man-7-socket] socket option. 
On macOS and macOS-like\n    /// systems, Solaris, and illumos, this instead uses the [`IP_BOUND_IF` and\n    /// `IPV6_BOUND_IF`][man-7p-ip] socket options (as appropriate).\n    ///\n    /// Note that connections will fail if the provided interface name is not a\n    /// network interface that currently exists when a connection is established.\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// # fn doc() -> Result<(), wreq::Error> {\n    /// let interface = \"lo\";\n    /// let client = wreq::Client::builder()\n    ///     .interface(interface)\n    ///     .build()?;\n    /// # Ok(())\n    /// # }\n    /// ```\n    ///\n    /// [man-7-socket]: https://man7.org/linux/man-pages/man7/socket.7.html\n    /// [man-7p-ip]: https://docs.oracle.com/cd/E86824_01/html/E54777/ip-7p.html\n    #[cfg(any(\n        target_os = \"android\",\n        target_os = \"fuchsia\",\n        target_os = \"illumos\",\n        target_os = \"ios\",\n        target_os = \"linux\",\n        target_os = \"macos\",\n        target_os = \"solaris\",\n        target_os = \"tvos\",\n        target_os = \"visionos\",\n        target_os = \"watchos\",\n    ))]\n    #[cfg_attr(\n        docsrs,\n        doc(cfg(any(\n            target_os = \"android\",\n            target_os = \"fuchsia\",\n            target_os = \"illumos\",\n            target_os = \"ios\",\n            target_os = \"linux\",\n            target_os = \"macos\",\n            target_os = \"solaris\",\n            target_os = \"tvos\",\n            target_os = \"visionos\",\n            target_os = \"watchos\",\n        )))\n    )]\n    pub fn interface<I>(mut self, interface: I) -> Self\n    where\n        I: Into<std::borrow::Cow<'static, str>>,\n    {\n        self.inner = self.inner.interface(interface);\n        self\n    }\n\n    /// Sets the request builder to emulation the specified HTTP context.\n    ///\n    /// This method sets the necessary headers, HTTP/1 and HTTP/2 options configurations, and  TLS\n    /// 
options config to use the specified HTTP context. It allows the client to mimic the\n    /// behavior of different versions or setups, which can be useful for testing or ensuring\n    /// compatibility with various environments.\n    ///\n    /// # Note\n    /// This will overwrite the existing configuration.\n    /// You must set emulation before you can perform subsequent HTTP1/HTTP2/TLS fine-tuning.\n    #[inline]\n    pub fn emulation<T: IntoEmulation>(mut self, emulation: T) -> Self {\n        self.inner = self.inner.emulation(emulation);\n        self\n    }\n\n    /// Sends the request and returns and [`WebSocketResponse`].\n    pub async fn send(self) -> Result<WebSocketResponse, Error> {\n        let (client, request) = self.inner.build_split();\n        let mut request = request?;\n\n        // Ensure the scheme is http or https\n        let uri = request.uri_mut();\n        let scheme = match uri.scheme_str() {\n            Some(\"ws\") => Some(Scheme::HTTP),\n            Some(\"wss\") => Some(Scheme::HTTPS),\n            _ => None,\n        };\n        if scheme.is_some() {\n            let mut parts = uri.clone().into_parts();\n            parts.scheme = scheme;\n            *uri = Uri::from_parts(parts).map_err(Error::builder)?;\n        }\n\n        // Get the version of the request\n        let version = request.version();\n\n        // Set the headers for the websocket handshake\n        let headers = request.headers_mut();\n        headers.insert(\n            header::SEC_WEBSOCKET_VERSION,\n            HeaderValue::from_static(\"13\"),\n        );\n\n        // Ensure the request is HTTP 1.1/HTTP 2\n        let accept_key = match version {\n            Some(Version::HTTP_10 | Version::HTTP_11) => {\n                // Generate a nonce if one wasn't provided\n                let nonce = self\n                    .accept_key\n                    .unwrap_or_else(|| Cow::Owned(tungstenite::handshake::client::generate_key()));\n\n                
headers.insert(header::UPGRADE, HeaderValue::from_static(\"websocket\"));\n                headers.insert(header::CONNECTION, HeaderValue::from_static(\"upgrade\"));\n                headers.insert(\n                    header::SEC_WEBSOCKET_KEY,\n                    HeaderValue::from_str(&nonce).map_err(Error::builder)?,\n                );\n\n                *request.method_mut() = Method::GET;\n                *request.version_mut() = Some(Version::HTTP_11);\n                Some(nonce)\n            }\n            Some(Version::HTTP_2) => {\n                *request.method_mut() = Method::CONNECT;\n                *request.version_mut() = Some(Version::HTTP_2);\n                request\n                    .extensions_mut()\n                    .insert(Protocol::from_static(\"websocket\"));\n                None\n            }\n            unsupported => {\n                return Err(Error::upgrade(format!(\n                    \"unsupported version: {unsupported:?}\"\n                )));\n            }\n        };\n\n        // Set websocket subprotocols\n        if let Some(ref protocols) = self.protocols {\n            // Sets subprotocols\n            if !protocols.is_empty() {\n                let subprotocols = protocols\n                    .iter()\n                    .map(|s| s.as_ref())\n                    .collect::<Vec<&str>>()\n                    .join(\", \");\n\n                request.headers_mut().insert(\n                    header::SEC_WEBSOCKET_PROTOCOL,\n                    HeaderValue::from_maybe_shared(Bytes::from(subprotocols))\n                        .map_err(Error::builder)?,\n                );\n            }\n        }\n\n        client\n            .execute(request)\n            .await\n            .map(|inner| WebSocketResponse {\n                inner,\n                accept_key,\n                protocols: self.protocols,\n                config: self.config,\n            })\n    }\n}\n\n/// The server's response to the 
websocket upgrade request.\n///\n/// This implements `Deref<Target = Response>`, so you can access all the usual\n/// information from the [`Response`].\n#[derive(Debug)]\npub struct WebSocketResponse {\n    inner: Response,\n    accept_key: Option<Cow<'static, str>>,\n    protocols: Option<Vec<Cow<'static, str>>>,\n    config: WebSocketConfig,\n}\n\nimpl Deref for WebSocketResponse {\n    type Target = Response;\n\n    fn deref(&self) -> &Self::Target {\n        &self.inner\n    }\n}\n\nimpl DerefMut for WebSocketResponse {\n    fn deref_mut(&mut self) -> &mut Self::Target {\n        &mut self.inner\n    }\n}\n\nimpl WebSocketResponse {\n    /// Turns the response into a websocket. This checks if the websocket\n    /// handshake was successful.\n    pub async fn into_websocket(self) -> Result<WebSocket, Error> {\n        let (inner, protocol) = {\n            let status = self.inner.status();\n            let headers = self.inner.headers();\n\n            match self.inner.version() {\n                // HTTP/1.0 and HTTP/1.1 use the traditional upgrade mechanism\n                Version::HTTP_10 | Version::HTTP_11 => {\n                    if status != StatusCode::SWITCHING_PROTOCOLS {\n                        return Err(Error::upgrade(format!(\"unexpected status code: {status}\")));\n                    }\n\n                    if !header_contains(self.inner.headers(), header::CONNECTION, \"upgrade\") {\n                        return Err(Error::upgrade(\"missing connection header\"));\n                    }\n\n                    if !header_eq(self.inner.headers(), header::UPGRADE, \"websocket\") {\n                        return Err(Error::upgrade(\"invalid upgrade header\"));\n                    }\n\n                    match self\n                        .accept_key\n                        .zip(headers.get(header::SEC_WEBSOCKET_ACCEPT))\n                    {\n                        Some((nonce, header)) => {\n                            if 
!header.to_str().is_ok_and(|s| {\n                                s == tungstenite::handshake::derive_accept_key(nonce.as_bytes())\n                            }) {\n                                return Err(Error::upgrade(format!(\n                                    \"invalid accept key: {header:?}\"\n                                )));\n                            }\n                        }\n                        None => {\n                            return Err(Error::upgrade(\"missing accept key\"));\n                        }\n                    }\n                }\n                // HTTP/2 uses the Extended CONNECT Protocol (RFC 8441)\n                // See: https://datatracker.ietf.org/doc/html/rfc8441\n                Version::HTTP_2 => {\n                    if status != StatusCode::OK {\n                        return Err(Error::upgrade(format!(\"unexpected status code: {status}\")));\n                    }\n                }\n                _ => {\n                    return Err(Error::upgrade(format!(\n                        \"unsupported version: {:?}\",\n                        self.inner.version()\n                    )));\n                }\n            }\n\n            let protocol = headers.get(header::SEC_WEBSOCKET_PROTOCOL).cloned();\n            let requested = self.protocols.as_ref().filter(|p| !p.is_empty());\n            let replied = protocol.as_ref().and_then(|v| v.to_str().ok());\n\n            match (requested, replied) {\n                // okay, we requested protocols and got one back\n                (Some(req), Some(rep)) => {\n                    if !req.contains(&Cow::Borrowed(rep)) {\n                        return Err(Error::upgrade(format!(\"invalid protocol: {rep}\")));\n                    }\n                }\n                // server didn't reply with a protocol\n                (Some(_), None) => {\n                    return Err(Error::upgrade(format!(\n                        \"missing protocol: {:?}\",\n    
                    self.protocols\n                    )));\n                }\n                // we didn't request any protocols, but got one anyway\n                (None, Some(_)) => {\n                    return Err(Error::upgrade(format!(\"invalid protocol: {protocol:?}\")));\n                }\n                // we didn't request any protocols, so we don't expect one\n                (None, None) => {}\n            };\n\n            let inner = WebSocketStream::from_raw_socket(\n                self.inner.upgrade().await?,\n                protocol::Role::Client,\n                Some(self.config),\n            )\n            .await;\n\n            (inner, protocol)\n        };\n\n        Ok(WebSocket { inner, protocol })\n    }\n}\n\n/// Checks if the header value is equal to the given value.\nfn header_eq(headers: &HeaderMap, key: HeaderName, value: &'static str) -> bool {\n    if let Some(header) = headers.get(&key) {\n        header.as_bytes().eq_ignore_ascii_case(value.as_bytes())\n    } else {\n        false\n    }\n}\n\n/// Checks if the header value contains the given value.\nfn header_contains(headers: &HeaderMap, key: HeaderName, value: &'static str) -> bool {\n    let header = if let Some(header) = headers.get(&key) {\n        header\n    } else {\n        return false;\n    };\n\n    if let Ok(header) = std::str::from_utf8(header.as_bytes()) {\n        header.to_ascii_lowercase().contains(value)\n    } else {\n        false\n    }\n}\n\npin_project! 
{\n    /// A websocket connection\n    #[derive(Debug)]\n    pub struct WebSocket {\n        #[pin]\n        inner: WebSocketStream,\n        protocol: Option<HeaderValue>,\n    }\n}\n\nimpl WebSocket {\n    /// Return the selected WebSocket subprotocol, if one has been chosen.\n    #[inline]\n    pub fn protocol(&self) -> Option<&HeaderValue> {\n        self.protocol.as_ref()\n    }\n\n    /// Receive another message.\n    ///\n    /// Returns `None` if the stream has closed.\n    #[inline]\n    pub async fn recv(&mut self) -> Option<Result<Message, Error>> {\n        self.next().await\n    }\n\n    /// Send a message.\n    #[inline]\n    pub async fn send(&mut self, msg: Message) -> Result<(), Error> {\n        self.inner\n            .send(msg.into_tungstenite())\n            .await\n            .map_err(Error::websocket)\n    }\n\n    /// Consumes the [`WebSocket`] and returns the underlying stream.\n    #[inline]\n    pub fn into_inner(self) -> Upgraded {\n        self.inner.into_inner()\n    }\n\n    /// Closes the connection with a given code and (optional) reason.\n    pub async fn close<C, R>(mut self, code: C, reason: R) -> Result<(), Error>\n    where\n        C: Into<CloseCode>,\n        R: Into<Utf8Bytes>,\n    {\n        let close_frame = CloseFrame {\n            code: code.into().0.into(),\n            reason: reason.into().0,\n        };\n\n        self.inner\n            .close(Some(close_frame))\n            .await\n            .map_err(Error::websocket)\n    }\n}\n\nimpl Sink<Message> for WebSocket {\n    type Error = Error;\n\n    #[inline]\n    fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {\n        self.project()\n            .inner\n            .poll_ready(cx)\n            .map_err(Error::websocket)\n    }\n\n    #[inline]\n    fn start_send(self: Pin<&mut Self>, item: Message) -> Result<(), Self::Error> {\n        self.project()\n            .inner\n            
.start_send(item.into_tungstenite())\n            .map_err(Error::websocket)\n    }\n\n    #[inline]\n    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {\n        self.project()\n            .inner\n            .poll_flush(cx)\n            .map_err(Error::websocket)\n    }\n\n    #[inline]\n    fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {\n        self.project()\n            .inner\n            .poll_close(cx)\n            .map_err(Error::websocket)\n    }\n}\n\nimpl FusedStream for WebSocket {\n    #[inline]\n    fn is_terminated(&self) -> bool {\n        self.inner.is_terminated()\n    }\n}\n\nimpl Stream for WebSocket {\n    type Item = Result<Message, Error>;\n\n    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {\n        loop {\n            match ready!(self.inner.poll_next_unpin(cx)) {\n                Some(Ok(msg)) => {\n                    if let Some(msg) = Message::from_tungstenite(msg) {\n                        return Poll::Ready(Some(Ok(msg)));\n                    }\n                }\n                Some(Err(err)) => return Poll::Ready(Some(Err(Error::body(err)))),\n                None => return Poll::Ready(None),\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "src/client.rs",
    "content": "mod body;\nmod conn;\nmod core;\nmod emulate;\nmod group;\nmod request;\nmod response;\n\npub mod future;\npub mod layer;\n#[cfg(feature = \"multipart\")]\npub mod multipart;\n#[cfg(feature = \"ws\")]\npub mod ws;\n\nuse std::{\n    borrow::Cow,\n    collections::HashMap,\n    convert::TryInto,\n    net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr},\n    num::NonZeroUsize,\n    sync::Arc,\n    task::{Context, Poll},\n    time::Duration,\n};\n\nuse http::header::{HeaderMap, HeaderValue, USER_AGENT};\nuse tower::{\n    BoxError, Layer, Service, ServiceBuilder, ServiceExt,\n    retry::{Retry, RetryLayer},\n    util::{BoxCloneSyncService, BoxCloneSyncServiceLayer, Either, Oneshot},\n};\n\n#[cfg(any(\n    feature = \"gzip\",\n    feature = \"zstd\",\n    feature = \"brotli\",\n    feature = \"deflate\",\n))]\nuse self::layer::decoder::{AcceptEncoding, DecompressionLayer};\n#[cfg(feature = \"ws\")]\nuse self::ws::WebSocketRequestBuilder;\npub use self::{\n    body::Body,\n    core::{http1, http2, upgrade::Upgraded},\n    emulate::{Emulation, EmulationBuilder, IntoEmulation},\n    group::Group,\n    request::{Request, RequestBuilder},\n    response::Response,\n};\nuse self::{\n    conn::{\n        BoxedConnectorLayer, BoxedConnectorService, Conn, Connector, HttpTransport,\n        SocketBindOptions, Unnameable,\n    },\n    core::{\n        body::Incoming,\n        rt::{TokioExecutor, TokioTimer},\n    },\n    future::Pending,\n    layer::{\n        client::HttpClient,\n        config::{ConfigService, ConfigServiceLayer},\n        redirect::{FollowRedirect, FollowRedirectLayer},\n        retry::RetryPolicy,\n        timeout::{\n            ResponseBodyTimeout, ResponseBodyTimeoutLayer, Timeout, TimeoutBody, TimeoutLayer,\n            TimeoutOptions,\n        },\n    },\n};\npub(crate) use self::{\n    conn::{\n        Connected, Connection,\n        descriptor::{ConnectionDescriptor, ConnectionId},\n    },\n    core::Error as CoreError,\n};\n#[cfg(feature = 
\"cookies\")]\nuse crate::cookie;\n#[cfg(feature = \"hickory-dns\")]\nuse crate::dns::hickory::HickoryDnsResolver;\nuse crate::{\n    IntoUri, Method, Proxy,\n    dns::{DnsResolverWithOverrides, DynResolver, GaiResolver, IntoResolve, Resolve},\n    error::{self, Error},\n    header::OrigHeaderMap,\n    http1::Http1Options,\n    http2::Http2Options,\n    proxy::Matcher as ProxyMatcher,\n    redirect::{self, FollowRedirectPolicy},\n    retry,\n    tls::{\n        AlpnProtocol, TlsOptions, TlsVersion,\n        keylog::KeyLog,\n        session::{IntoTlsSessionCache, TlsSessionCache},\n        trust::{CertStore, Identity},\n    },\n};\n\n/// Decompression service type. Identity type when compression features are disabled.\n#[cfg(not(any(\n    feature = \"gzip\",\n    feature = \"zstd\",\n    feature = \"brotli\",\n    feature = \"deflate\"\n)))]\ntype Decompression<T> = T;\n\n/// Service wrapper that handles response body decompression.\n#[cfg(any(\n    feature = \"gzip\",\n    feature = \"zstd\",\n    feature = \"brotli\",\n    feature = \"deflate\"\n))]\ntype Decompression<T> = self::layer::decoder::Decompression<T>;\n\n/// Response body type with timeout and optional decompression.\n#[cfg(any(\n    feature = \"gzip\",\n    feature = \"zstd\",\n    feature = \"brotli\",\n    feature = \"deflate\"\n))]\ntype ResponseBody = TimeoutBody<tower_http::decompression::DecompressionBody<Incoming>>;\n\n/// Response body type with timeout only (no compression features).\n#[cfg(not(any(\n    feature = \"gzip\",\n    feature = \"zstd\",\n    feature = \"brotli\",\n    feature = \"deflate\"\n)))]\ntype ResponseBody = TimeoutBody<Incoming>;\n\n/// The complete HTTP client service stack with all middleware layers.\ntype ClientService = Timeout<\n    ResponseBodyTimeout<\n        ConfigService<\n            Decompression<\n                Retry<\n                    RetryPolicy,\n                    FollowRedirect<HttpClient<Connector, Body>, FollowRedirectPolicy>,\n                
>,\n            >,\n        >,\n    >,\n>;\n\n/// Type-erased client service for dynamic middleware composition.\ntype BoxedClientService =\n    BoxCloneSyncService<http::Request<Body>, http::Response<ResponseBody>, BoxError>;\n\n/// Layer type for wrapping boxed client services with additional middleware.\ntype BoxedClientLayer = BoxCloneSyncServiceLayer<\n    BoxedClientService,\n    http::Request<Body>,\n    http::Response<ResponseBody>,\n    BoxError,\n>;\n\n/// An [`Client`] to make Requests with.\n///\n/// The Client has various configuration values to tweak, but the defaults\n/// are set to what is usually the most commonly desired value. To configure a\n/// [`Client`], use [`Client::builder()`].\n///\n/// The [`Client`] holds a connection pool internally, so it is advised that\n/// you create one and **reuse** it.\n///\n/// You do **not** have to wrap the [`Client`] in an [`Rc`] or [`Arc`] to **reuse** it,\n/// because it already uses an [`Arc`] internally.\n///\n/// [`Rc`]: std::rc::Rc\n#[derive(Clone)]\n#[repr(transparent)]\npub struct Client(Arc<Either<ClientService, BoxedClientService>>);\n\n/// A [`ClientBuilder`] can be used to create a [`Client`] with custom configuration.\n#[must_use]\npub struct ClientBuilder {\n    config: Config,\n}\n\n/// The HTTP version preference for the client.\n#[repr(u8)]\nenum HttpVersionPref {\n    Http1,\n    Http2,\n    All,\n}\n\nstruct Config {\n    error: Option<Error>,\n    headers: HeaderMap,\n    orig_headers: OrigHeaderMap,\n    #[cfg(any(\n        feature = \"gzip\",\n        feature = \"zstd\",\n        feature = \"brotli\",\n        feature = \"deflate\",\n    ))]\n    accept_encoding: AcceptEncoding,\n    connect_timeout: Option<Duration>,\n    connection_verbose: bool,\n    pool_idle_timeout: Option<Duration>,\n    pool_max_idle_per_host: usize,\n    pool_max_size: Option<NonZeroUsize>,\n    tcp_nodelay: bool,\n    tcp_reuse_address: bool,\n    tcp_keepalive: Option<Duration>,\n    tcp_keepalive_interval: 
Option<Duration>,\n    tcp_keepalive_retries: Option<u32>,\n    #[cfg(any(target_os = \"android\", target_os = \"fuchsia\", target_os = \"linux\"))]\n    tcp_user_timeout: Option<Duration>,\n    tcp_send_buffer_size: Option<usize>,\n    tcp_recv_buffer_size: Option<usize>,\n    tcp_happy_eyeballs_timeout: Option<Duration>,\n    socket_bind_options: SocketBindOptions,\n    proxies: Vec<ProxyMatcher>,\n    auto_sys_proxy: bool,\n    retry_policy: retry::Policy,\n    redirect_policy: redirect::Policy,\n    referer: bool,\n    timeout_options: TimeoutOptions,\n    #[cfg(feature = \"cookies\")]\n    cookie_store: Option<Arc<dyn cookie::CookieStore>>,\n    #[cfg(feature = \"hickory-dns\")]\n    hickory_dns: bool,\n    dns_overrides: HashMap<Cow<'static, str>, Vec<SocketAddr>>,\n    dns_resolver: Option<Arc<dyn Resolve>>,\n    http_version_pref: HttpVersionPref,\n    https_only: bool,\n    layers: Vec<BoxedClientLayer>,\n    connector_layers: Vec<BoxedConnectorLayer>,\n    tls_keylog: Option<KeyLog>,\n    tls_info: bool,\n    tls_sni: bool,\n    tls_verify_hostname: bool,\n    tls_identity: Option<Identity>,\n    tls_cert_store: CertStore,\n    tls_cert_verification: bool,\n    tls_min_version: Option<TlsVersion>,\n    tls_max_version: Option<TlsVersion>,\n    tls_session_cache: Option<Arc<dyn TlsSessionCache>>,\n    tls_options: Option<TlsOptions>,\n    http1_options: Option<Http1Options>,\n    http2_options: Option<Http2Options>,\n}\n\n// ===== impl Client =====\n\nimpl Default for Client {\n    fn default() -> Self {\n        Self::new()\n    }\n}\n\nimpl Client {\n    /// Constructs a new [`Client`].\n    ///\n    /// # Panics\n    ///\n    /// This method panics if a TLS backend cannot be initialized, or the resolver\n    /// cannot load the system configuration.\n    ///\n    /// Use [`Client::builder()`] if you wish to handle the failure as an [`Error`]\n    /// instead of panicking.\n    #[inline]\n    pub fn new() -> Client {\n        
Client::builder().build().expect(\"Client::new()\")\n    }\n\n    /// Creates a [`ClientBuilder`] to configure a [`Client`].\n    pub fn builder() -> ClientBuilder {\n        ClientBuilder {\n            config: Config {\n                error: None,\n                headers: HeaderMap::new(),\n                orig_headers: OrigHeaderMap::new(),\n                #[cfg(any(\n                    feature = \"gzip\",\n                    feature = \"zstd\",\n                    feature = \"brotli\",\n                    feature = \"deflate\",\n                ))]\n                accept_encoding: AcceptEncoding::default(),\n                connect_timeout: None,\n                connection_verbose: false,\n                pool_idle_timeout: Some(Duration::from_secs(90)),\n                pool_max_idle_per_host: usize::MAX,\n                pool_max_size: None,\n                tcp_keepalive: Some(Duration::from_secs(15)),\n                tcp_keepalive_interval: Some(Duration::from_secs(15)),\n                tcp_keepalive_retries: Some(3),\n                #[cfg(any(target_os = \"android\", target_os = \"fuchsia\", target_os = \"linux\"))]\n                tcp_user_timeout: Some(Duration::from_secs(30)),\n                tcp_nodelay: true,\n                tcp_reuse_address: false,\n                tcp_send_buffer_size: None,\n                tcp_recv_buffer_size: None,\n                tcp_happy_eyeballs_timeout: Some(Duration::from_millis(300)),\n                socket_bind_options: SocketBindOptions::default(),\n                proxies: Vec::new(),\n                auto_sys_proxy: true,\n                retry_policy: retry::Policy::default(),\n                redirect_policy: redirect::Policy::none(),\n                referer: true,\n                timeout_options: TimeoutOptions::default(),\n                #[cfg(feature = \"hickory-dns\")]\n                hickory_dns: cfg!(feature = \"hickory-dns\"),\n                #[cfg(feature = \"cookies\")]\n              
  cookie_store: None,\n                dns_overrides: HashMap::new(),\n                dns_resolver: None,\n                http_version_pref: HttpVersionPref::All,\n                https_only: false,\n                http1_options: None,\n                http2_options: None,\n                layers: Vec::new(),\n                connector_layers: Vec::new(),\n                tls_keylog: None,\n                tls_info: false,\n                tls_sni: true,\n                tls_verify_hostname: true,\n                tls_identity: None,\n                tls_cert_store: CertStore::default(),\n                tls_cert_verification: true,\n                tls_min_version: None,\n                tls_max_version: None,\n                tls_session_cache: None,\n                tls_options: None,\n            },\n        }\n    }\n\n    /// Convenience method to make a `GET` request to a URI.\n    ///\n    /// # Errors\n    ///\n    /// This method fails whenever the supplied `Uri` cannot be parsed.\n    #[inline]\n    pub fn get<U: IntoUri>(&self, uri: U) -> RequestBuilder {\n        self.request(Method::GET, uri)\n    }\n\n    /// Convenience method to make a `POST` request to a URI.\n    ///\n    /// # Errors\n    ///\n    /// This method fails whenever the supplied `Uri` cannot be parsed.\n    #[inline]\n    pub fn post<U: IntoUri>(&self, uri: U) -> RequestBuilder {\n        self.request(Method::POST, uri)\n    }\n\n    /// Convenience method to make a `PUT` request to a URI.\n    ///\n    /// # Errors\n    ///\n    /// This method fails whenever the supplied `Uri` cannot be parsed.\n    #[inline]\n    pub fn put<U: IntoUri>(&self, uri: U) -> RequestBuilder {\n        self.request(Method::PUT, uri)\n    }\n\n    /// Convenience method to make a `PATCH` request to a URI.\n    ///\n    /// # Errors\n    ///\n    /// This method fails whenever the supplied `Uri` cannot be parsed.\n    #[inline]\n    pub fn patch<U: IntoUri>(&self, uri: U) -> RequestBuilder {\n        
self.request(Method::PATCH, uri)\n    }\n\n    /// Convenience method to make a `DELETE` request to a URI.\n    ///\n    /// # Errors\n    ///\n    /// This method fails whenever the supplied `Uri` cannot be parsed.\n    #[inline]\n    pub fn delete<U: IntoUri>(&self, uri: U) -> RequestBuilder {\n        self.request(Method::DELETE, uri)\n    }\n\n    /// Convenience method to make a `HEAD` request to a URI.\n    ///\n    /// # Errors\n    ///\n    /// This method fails whenever the supplied `Uri` cannot be parsed.\n    #[inline]\n    pub fn head<U: IntoUri>(&self, uri: U) -> RequestBuilder {\n        self.request(Method::HEAD, uri)\n    }\n\n    /// Convenience method to make a `OPTIONS` request to a URI.\n    ///\n    /// # Errors\n    ///\n    /// This method fails whenever the supplied `Uri` cannot be parsed.\n    #[inline]\n    pub fn options<U: IntoUri>(&self, uri: U) -> RequestBuilder {\n        self.request(Method::OPTIONS, uri)\n    }\n\n    /// Start building a `Request` with the `Method` and `Uri`.\n    ///\n    /// Returns a `RequestBuilder`, which will allow setting headers and\n    /// the request body before sending.\n    ///\n    /// # Errors\n    ///\n    /// This method fails whenever the supplied `Uri` cannot be parsed.\n    pub fn request<U: IntoUri>(&self, method: Method, uri: U) -> RequestBuilder {\n        let req = uri.into_uri().map(move |uri| Request::new(method, uri));\n        RequestBuilder::new(self.clone(), req)\n    }\n\n    /// Upgrades the [`RequestBuilder`] to perform a\n    /// websocket handshake. 
This returns a wrapped type, so you must do\n    /// this after you set up your request, and just before you send the\n    /// request.\n    #[inline]\n    #[cfg(feature = \"ws\")]\n    #[cfg_attr(docsrs, doc(cfg(feature = \"ws\")))]\n    pub fn websocket<U: IntoUri>(&self, uri: U) -> WebSocketRequestBuilder {\n        WebSocketRequestBuilder::new(self.request(Method::GET, uri))\n    }\n\n    /// Executes a `Request`.\n    ///\n    /// A `Request` can be built manually with `Request::new()` or obtained\n    /// from a RequestBuilder with `RequestBuilder::build()`.\n    ///\n    /// You should prefer to use the `RequestBuilder` and\n    /// `RequestBuilder::send()`.\n    ///\n    /// # Errors\n    ///\n    /// This method fails if there was an error while sending request,\n    /// redirect loop was detected or redirect limit was exhausted.\n    pub fn execute(&self, request: Request) -> Pending {\n        let req = http::Request::<Body>::from(request);\n        Pending::Request {\n            uri: Some(req.uri().clone()),\n            fut: Box::pin(Oneshot::new((*self.0).clone(), req)),\n        }\n    }\n}\n\nimpl tower::Service<Request> for Client {\n    type Response = Response;\n    type Error = Error;\n    type Future = Pending;\n\n    #[inline(always)]\n    fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {\n        Poll::Ready(Ok(()))\n    }\n\n    #[inline(always)]\n    fn call(&mut self, req: Request) -> Self::Future {\n        self.execute(req)\n    }\n}\n\nimpl tower::Service<Request> for &'_ Client {\n    type Response = Response;\n    type Error = Error;\n    type Future = Pending;\n\n    #[inline(always)]\n    fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {\n        Poll::Ready(Ok(()))\n    }\n\n    #[inline(always)]\n    fn call(&mut self, req: Request) -> Self::Future {\n        self.execute(req)\n    }\n}\n\n// ===== impl ClientBuilder =====\n\nimpl ClientBuilder {\n    /// Returns a 
[`Client`] that uses this [`ClientBuilder`] configuration.\n    ///\n    /// # Errors\n    ///\n    /// This method fails if a TLS backend cannot be initialized, or the resolver\n    /// cannot load the system configuration.\n    pub fn build(self) -> crate::Result<Client> {\n        let mut config = self.config;\n\n        if let Some(err) = config.error {\n            return Err(err);\n        }\n\n        // Prepare proxies\n        if config.auto_sys_proxy {\n            config.proxies.push(ProxyMatcher::system());\n        }\n\n        // Create base client service\n        let service = {\n            let resolver = {\n                let mut resolver: Arc<dyn Resolve> = match config.dns_resolver {\n                    Some(dns_resolver) => dns_resolver,\n                    #[cfg(feature = \"hickory-dns\")]\n                    None if config.hickory_dns => Arc::new(HickoryDnsResolver::new()),\n                    None => Arc::new(GaiResolver::new()),\n                };\n\n                if !config.dns_overrides.is_empty() {\n                    resolver = Arc::new(DnsResolverWithOverrides::new(\n                        resolver,\n                        config.dns_overrides,\n                    ));\n                }\n                DynResolver::new(resolver)\n            };\n\n            let connector = Connector::builder(config.proxies, resolver)\n                .timeout(config.connect_timeout)\n                .tls_info(config.tls_info)\n                .tcp_nodelay(config.tcp_nodelay)\n                .verbose(config.connection_verbose)\n                .with_tls(|tls| {\n                    tls.alpn_protocol(match config.http_version_pref {\n                        HttpVersionPref::Http1 => Some(AlpnProtocol::HTTP1),\n                        HttpVersionPref::Http2 => Some(AlpnProtocol::HTTP2),\n                        _ => None,\n                    })\n                    .keylog(config.tls_keylog)\n                    
.cert_store(config.tls_cert_store)\n                    .identity(config.tls_identity)\n                    .max_version(config.tls_max_version)\n                    .min_version(config.tls_min_version)\n                    .tls_sni(config.tls_sni)\n                    .verify_hostname(config.tls_verify_hostname)\n                    .cert_verification(config.tls_cert_verification)\n                    .session_store(config.tls_session_cache)\n                })\n                .with_http(|http| {\n                    http.enforce_http(false);\n                    http.set_keepalive(config.tcp_keepalive);\n                    http.set_keepalive_interval(config.tcp_keepalive_interval);\n                    http.set_keepalive_retries(config.tcp_keepalive_retries);\n                    http.set_reuse_address(config.tcp_reuse_address);\n                    http.set_connect_timeout(config.connect_timeout);\n                    http.set_nodelay(config.tcp_nodelay);\n                    http.set_send_buffer_size(config.tcp_send_buffer_size);\n                    http.set_recv_buffer_size(config.tcp_recv_buffer_size);\n                    http.set_happy_eyeballs_timeout(config.tcp_happy_eyeballs_timeout);\n\n                    #[cfg(any(target_os = \"android\", target_os = \"fuchsia\", target_os = \"linux\"))]\n                    http.set_tcp_user_timeout(config.tcp_user_timeout);\n\n                    #[cfg(any(\n                        target_os = \"android\",\n                        target_os = \"fuchsia\",\n                        target_os = \"illumos\",\n                        target_os = \"ios\",\n                        target_os = \"linux\",\n                        target_os = \"macos\",\n                        target_os = \"solaris\",\n                        target_os = \"tvos\",\n                        target_os = \"visionos\",\n                        target_os = \"watchos\",\n                    ))]\n                    if let Some(interface) = 
config.socket_bind_options.interface {\n                        http.set_interface(interface);\n                    }\n\n                    http.set_local_addresses(\n                        config.socket_bind_options.ipv4_address,\n                        config.socket_bind_options.ipv6_address,\n                    );\n                })\n                .build(config.tls_options, config.connector_layers)?;\n\n            #[allow(unused_mut)]\n            let mut builder = HttpClient::builder(TokioExecutor::new());\n\n            #[cfg(feature = \"cookies\")]\n            {\n                builder = builder.cookie_store(config.cookie_store);\n            }\n\n            builder\n                .http1_options(config.http1_options)\n                .http2_options(config.http2_options)\n                .http2_only(matches!(config.http_version_pref, HttpVersionPref::Http2))\n                .http2_timer(TokioTimer::new())\n                .pool_timer(TokioTimer::new())\n                .pool_idle_timeout(config.pool_idle_timeout)\n                .pool_max_idle_per_host(config.pool_max_idle_per_host)\n                .pool_max_size(config.pool_max_size)\n                .build(connector)\n        };\n\n        // Configured client service with layers\n        let client = {\n            let service = ServiceBuilder::new()\n                .layer(RetryLayer::new(RetryPolicy::new(config.retry_policy)))\n                .layer({\n                    let policy = FollowRedirectPolicy::new(config.redirect_policy)\n                        .with_referer(config.referer)\n                        .with_https_only(config.https_only);\n                    FollowRedirectLayer::with_policy(policy)\n                })\n                .service(service);\n\n            #[cfg(any(\n                feature = \"gzip\",\n                feature = \"zstd\",\n                feature = \"brotli\",\n                feature = \"deflate\",\n            ))]\n            let service = 
ServiceBuilder::new()\n                .layer(DecompressionLayer::new(config.accept_encoding))\n                .service(service);\n\n            let service = ServiceBuilder::new()\n                .layer(ResponseBodyTimeoutLayer::new(\n                    TokioTimer::new(),\n                    config.timeout_options,\n                ))\n                .layer(ConfigServiceLayer::new(\n                    config.https_only,\n                    config.headers,\n                    config.orig_headers,\n                ))\n                .service(service);\n\n            if config.layers.is_empty() {\n                let service = ServiceBuilder::new()\n                    .layer(TimeoutLayer::new(config.timeout_options))\n                    .service(service);\n\n                Either::Left(service)\n            } else {\n                let service = config\n                    .layers\n                    .into_iter()\n                    .fold(BoxCloneSyncService::new(service), |service, layer| {\n                        ServiceBuilder::new().layer(layer).service(service)\n                    });\n\n                let service = ServiceBuilder::new()\n                    .layer(TimeoutLayer::new(config.timeout_options))\n                    .service(service)\n                    .map_err(error::map_timeout_to_request_error);\n\n                Either::Right(BoxCloneSyncService::new(service))\n            }\n        };\n\n        Ok(Client(Arc::new(client)))\n    }\n\n    // Higher-level options\n\n    /// Sets the `User-Agent` header to be used by this client.\n    ///\n    /// # Example\n    ///\n    /// ```rust\n    /// # async fn doc() -> wreq::Result<()> {\n    /// // Name your user agent after your app?\n    /// static APP_USER_AGENT: &str = concat!(env!(\"CARGO_PKG_NAME\"), \"/\", env!(\"CARGO_PKG_VERSION\"),);\n    ///\n    /// let client = wreq::Client::builder().user_agent(APP_USER_AGENT).build()?;\n    /// let res = 
client.get(\"https://www.rust-lang.org\").send().await?;\n    /// # Ok(())\n    /// # }\n    /// ```\n    pub fn user_agent<V>(mut self, value: V) -> ClientBuilder\n    where\n        V: TryInto<HeaderValue>,\n        V::Error: Into<http::Error>,\n    {\n        match value.try_into() {\n            Ok(value) => {\n                self.config.headers.insert(USER_AGENT, value);\n            }\n            Err(err) => {\n                self.config.error = Some(Error::builder(err.into()));\n            }\n        };\n        self\n    }\n\n    /// Sets the default headers for every request.\n    ///\n    /// # Example\n    ///\n    /// ```rust\n    /// use wreq::header;\n    /// # async fn doc() -> wreq::Result<()> {\n    /// let mut headers = header::HeaderMap::new();\n    /// headers.insert(\"X-MY-HEADER\", header::HeaderValue::from_static(\"value\"));\n    ///\n    /// // Consider marking security-sensitive headers with `set_sensitive`.\n    /// let mut auth_value = header::HeaderValue::from_static(\"secret\");\n    /// auth_value.set_sensitive(true);\n    /// headers.insert(header::AUTHORIZATION, auth_value);\n    ///\n    /// // get a client builder\n    /// let client = wreq::Client::builder().default_headers(headers).build()?;\n    /// let res = client.get(\"https://www.rust-lang.org\").send().await?;\n    /// # Ok(())\n    /// # }\n    /// ```\n    ///\n    /// Override the default headers:\n    ///\n    /// ```rust\n    /// use wreq::header;\n    /// # async fn doc() -> wreq::Result<()> {\n    /// let mut headers = header::HeaderMap::new();\n    /// headers.insert(\"X-MY-HEADER\", header::HeaderValue::from_static(\"value\"));\n    ///\n    /// // get a client builder\n    /// let client = wreq::Client::builder().default_headers(headers).build()?;\n    /// let res = client\n    ///     .get(\"https://www.rust-lang.org\")\n    ///     .header(\"X-MY-HEADER\", \"new_value\")\n    ///     .send()\n    ///     .await?;\n    /// # Ok(())\n    /// # }\n    /// 
```\n    #[inline]\n    pub fn default_headers(mut self, headers: HeaderMap) -> ClientBuilder {\n        crate::util::replace_headers(&mut self.config.headers, headers);\n        self\n    }\n\n    /// Sets the original headers for every request.\n    #[inline]\n    pub fn orig_headers(mut self, orig_headers: OrigHeaderMap) -> ClientBuilder {\n        self.config.orig_headers.extend(orig_headers);\n        self\n    }\n\n    /// Enable a persistent cookie store for the client.\n    ///\n    /// Cookies received in responses will be preserved and included in\n    /// additional requests.\n    ///\n    /// By default, no cookie store is used.\n    ///\n    /// # Optional\n    ///\n    /// This requires the optional `cookies` feature to be enabled.\n    #[inline]\n    #[cfg(feature = \"cookies\")]\n    #[cfg_attr(docsrs, doc(cfg(feature = \"cookies\")))]\n    pub fn cookie_store(mut self, enable: bool) -> ClientBuilder {\n        if enable {\n            self.cookie_provider(Arc::new(cookie::Jar::default()))\n        } else {\n            self.config.cookie_store = None;\n            self\n        }\n    }\n\n    /// Set the persistent cookie store for the client.\n    ///\n    /// Cookies received in responses will be passed to this store, and\n    /// additional requests will query this store for cookies.\n    ///\n    /// By default, no cookie store is used.\n    ///\n    /// # Optional\n    ///\n    /// This requires the optional `cookies` feature to be enabled.\n    #[inline]\n    #[cfg(feature = \"cookies\")]\n    #[cfg_attr(docsrs, doc(cfg(feature = \"cookies\")))]\n    pub fn cookie_provider<C: cookie::IntoCookieStore>(mut self, cookie_store: C) -> ClientBuilder {\n        self.config.cookie_store = Some(cookie_store.into_shared());\n        self\n    }\n\n    /// Enable auto gzip decompression by checking the `Content-Encoding` response header.\n    ///\n    /// If auto gzip decompression is turned on:\n    ///\n    /// - When sending a request and if the 
request's headers do not already contain an\n    ///   `Accept-Encoding` **and** `Range` values, the `Accept-Encoding` header is set to `gzip`.\n    ///   The request body is **not** automatically compressed.\n    /// - When receiving a response, if its headers contain a `Content-Encoding` value of `gzip`,\n    ///   both `Content-Encoding` and `Content-Length` are removed from the headers' set. The\n    ///   response body is automatically decompressed.\n    ///\n    /// If the `gzip` feature is turned on, the default option is enabled.\n    ///\n    /// # Optional\n    ///\n    /// This requires the optional `gzip` feature to be enabled\n    #[inline]\n    #[cfg(feature = \"gzip\")]\n    #[cfg_attr(docsrs, doc(cfg(feature = \"gzip\")))]\n    pub fn gzip(mut self, enable: bool) -> ClientBuilder {\n        self.config.accept_encoding.gzip = enable;\n        self\n    }\n\n    /// Enable auto brotli decompression by checking the `Content-Encoding` response header.\n    ///\n    /// If auto brotli decompression is turned on:\n    ///\n    /// - When sending a request and if the request's headers do not already contain an\n    ///   `Accept-Encoding` **and** `Range` values, the `Accept-Encoding` header is set to `br`. The\n    ///   request body is **not** automatically compressed.\n    /// - When receiving a response, if its headers contain a `Content-Encoding` value of `br`, both\n    ///   `Content-Encoding` and `Content-Length` are removed from the headers' set. 
The response\n    ///   body is automatically decompressed.\n    ///\n    /// If the `brotli` feature is turned on, the default option is enabled.\n    ///\n    /// # Optional\n    ///\n    /// This requires the optional `brotli` feature to be enabled\n    #[inline]\n    #[cfg(feature = \"brotli\")]\n    #[cfg_attr(docsrs, doc(cfg(feature = \"brotli\")))]\n    pub fn brotli(mut self, enable: bool) -> ClientBuilder {\n        self.config.accept_encoding.brotli = enable;\n        self\n    }\n\n    /// Enable auto zstd decompression by checking the `Content-Encoding` response header.\n    ///\n    /// If auto zstd decompression is turned on:\n    ///\n    /// - When sending a request and if the request's headers do not already contain an\n    ///   `Accept-Encoding` **and** `Range` values, the `Accept-Encoding` header is set to `zstd`.\n    ///   The request body is **not** automatically compressed.\n    /// - When receiving a response, if its headers contain a `Content-Encoding` value of `zstd`,\n    ///   both `Content-Encoding` and `Content-Length` are removed from the headers' set. The\n    ///   response body is automatically decompressed.\n    ///\n    /// If the `zstd` feature is turned on, the default option is enabled.\n    ///\n    /// # Optional\n    ///\n    /// This requires the optional `zstd` feature to be enabled\n    #[inline]\n    #[cfg(feature = \"zstd\")]\n    #[cfg_attr(docsrs, doc(cfg(feature = \"zstd\")))]\n    pub fn zstd(mut self, enable: bool) -> ClientBuilder {\n        self.config.accept_encoding.zstd = enable;\n        self\n    }\n\n    /// Enable auto deflate decompression by checking the `Content-Encoding` response header.\n    ///\n    /// If auto deflate decompression is turned on:\n    ///\n    /// - When sending a request and if the request's headers do not already contain an\n    ///   `Accept-Encoding` **and** `Range` values, the `Accept-Encoding` header is set to\n    ///   `deflate`. 
The request body is **not** automatically compressed.\n    /// - When receiving a response, if it's headers contain a `Content-Encoding` value that equals\n    ///   to `deflate`, both values `Content-Encoding` and `Content-Length` are removed from the\n    ///   headers' set. The response body is automatically decompressed.\n    ///\n    /// If the `deflate` feature is turned on, the default option is enabled.\n    ///\n    /// # Optional\n    ///\n    /// This requires the optional `deflate` feature to be enabled\n    #[inline]\n    #[cfg(feature = \"deflate\")]\n    #[cfg_attr(docsrs, doc(cfg(feature = \"deflate\")))]\n    pub fn deflate(mut self, enable: bool) -> ClientBuilder {\n        self.config.accept_encoding.deflate = enable;\n        self\n    }\n\n    /// Disable auto response body zstd decompression.\n    ///\n    /// This method exists even if the optional `zstd` feature is not enabled.\n    /// This can be used to ensure a `Client` doesn't use zstd decompression\n    /// even if another dependency were to enable the optional `zstd` feature.\n    #[inline]\n    pub fn no_zstd(self) -> ClientBuilder {\n        #[cfg(feature = \"zstd\")]\n        {\n            self.zstd(false)\n        }\n\n        #[cfg(not(feature = \"zstd\"))]\n        {\n            self\n        }\n    }\n\n    /// Disable auto response body gzip decompression.\n    ///\n    /// This method exists even if the optional `gzip` feature is not enabled.\n    /// This can be used to ensure a `Client` doesn't use gzip decompression\n    /// even if another dependency were to enable the optional `gzip` feature.\n    #[inline]\n    pub fn no_gzip(self) -> ClientBuilder {\n        #[cfg(feature = \"gzip\")]\n        {\n            self.gzip(false)\n        }\n\n        #[cfg(not(feature = \"gzip\"))]\n        {\n            self\n        }\n    }\n\n    /// Disable auto response body brotli decompression.\n    ///\n    /// This method exists even if the optional `brotli` feature is not 
enabled.\n    /// This can be used to ensure a `Client` doesn't use brotli decompression\n    /// even if another dependency were to enable the optional `brotli` feature.\n    #[inline]\n    pub fn no_brotli(self) -> ClientBuilder {\n        #[cfg(feature = \"brotli\")]\n        {\n            self.brotli(false)\n        }\n\n        #[cfg(not(feature = \"brotli\"))]\n        {\n            self\n        }\n    }\n\n    /// Disable auto response body deflate decompression.\n    ///\n    /// This method exists even if the optional `deflate` feature is not enabled.\n    /// This can be used to ensure a `Client` doesn't use deflate decompression\n    /// even if another dependency were to enable the optional `deflate` feature.\n    #[inline]\n    pub fn no_deflate(self) -> ClientBuilder {\n        #[cfg(feature = \"deflate\")]\n        {\n            self.deflate(false)\n        }\n\n        #[cfg(not(feature = \"deflate\"))]\n        {\n            self\n        }\n    }\n\n    // Redirect options\n\n    /// Set a `RedirectPolicy` for this client.\n    ///\n    /// Default will follow redirects up to a maximum of 10.\n    #[inline]\n    pub fn redirect(mut self, policy: redirect::Policy) -> ClientBuilder {\n        self.config.redirect_policy = policy;\n        self\n    }\n\n    /// Enable or disable automatic setting of the `Referer` header.\n    ///\n    /// Default is `true`.\n    #[inline]\n    pub fn referer(mut self, enable: bool) -> ClientBuilder {\n        self.config.referer = enable;\n        self\n    }\n\n    // Retry options\n\n    /// Set a request retry policy.\n    pub fn retry(mut self, policy: retry::Policy) -> ClientBuilder {\n        self.config.retry_policy = policy;\n        self\n    }\n\n    // Proxy options\n\n    /// Add a `Proxy` to the list of proxies the `Client` will use.\n    ///\n    /// # Note\n    ///\n    /// Adding a proxy will disable the automatic usage of the \"system\" proxy.\n    ///\n    /// # Example\n    /// ```\n    /// 
use wreq::{Client, Proxy};\n    ///\n    /// let proxy = Proxy::http(\"http://proxy:8080\").unwrap();\n    /// let client = Client::builder().proxy(proxy).build().unwrap();\n    /// ```\n    #[inline]\n    pub fn proxy(mut self, proxy: Proxy) -> ClientBuilder {\n        self.config.proxies.push(proxy.into_matcher());\n        self.config.auto_sys_proxy = false;\n        self\n    }\n\n    /// Clear all `Proxies`, so `Client` will use no proxy anymore.\n    ///\n    /// # Note\n    /// To add a proxy exclusion list, use [crate::proxy::Proxy::no_proxy()]\n    /// on all desired proxies instead.\n    ///\n    /// This also disables the automatic usage of the \"system\" proxy.\n    #[inline]\n    pub fn no_proxy(mut self) -> ClientBuilder {\n        self.config.proxies.clear();\n        self.config.auto_sys_proxy = false;\n        self\n    }\n\n    // Timeout options\n\n    /// Enables a request timeout.\n    ///\n    /// The timeout is applied from when the request starts connecting until the\n    /// response body has finished.\n    ///\n    /// Default is no timeout.\n    #[inline]\n    pub fn timeout(mut self, timeout: Duration) -> ClientBuilder {\n        self.config.timeout_options.total_timeout(timeout);\n        self\n    }\n\n    /// Set a timeout for only the read phase of a `Client`.\n    ///\n    /// Default is `None`.\n    #[inline]\n    pub fn read_timeout(mut self, timeout: Duration) -> ClientBuilder {\n        self.config.timeout_options.read_timeout(timeout);\n        self\n    }\n\n    /// Set a timeout for only the connect phase of a `Client`.\n    ///\n    /// Default is `None`.\n    ///\n    /// # Note\n    ///\n    /// This **requires** the futures be executed in a tokio runtime with\n    /// a tokio timer enabled.\n    #[inline]\n    pub fn connect_timeout(mut self, timeout: Duration) -> ClientBuilder {\n        self.config.connect_timeout = Some(timeout);\n        self\n    }\n\n    /// Set whether connections should emit verbose logs.\n    
///\n    /// Enabling this option will emit [log][] messages at the `TRACE` level\n    /// for read and write operations on connections.\n    ///\n    /// [log]: https://crates.io/crates/log\n    #[inline]\n    pub fn connection_verbose(mut self, verbose: bool) -> ClientBuilder {\n        self.config.connection_verbose = verbose;\n        self\n    }\n\n    // HTTP options\n\n    /// Set an optional timeout for idle sockets being kept-alive.\n    ///\n    /// Pass `None` to disable timeout.\n    ///\n    /// Default is 90 seconds.\n    #[inline]\n    pub fn pool_idle_timeout<D>(mut self, val: D) -> ClientBuilder\n    where\n        D: Into<Option<Duration>>,\n    {\n        self.config.pool_idle_timeout = val.into();\n        self\n    }\n\n    /// Sets the maximum idle connection per host allowed in the pool.\n    #[inline]\n    pub fn pool_max_idle_per_host(mut self, max: usize) -> ClientBuilder {\n        self.config.pool_max_idle_per_host = max;\n        self\n    }\n\n    /// Sets the maximum number of connections in the pool.\n    #[inline]\n    pub fn pool_max_size(mut self, max: usize) -> ClientBuilder {\n        self.config.pool_max_size = NonZeroUsize::new(max);\n        self\n    }\n\n    /// Restrict the Client to be used with HTTPS only requests.\n    ///\n    /// Defaults to false.\n    #[inline]\n    pub fn https_only(mut self, enabled: bool) -> ClientBuilder {\n        self.config.https_only = enabled;\n        self\n    }\n\n    /// Only use HTTP/1.\n    #[inline]\n    pub fn http1_only(mut self) -> ClientBuilder {\n        self.config.http_version_pref = HttpVersionPref::Http1;\n        self\n    }\n\n    /// Only use HTTP/2.\n    #[inline]\n    pub fn http2_only(mut self) -> ClientBuilder {\n        self.config.http_version_pref = HttpVersionPref::Http2;\n        self\n    }\n\n    /// Sets the HTTP/1 options for the client.\n    #[inline]\n    pub fn http1_options<T>(mut self, options: T) -> ClientBuilder\n    where\n        T: 
Into<Option<Http1Options>>,\n    {\n        self.config.http1_options = options.into();\n        self\n    }\n\n    /// Sets the HTTP/2 options for the client.\n    #[inline]\n    pub fn http2_options<T>(mut self, options: T) -> ClientBuilder\n    where\n        T: Into<Option<Http2Options>>,\n    {\n        self.config.http2_options = options.into();\n        self\n    }\n\n    // TCP options\n\n    /// Set whether sockets have `TCP_NODELAY` enabled.\n    ///\n    /// Default is `true`.\n    #[inline]\n    pub fn tcp_nodelay(mut self, enabled: bool) -> ClientBuilder {\n        self.config.tcp_nodelay = enabled;\n        self\n    }\n\n    /// Set that all sockets have `SO_KEEPALIVE` set with the supplied duration.\n    ///\n    /// If `None`, the option will not be set.\n    ///\n    /// Default is 15 seconds.\n    #[inline]\n    pub fn tcp_keepalive<D>(mut self, val: D) -> ClientBuilder\n    where\n        D: Into<Option<Duration>>,\n    {\n        self.config.tcp_keepalive = val.into();\n        self\n    }\n\n    /// Set that all sockets have `SO_KEEPALIVE` set with the supplied interval.\n    ///\n    /// If `None`, the option will not be set.\n    ///\n    /// Default is 15 seconds.\n    #[inline]\n    pub fn tcp_keepalive_interval<D>(mut self, val: D) -> ClientBuilder\n    where\n        D: Into<Option<Duration>>,\n    {\n        self.config.tcp_keepalive_interval = val.into();\n        self\n    }\n\n    /// Set that all sockets have `SO_KEEPALIVE` set with the supplied retry count.\n    ///\n    /// If `None`, the option will not be set.\n    ///\n    /// Default is 3 retries.\n    #[inline]\n    pub fn tcp_keepalive_retries<C>(mut self, retries: C) -> ClientBuilder\n    where\n        C: Into<Option<u32>>,\n    {\n        self.config.tcp_keepalive_retries = retries.into();\n        self\n    }\n\n    /// Set that all sockets have `TCP_USER_TIMEOUT` set with the supplied duration.\n    ///\n    /// This option controls how long transmitted data may remain 
unacknowledged before\n    /// the connection is force-closed.\n    ///\n    /// If `None`, the option will not be set.\n    ///\n    /// Default is 30 seconds.\n    #[inline]\n    #[cfg(any(target_os = \"android\", target_os = \"fuchsia\", target_os = \"linux\"))]\n    #[cfg_attr(\n        docsrs,\n        doc(cfg(any(target_os = \"android\", target_os = \"fuchsia\", target_os = \"linux\")))\n    )]\n    pub fn tcp_user_timeout<D>(mut self, val: D) -> ClientBuilder\n    where\n        D: Into<Option<Duration>>,\n    {\n        self.config.tcp_user_timeout = val.into();\n        self\n    }\n\n    /// Set whether sockets have `SO_REUSEADDR` enabled.\n    #[inline]\n    pub fn tcp_reuse_address(mut self, enabled: bool) -> ClientBuilder {\n        self.config.tcp_reuse_address = enabled;\n        self\n    }\n\n    /// Sets the size of the TCP send buffer on this client socket.\n    ///\n    /// On most operating systems, this sets the `SO_SNDBUF` socket option.\n    #[inline]\n    pub fn tcp_send_buffer_size<S>(mut self, size: S) -> ClientBuilder\n    where\n        S: Into<Option<usize>>,\n    {\n        self.config.tcp_send_buffer_size = size.into();\n        self\n    }\n\n    /// Sets the size of the TCP receive buffer on this client socket.\n    ///\n    /// On most operating systems, this sets the `SO_RCVBUF` socket option.\n    #[inline]\n    pub fn tcp_recv_buffer_size<S>(mut self, size: S) -> ClientBuilder\n    where\n        S: Into<Option<usize>>,\n    {\n        self.config.tcp_recv_buffer_size = size.into();\n        self\n    }\n\n    /// Set timeout for [RFC 6555 (Happy Eyeballs)][RFC 6555] algorithm.\n    ///\n    /// If hostname resolves to both IPv4 and IPv6 addresses and connection\n    /// cannot be established using preferred address family before timeout\n    /// elapses, then connector will in parallel attempt connection using other\n    /// address family.\n    ///\n    /// If `None`, parallel connection attempts are disabled.\n    ///\n    
/// Default is 300 milliseconds.\n    ///\n    /// [RFC 6555]: https://tools.ietf.org/html/rfc6555\n    #[inline]\n    pub fn tcp_happy_eyeballs_timeout<D>(mut self, val: D) -> ClientBuilder\n    where\n        D: Into<Option<Duration>>,\n    {\n        self.config.tcp_happy_eyeballs_timeout = val.into();\n        self\n    }\n\n    /// Bind to a local IP Address.\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// use std::net::IpAddr;\n    /// let local_addr = IpAddr::from([12, 4, 1, 8]);\n    /// let client = wreq::Client::builder()\n    ///     .local_address(local_addr)\n    ///     .build()\n    ///     .unwrap();\n    /// ```\n    #[inline]\n    pub fn local_address<T>(mut self, addr: T) -> ClientBuilder\n    where\n        T: Into<Option<IpAddr>>,\n    {\n        self.config\n            .socket_bind_options\n            .set_local_address(addr.into());\n        self\n    }\n\n    /// Set that all sockets are bound to the configured IPv4 or IPv6 address (depending on host's\n    /// preferences) before connection.\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// use std::net::{Ipv4Addr, Ipv6Addr};\n    /// let ipv4 = Ipv4Addr::new(127, 0, 0, 1);\n    /// let ipv6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1);\n    /// let client = wreq::Client::builder()\n    ///     .local_addresses(ipv4, ipv6)\n    ///     .build()\n    ///     .unwrap();\n    /// ```\n    #[inline]\n    pub fn local_addresses<V4, V6>(mut self, ipv4: V4, ipv6: V6) -> ClientBuilder\n    where\n        V4: Into<Option<Ipv4Addr>>,\n        V6: Into<Option<Ipv6Addr>>,\n    {\n        self.config\n            .socket_bind_options\n            .set_local_addresses(ipv4, ipv6);\n        self\n    }\n\n    /// Bind connections only on the specified network interface.\n    ///\n    /// This option is only available on the following operating systems:\n    ///\n    /// - Android\n    /// - Fuchsia\n    /// - Linux\n    /// - macOS and macOS-like systems (iOS, tvOS, watchOS and 
visionOS)\n    /// - Solaris and illumos\n    ///\n    /// On Android, Linux, and Fuchsia, this uses the\n    /// [`SO_BINDTODEVICE`][man-7-socket] socket option. On macOS and macOS-like\n    /// systems, Solaris, and illumos, this instead uses the [`IP_BOUND_IF` and\n    /// `IPV6_BOUND_IF`][man-7p-ip] socket options (as appropriate).\n    ///\n    /// Note that connections will fail if the provided interface name is not a\n    /// network interface that currently exists when a connection is established.\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// # fn doc() -> Result<(), wreq::Error> {\n    /// let interface = \"lo\";\n    /// let client = wreq::Client::builder()\n    ///     .interface(interface)\n    ///     .build()?;\n    /// # Ok(())\n    /// # }\n    /// ```\n    ///\n    /// [man-7-socket]: https://man7.org/linux/man-pages/man7/socket.7.html\n    /// [man-7p-ip]: https://docs.oracle.com/cd/E86824_01/html/E54777/ip-7p.html\n    #[inline]\n    #[cfg(any(\n        target_os = \"android\",\n        target_os = \"fuchsia\",\n        target_os = \"illumos\",\n        target_os = \"ios\",\n        target_os = \"linux\",\n        target_os = \"macos\",\n        target_os = \"solaris\",\n        target_os = \"tvos\",\n        target_os = \"visionos\",\n        target_os = \"watchos\",\n    ))]\n    #[cfg_attr(\n        docsrs,\n        doc(cfg(any(\n            target_os = \"android\",\n            target_os = \"fuchsia\",\n            target_os = \"illumos\",\n            target_os = \"ios\",\n            target_os = \"linux\",\n            target_os = \"macos\",\n            target_os = \"solaris\",\n            target_os = \"tvos\",\n            target_os = \"visionos\",\n            target_os = \"watchos\",\n        )))\n    )]\n    pub fn interface<T>(mut self, interface: T) -> ClientBuilder\n    where\n        T: Into<std::borrow::Cow<'static, str>>,\n    {\n        self.config.socket_bind_options.set_interface(interface);\n        self\n    
}\n\n    // TLS options\n\n    /// Sets the identity to be used for client certificate authentication.\n    #[inline]\n    pub fn tls_identity(mut self, identity: Identity) -> ClientBuilder {\n        self.config.tls_identity = Some(identity);\n        self\n    }\n\n    /// Sets the verify certificate store for the client.\n    ///\n    /// This method allows you to specify a custom verify certificate store to be used\n    /// for TLS connections. By default, the system's verify certificate store is used.\n    #[inline]\n    pub fn tls_cert_store(mut self, store: CertStore) -> ClientBuilder {\n        self.config.tls_cert_store = store;\n        self\n    }\n\n    /// Controls the use of certificate validation.\n    ///\n    /// Defaults to `true`.\n    ///\n    /// # Warning\n    ///\n    /// You should think very carefully before using this method. If\n    /// invalid certificates are trusted, *any* certificate for *any* site\n    /// will be trusted for use. This includes expired certificates. This\n    /// introduces significant vulnerabilities, and should only be used\n    /// as a last resort.\n    #[inline]\n    pub fn tls_cert_verification(mut self, cert_verification: bool) -> ClientBuilder {\n        self.config.tls_cert_verification = cert_verification;\n        self\n    }\n\n    /// Configures the use of hostname verification when connecting.\n    ///\n    /// Defaults to `true`.\n    /// # Warning\n    ///\n    /// You should think very carefully before you use this method. If hostname verification is not\n    /// used, *any* valid certificate for *any* site will be trusted for use from any other. 
This\n    /// introduces a significant vulnerability to man-in-the-middle attacks.\n    #[inline]\n    pub fn tls_verify_hostname(mut self, verify_hostname: bool) -> ClientBuilder {\n        self.config.tls_verify_hostname = verify_hostname;\n        self\n    }\n\n    /// Configures the use of Server Name Indication (SNI) when connecting.\n    ///\n    /// Defaults to `true`.\n    #[inline]\n    pub fn tls_sni(mut self, tls_sni: bool) -> ClientBuilder {\n        self.config.tls_sni = tls_sni;\n        self\n    }\n\n    /// Configures TLS key logging for the client.\n    #[inline]\n    pub fn tls_keylog(mut self, keylog: KeyLog) -> ClientBuilder {\n        self.config.tls_keylog = Some(keylog);\n        self\n    }\n\n    /// Set the minimum required TLS version for connections.\n    ///\n    /// By default the TLS backend's own default is used.\n    #[inline]\n    pub fn tls_min_version(mut self, version: TlsVersion) -> ClientBuilder {\n        self.config.tls_min_version = Some(version);\n        self\n    }\n\n    /// Set the maximum allowed TLS version for connections.\n    ///\n    /// By default there's no maximum.\n    #[inline]\n    pub fn tls_max_version(mut self, version: TlsVersion) -> ClientBuilder {\n        self.config.tls_max_version = Some(version);\n        self\n    }\n\n    /// Add TLS information as `TlsInfo` extension to responses.\n    ///\n    /// # Optional\n    ///\n    /// feature to be enabled.\n    #[inline]\n    pub fn tls_info(mut self, tls_info: bool) -> ClientBuilder {\n        self.config.tls_info = tls_info;\n        self\n    }\n\n    /// Sets the TLS session cache.\n    ///\n    /// By default, an in-memory LRU cache is used. 
Use this method to provide\n    /// a custom [`TlsSessionCache`] implementation (e.g., file-based or distributed).\n    #[inline]\n    pub fn tls_session_cache<S: IntoTlsSessionCache>(mut self, store: S) -> ClientBuilder {\n        self.config.tls_session_cache = Some(store.into_shared());\n        self\n    }\n\n    /// Sets the TLS options for the client.\n    #[inline]\n    pub fn tls_options<T>(mut self, options: T) -> ClientBuilder\n    where\n        T: Into<Option<TlsOptions>>,\n    {\n        self.config.tls_options = options.into();\n        self\n    }\n\n    // DNS options\n\n    /// Disables the hickory-dns async resolver.\n    ///\n    /// This method exists even if the optional `hickory-dns` feature is not enabled.\n    /// This can be used to ensure a `Client` doesn't use the hickory-dns async resolver\n    /// even if another dependency were to enable the optional `hickory-dns` feature.\n    #[inline]\n    #[cfg(feature = \"hickory-dns\")]\n    #[cfg_attr(docsrs, doc(cfg(feature = \"hickory-dns\")))]\n    pub fn no_hickory_dns(mut self) -> ClientBuilder {\n        self.config.hickory_dns = false;\n        self\n    }\n\n    /// Override DNS resolution for specific domains to a particular IP address.\n    ///\n    /// Warning\n    ///\n    /// Since the DNS protocol has no notion of ports, if you wish to send\n    /// traffic to a particular port you must include this port in the URI\n    /// itself, any port in the overridden addr will be ignored and traffic sent\n    /// to the conventional port for the given scheme (e.g. 
80 for http).\n    #[inline]\n    pub fn resolve<D>(self, domain: D, addr: SocketAddr) -> ClientBuilder\n    where\n        D: Into<Cow<'static, str>>,\n    {\n        self.resolve_to_addrs(domain, std::iter::once(addr))\n    }\n\n    /// Override DNS resolution for specific domains to particular IP addresses.\n    ///\n    /// Warning\n    ///\n    /// Since the DNS protocol has no notion of ports, if you wish to send\n    /// traffic to a particular port you must include this port in the URI\n    /// itself, any port in the overridden addresses will be ignored and traffic sent\n    /// to the conventional port for the given scheme (e.g. 80 for http).\n    #[inline]\n    pub fn resolve_to_addrs<D, A>(mut self, domain: D, addrs: A) -> ClientBuilder\n    where\n        D: Into<Cow<'static, str>>,\n        A: IntoIterator<Item = SocketAddr>,\n    {\n        self.config\n            .dns_overrides\n            .insert(domain.into(), addrs.into_iter().collect());\n        self\n    }\n\n    /// Override the DNS resolver implementation.\n    ///\n    /// Pass any type implementing `IntoResolve`.\n    /// Overrides for specific names passed to `resolve` and `resolve_to_addrs` will\n    /// still be applied on top of this resolver.\n    #[inline]\n    pub fn dns_resolver<R: IntoResolve>(mut self, resolver: R) -> ClientBuilder {\n        self.config.dns_resolver = Some(resolver.into_shared());\n        self\n    }\n\n    // Tower middleware options\n\n    /// Adds a new Tower [`Layer`](https://docs.rs/tower/latest/tower/trait.Layer.html) to the\n    /// request [`Service`](https://docs.rs/tower/latest/tower/trait.Service.html) which is responsible\n    /// for request processing.\n    ///\n    /// Each subsequent invocation of this function will wrap previous layers.\n    ///\n    /// If configured, the `timeout` will be the outermost layer.\n    ///\n    /// Example usage:\n    /// ```\n    /// use std::time::Duration;\n    ///\n    /// let client = 
wreq::Client::builder()\n    ///     .timeout(Duration::from_millis(200))\n    ///     .layer(tower::timeout::TimeoutLayer::new(Duration::from_millis(50)))\n    ///     .build()\n    ///     .unwrap();\n    /// ```\n    #[inline]\n    pub fn layer<L>(mut self, layer: L) -> ClientBuilder\n    where\n        L: Layer<BoxedClientService> + Clone + Send + Sync + 'static,\n        L::Service: Service<http::Request<Body>, Response = http::Response<ResponseBody>, Error = BoxError>\n            + Clone\n            + Send\n            + Sync\n            + 'static,\n        <L::Service as Service<http::Request<Body>>>::Future: Send + 'static,\n    {\n        let layer = BoxCloneSyncServiceLayer::new(layer);\n        self.config.layers.push(layer);\n        self\n    }\n\n    /// Adds a new Tower [`Layer`](https://docs.rs/tower/latest/tower/trait.Layer.html) to the\n    /// base connector [`Service`](https://docs.rs/tower/latest/tower/trait.Service.html) which\n    /// is responsible for connection establishment.\n    ///\n    /// Each subsequent invocation of this function will wrap previous layers.\n    ///\n    /// If configured, the `connect_timeout` will be the outermost layer.\n    ///\n    /// Example usage:\n    /// ```\n    /// use std::time::Duration;\n    ///\n    /// let client = wreq::Client::builder()\n    ///     // resolved to outermost layer, meaning while we are waiting on concurrency limit\n    ///     .connect_timeout(Duration::from_millis(200))\n    ///     // underneath the concurrency check, so only after concurrency limit lets us through\n    ///     .connector_layer(tower::timeout::TimeoutLayer::new(Duration::from_millis(50)))\n    ///     .connector_layer(tower::limit::concurrency::ConcurrencyLimitLayer::new(2))\n    ///     .build()\n    ///     .unwrap();\n    /// ```\n    #[inline]\n    pub fn connector_layer<L>(mut self, layer: L) -> ClientBuilder\n    where\n        L: Layer<BoxedConnectorService> + Clone + Send + Sync + 'static,\n        
L::Service:\n            Service<Unnameable, Response = Conn, Error = BoxError> + Clone + Send + Sync + 'static,\n        <L::Service as Service<Unnameable>>::Future: Send + 'static,\n    {\n        let layer = BoxCloneSyncServiceLayer::new(layer);\n        self.config.connector_layers.push(layer);\n        self\n    }\n\n    // TLS/HTTP2 emulation options\n\n    /// Configures the client builder to emulate the specified HTTP context.\n    ///\n    /// This method sets the necessary headers, HTTP/1 and HTTP/2 options configurations, and TLS\n    /// options config to use the specified HTTP context. It allows the client to mimic the\n    /// behavior of different versions or setups, which can be useful for testing or ensuring\n    /// compatibility with various environments.\n    ///\n    /// # Note\n    /// This will overwrite the existing configuration.\n    /// You must set emulation before you can perform subsequent HTTP1/HTTP2/TLS fine-tuning.\n    #[inline]\n    pub fn emulation<T: IntoEmulation>(self, emulation: T) -> ClientBuilder {\n        let emulation = emulation.into_emulation();\n        self.tls_options(emulation.tls_options)\n            .http1_options(emulation.http1_options)\n            .http2_options(emulation.http2_options)\n            .default_headers(emulation.headers)\n            .orig_headers(emulation.orig_headers)\n    }\n}\n"
  },
  {
    "path": "src/config.rs",
    "content": "//! The `config` module provides a generic mechanism for loading and managing\n//! request-scoped configuration.\n//!\n//! # Design Overview\n//!\n//! This module is centered around two abstractions:\n//!\n//! - The [`RequestConfigValue`] trait, used to associate a config key type with its value type.\n//! - The [`RequestConfig`] struct, which wraps an optional value of the type linked via\n//!   [`RequestConfigValue`].\n//!\n//! Under the hood, the [`RequestConfig`] struct holds a single value for the associated config\n//! type. This value can be conveniently accessed, inserted, or mutated using [`http::Extensions`],\n//! enabling type-safe configuration storage and retrieval on a per-request basis.\n//!\n//! # Motivation\n//!\n//! The key design benefit is the ability to store multiple config types—potentially even with the\n//! same value type (e.g., [`std::time::Duration`])—without code duplication or ambiguity. By\n//! leveraging trait association, each config key is distinct at the type level, while code for\n//! storage and access remains totally generic.\n//!\n//! # Usage\n//!\n//! Implement [`RequestConfigValue`] for any marker type you wish to use as a config key,\n//! specifying the associated value type. Then use [`RequestConfig<T>`] in [`Extensions`]\n//! 
to set or retrieve config values for each key type in a uniform way.\n\nuse http::Extensions;\n\n/// Associate a marker key type with its associated value type stored in [`http::Extensions`].\n/// Implement this trait for unit/marker types to declare the concrete `Value` used for that key.\npub(crate) trait RequestConfigValue: Clone + 'static {\n    type Value: Clone + Send + Sync + 'static;\n}\n\n/// Typed wrapper that holds an optional configuration value for a given marker key `T`.\n/// Instances of [`RequestConfig<T>`] are intended to be inserted into [`http::Extensions`].\n#[derive(Clone, Copy)]\npub(crate) struct RequestConfig<T: RequestConfigValue>(Option<T::Value>);\n\nimpl<T: RequestConfigValue> Default for RequestConfig<T> {\n    #[inline]\n    fn default() -> Self {\n        RequestConfig(None)\n    }\n}\n\nimpl<T> RequestConfig<T>\nwhere\n    T: RequestConfigValue,\n{\n    /// Creates a new `RequestConfig` with the provided value.\n    #[inline]\n    pub(crate) const fn new(v: Option<T::Value>) -> Self {\n        RequestConfig(v)\n    }\n\n    /// Returns a reference to the inner value of this request-scoped configuration.\n    #[inline]\n    pub(crate) const fn as_ref(&self) -> Option<&T::Value> {\n        self.0.as_ref()\n    }\n\n    /// Retrieve the value from the request-scoped configuration.\n    ///\n    /// If the request specifies a value, use that value; otherwise, attempt to retrieve it from the\n    /// current instance (typically a client instance).\n    #[inline]\n    pub(crate) fn fetch<'a>(&'a self, ext: &'a Extensions) -> Option<&'a T::Value> {\n        ext.get::<RequestConfig<T>>()\n            .and_then(Self::as_ref)\n            .or(self.as_ref())\n    }\n\n    /// Stores this value into the given [`http::Extensions`], if a value of the same type is not\n    /// already present.\n    ///\n    /// This method checks whether the provided [`http::Extensions`] contains a\n    /// [`RequestConfig<T>`]. 
If not, it clones the current value and inserts it into the\n    /// extensions. If a value already exists, the method does nothing.\n    #[inline]\n    pub(crate) fn store<'a>(&'a self, ext: &'a mut Extensions) -> &'a mut Option<T::Value> {\n        &mut ext.get_or_insert_with(|| self.clone()).0\n    }\n\n    /// Loads the internal value from the provided [`http::Extensions`], if present.\n    ///\n    /// This method attempts to remove a value of type [`RequestConfig<T>`] from the provided\n    /// [`http::Extensions`]. If such a value exists, the current internal value is replaced with\n    /// the removed value. If not, the internal value remains unchanged.\n    #[inline]\n    pub(crate) fn load(&mut self, ext: &mut Extensions) -> Option<&T::Value> {\n        if let Some(value) = RequestConfig::<T>::remove(ext) {\n            self.0.replace(value);\n        }\n        self.as_ref()\n    }\n\n    /// Returns an immutable reference to the stored value from the given [`http::Extensions`], if\n    /// present.\n    ///\n    /// Internally fetches [`RequestConfig<T>`] and returns a reference to its inner value, if set.\n    #[inline]\n    pub(crate) fn get(ext: &Extensions) -> Option<&T::Value> {\n        ext.get::<RequestConfig<T>>()?.0.as_ref()\n    }\n\n    /// Returns a mutable reference to the inner value in [`http::Extensions`], inserting a default\n    /// if missing.\n    ///\n    /// This ensures a [`RequestConfig<T>`] exists and returns a mutable reference to its inner\n    /// `Option<T::Value>`.\n    #[inline]\n    pub(crate) fn get_mut(ext: &mut Extensions) -> &mut Option<T::Value> {\n        &mut ext.get_or_insert_default::<RequestConfig<T>>().0\n    }\n\n    /// Removes and returns the stored value from the given [`http::Extensions`], if present.\n    ///\n    /// This consumes the [`RequestConfig<T>`] entry and extracts its inner value.\n    #[inline]\n    pub(crate) fn remove(ext: &mut Extensions) -> Option<T::Value> {\n        
ext.remove::<RequestConfig<T>>()?.0\n    }\n}\n\n/// Implements [`RequestConfigValue`] for a given type.\nmacro_rules! impl_request_config_value {\n    ($type:ty) => {\n        impl crate::config::RequestConfigValue for $type {\n            type Value = Self;\n        }\n    };\n    ($type:ty, $value:ty) => {\n        impl crate::config::RequestConfigValue for $type {\n            type Value = $value;\n        }\n    };\n}\n"
  },
  {
    "path": "src/cookie.rs",
    "content": "//! HTTP Cookies\n\nuse std::{collections::HashMap, convert::TryInto, fmt, sync::Arc, time::SystemTime};\n\nuse bytes::Bytes;\nuse cookie::{Cookie as RawCookie, CookieJar, Expiration, SameSite, time::Duration};\nuse http::{Uri, Version};\n\nuse crate::{IntoUri, error::Error, ext::UriExt, header::HeaderValue, sync::RwLock};\n\n/// Cookie header values in two forms.\n#[derive(Debug, Clone)]\n#[non_exhaustive]\npub enum Cookies {\n    /// All cookies combined into one header (compressed).\n    Compressed(HeaderValue),\n\n    /// Each cookie sent as its own header (uncompressed).\n    Uncompressed(Vec<HeaderValue>),\n\n    /// No cookies.\n    Empty,\n}\n\n/// Actions for a persistent cookie store providing session support.\npub trait CookieStore: Send + Sync {\n    /// Store a set of Set-Cookie header values received from `uri`\n    fn set_cookies(&self, cookie_headers: &mut dyn Iterator<Item = &HeaderValue>, uri: &Uri);\n\n    /// Returns cookies for the given URI and HTTP version.\n    ///\n    /// Following [RFC 9112 §5.6.3], HTTP/1.1 combines all cookies into a single header.\n    /// For [HTTP/2] and above, cookies are sent as separate header fields\n    /// as per [RFC 9113 §8.1.2.5].\n    ///\n    /// [RFC 9112 §5.6.3]: https://www.rfc-editor.org/rfc/rfc9112#section-5.6.3\n    /// [RFC 9113 §8.1.2.5]: https://www.rfc-editor.org/rfc/rfc9113#section-8.1.2.5\n    /// [HTTP/2]: https://datatracker.ietf.org/doc/html/rfc9113\n    fn cookies(&self, uri: &Uri, version: Version) -> Cookies;\n}\n\nimpl_into_shared!(\n    /// Trait for converting types into a shared cookie store ([`Arc<dyn CookieStore>`]).\n    ///\n    /// Implemented for any [`CookieStore`] type, [`Arc<T>`] where `T: CookieStore`, and [`Arc<dyn\n    /// CookieStore>`]. 
Enables ergonomic conversion to a trait object for use in APIs without manual\n    /// boxing.\n    pub trait IntoCookieStore => CookieStore\n);\n\nimpl_request_config_value!(Arc<dyn CookieStore>);\n\n/// Trait for converting types into an owned cookie ([`Cookie<'static>`]).\npub trait IntoCookie {\n    /// Converts the implementor into a optional owned [`Cookie<'static>`].\n    fn into_cookie(self) -> Option<Cookie<'static>>;\n}\n\n/// A single HTTP cookie.\n#[derive(Debug, Clone)]\npub struct Cookie<'a>(RawCookie<'a>);\n\n/// A good default `CookieStore` implementation.\n///\n/// This is the implementation used when simply calling `cookie_store(true)`.\n/// This type is exposed to allow creating one and filling it with some\n/// existing cookies more easily, before creating a [`crate::Client`].\n#[derive(Debug, Default)]\npub struct Jar(RwLock<HashMap<String, HashMap<String, CookieJar>>>);\n\n// ===== impl IntoCookie =====\n\nimpl IntoCookie for Cookie<'_> {\n    #[inline]\n    fn into_cookie(self) -> Option<Cookie<'static>> {\n        Some(self.into_owned())\n    }\n}\n\nimpl IntoCookie for RawCookie<'_> {\n    #[inline]\n    fn into_cookie(self) -> Option<Cookie<'static>> {\n        Some(Cookie(self.into_owned()))\n    }\n}\n\nimpl IntoCookie for &str {\n    #[inline]\n    fn into_cookie(self) -> Option<Cookie<'static>> {\n        RawCookie::parse(self).map(|c| Cookie(c.into_owned())).ok()\n    }\n}\n\n// ===== impl Cookie =====\n\nimpl<'a> Cookie<'a> {\n    pub(crate) fn parse(value: &'a HeaderValue) -> crate::Result<Cookie<'a>> {\n        std::str::from_utf8(value.as_bytes())\n            .map_err(cookie::ParseError::from)\n            .and_then(cookie::Cookie::parse)\n            .map_err(Error::decode)\n            .map(Cookie)\n    }\n\n    /// The name of the cookie.\n    #[inline]\n    pub fn name(&self) -> &str {\n        self.0.name()\n    }\n\n    /// The value of the cookie.\n    #[inline]\n    pub fn value(&self) -> &str {\n        self.0.value()\n  
  }\n\n    /// Returns true if the 'HttpOnly' directive is enabled.\n    #[inline]\n    pub fn http_only(&self) -> bool {\n        self.0.http_only().unwrap_or(false)\n    }\n\n    /// Returns true if the 'Secure' directive is enabled.\n    #[inline]\n    pub fn secure(&self) -> bool {\n        self.0.secure().unwrap_or(false)\n    }\n\n    /// Returns true if  'SameSite' directive is 'Lax'.\n    #[inline]\n    pub fn same_site_lax(&self) -> bool {\n        self.0.same_site() == Some(SameSite::Lax)\n    }\n\n    /// Returns true if  'SameSite' directive is 'Strict'.\n    #[inline]\n    pub fn same_site_strict(&self) -> bool {\n        self.0.same_site() == Some(SameSite::Strict)\n    }\n\n    /// Returns the path directive of the cookie, if set.\n    #[inline]\n    pub fn path(&self) -> Option<&str> {\n        self.0.path()\n    }\n\n    /// Returns the domain directive of the cookie, if set.\n    #[inline]\n    pub fn domain(&self) -> Option<&str> {\n        self.0.domain()\n    }\n\n    /// Get the Max-Age information.\n    #[inline]\n    pub fn max_age(&self) -> Option<std::time::Duration> {\n        self.0.max_age().and_then(|d| d.try_into().ok())\n    }\n\n    /// The cookie expiration time.\n    #[inline]\n    pub fn expires(&self) -> Option<SystemTime> {\n        match self.0.expires() {\n            Some(Expiration::DateTime(offset)) => Some(SystemTime::from(offset)),\n            None | Some(Expiration::Session) => None,\n        }\n    }\n\n    /// Converts `self` into a `Cookie` with a static lifetime with as few\n    /// allocations as possible.\n    #[inline]\n    pub fn into_owned(self) -> Cookie<'static> {\n        Cookie(self.0.into_owned())\n    }\n}\n\nimpl fmt::Display for Cookie<'_> {\n    #[inline]\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        self.0.fmt(f)\n    }\n}\n\nimpl<'c> From<RawCookie<'c>> for Cookie<'c> {\n    #[inline]\n    fn from(cookie: RawCookie<'c>) -> Cookie<'c> {\n        Cookie(cookie)\n    
}\n}\n\nimpl<'c> From<Cookie<'c>> for RawCookie<'c> {\n    #[inline]\n    fn from(cookie: Cookie<'c>) -> RawCookie<'c> {\n        cookie.0\n    }\n}\n\n// ===== impl Jar =====\n\nmacro_rules! into_uri {\n    ($expr:expr) => {\n        match $expr.into_uri() {\n            Ok(u) => u,\n            Err(_) => return,\n        }\n    };\n}\n\nimpl Jar {\n    /// Get a cookie by name for a given Uri.\n    ///\n    /// Returns the cookie with the specified name for the domain and path\n    /// derived from the given Uri, if it exists.\n    ///\n    /// # Example\n    /// ```\n    /// use wreq::cookie::Jar;\n    /// let jar = Jar::default();\n    /// jar.add(\"foo=bar; Path=/foo; Domain=example.com\", \"http://example.com/foo\");\n    /// let cookie = jar.get(\"foo\", \"http://example.com/foo\").unwrap();\n    /// assert_eq!(cookie.value(), \"bar\");\n    /// ```\n    pub fn get<U: IntoUri>(&self, name: &str, uri: U) -> Option<Cookie<'static>> {\n        let uri = uri.into_uri().ok()?;\n        let host = normalize_domain(uri.host()?);\n        let cookie = self\n            .0\n            .read()\n            .get(host)?\n            .get(uri.path())?\n            .get(name)?\n            .clone()\n            .into_owned();\n        Some(Cookie(cookie))\n    }\n\n    /// Get all cookies in this jar.\n    ///\n    /// Returns an iterator over all cookies currently stored in the jar,\n    /// regardless of domain or path.\n    ///\n    /// # Example\n    /// ```\n    /// use wreq::cookie::Jar;\n    /// let jar = Jar::default();\n    /// jar.add(\"foo=bar; Domain=example.com\", \"http://example.com\");\n    /// for cookie in jar.get_all() {\n    ///     println!(\"{}={}\", cookie.name(), cookie.value());\n    /// }\n    /// ```\n    pub fn get_all(&self) -> impl Iterator<Item = Cookie<'static>> {\n        self.0\n            .read()\n            .iter()\n            .flat_map(|(domain, path_map)| {\n                path_map.iter().flat_map(|(path, name_map)| {\n           
         name_map.iter().map(|cookie| {\n                        let mut cookie = cookie.clone().into_owned();\n\n                        if cookie.domain().is_none() {\n                            cookie.set_domain(domain.to_owned());\n                        }\n\n                        if cookie.path().is_none() {\n                            cookie.set_path(path.to_owned());\n                        }\n\n                        Cookie(cookie)\n                    })\n                })\n            })\n            .collect::<Vec<_>>()\n            .into_iter()\n    }\n\n    /// Add a cookie to this jar.\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// use wreq::cookie::Jar;\n    /// use cookie::CookieBuilder;\n    /// let jar = Jar::default();\n    /// let cookie = CookieBuilder::new(\"foo\", \"bar\")\n    ///     .domain(\"example.com\")\n    ///     .path(\"/\")\n    ///     .build();\n    /// jar.add(cookie, \"http://example.com\");\n    ///\n    /// let cookie = CookieBuilder::new(\"foo\", \"bar\")\n    ///     .domain(\"example.com\")\n    ///     .path(\"/\")\n    ///     .build();\n    /// jar.add(cookie, \"http://example.com\");\n    /// ```\n    pub fn add<C, U>(&self, cookie: C, uri: U)\n    where\n        C: IntoCookie,\n        U: IntoUri,\n    {\n        if let Some(cookie) = cookie.into_cookie() {\n            let uri = into_uri!(uri);\n            let mut cookie: RawCookie<'static> = cookie.into();\n\n            // If the request-uri contains no host component:\n            let Some(host) = uri.host() else {\n                return;\n            };\n\n            // If the canonicalized request-host does not domain-match the\n            // domain-attribute:\n            //    Ignore the cookie entirely and abort these steps.\n            //\n            // RFC 6265 §5.3 + §5.1.3:\n            // https://datatracker.ietf.org/doc/html/rfc6265#section-5.3\n            // https://datatracker.ietf.org/doc/html/rfc6265#section-5.1.3\n       
     let domain = if let Some(domain) = cookie.domain() {\n                let domain = normalize_domain(domain);\n                if domain.is_empty() || !domain_match(normalize_domain(host), domain) {\n                    return;\n                }\n                domain\n            } else {\n                normalize_domain(host)\n            };\n\n            // If the request-uri contains no path component or if the first character of the\n            // path component of the request-uri is not a %x2F (\"/\") OR if the cookie's path-\n            // attribute is missing or does not start with a %x2F (\"/\"):\n            //    Let cookie-path be the default-path of the request-uri.\n            // Otherwise:\n            //    Let cookie-path be the substring of the request-uri's path from the first\n            // character    up to, not including, the right-most %x2F (\"/\").\n            //\n            // RFC 6265 §5.2.4 + §5.1.4:\n            // https://datatracker.ietf.org/doc/html/rfc6265#section-5.2.4\n            // https://datatracker.ietf.org/doc/html/rfc6265#section-5.1.4\n            let path = cookie\n                .path()\n                .filter(|path| path.starts_with(DEFAULT_PATH))\n                .unwrap_or_else(|| normalize_path(uri.path()));\n\n            let mut inner = self.0.write();\n            let name_map = inner\n                .entry(domain.to_owned())\n                .or_default()\n                .entry(path.to_owned())\n                .or_default();\n\n            // RFC 6265: If Max-Age=0 or Expires in the past, remove the cookie\n            let expired = cookie\n                .expires_datetime()\n                .is_some_and(|dt| dt <= SystemTime::now())\n                || cookie.max_age().is_some_and(Duration::is_zero);\n\n            if expired {\n                name_map.remove(cookie);\n            } else {\n                cookie.set_path(path.to_owned());\n                name_map.add(cookie);\n            
}\n        }\n    }\n\n    /// Remove a cookie by name for a given Uri.\n    ///\n    /// Removes the cookie with the specified name for the domain and path\n    /// derived from the given Uri, if it exists.\n    ///\n    /// # Example\n    /// ```\n    /// use wreq::cookie::Jar;\n    /// let jar = Jar::default();\n    /// jar.add(\"foo=bar; Path=/foo; Domain=example.com\", \"http://example.com/foo\");\n    /// assert!(jar.get(\"foo\", \"http://example.com/foo\").is_some());\n    /// jar.remove(\"foo\", \"http://example.com/foo\");\n    /// assert!(jar.get(\"foo\", \"http://example.com/foo\").is_none());\n    /// ```\n    pub fn remove<C, U>(&self, cookie: C, uri: U)\n    where\n        C: Into<RawCookie<'static>>,\n        U: IntoUri,\n    {\n        let uri = into_uri!(uri);\n        if let Some(host) = uri.host() {\n            let host = normalize_domain(host);\n            let mut inner = self.0.write();\n            if let Some(path_map) = inner.get_mut(host) {\n                if let Some(name_map) = path_map.get_mut(uri.path()) {\n                    name_map.remove(cookie.into());\n                }\n            }\n        }\n    }\n\n    /// Clear all cookies from this jar.\n    ///\n    /// Removes all cookies from the jar, leaving it empty.\n    ///\n    /// # Example\n    /// ```\n    /// use wreq::cookie::Jar;\n    /// let jar = Jar::default();\n    /// jar.add(\"foo=bar; Domain=example.com\", \"http://example.com\");\n    /// assert_eq!(jar.get_all().count(), 1);\n    /// jar.clear();\n    /// assert_eq!(jar.get_all().count(), 0);\n    /// ```\n    pub fn clear(&self) {\n        self.0.write().clear();\n    }\n}\n\nimpl CookieStore for Jar {\n    fn set_cookies(&self, cookie_headers: &mut dyn Iterator<Item = &HeaderValue>, uri: &Uri) {\n        let cookies = cookie_headers\n            .map(Cookie::parse)\n            .filter_map(Result::ok)\n            .map(|cookie| cookie.0.into_owned());\n\n        for cookie in cookies {\n            
self.add(cookie, uri);\n        }\n    }\n\n    fn cookies(&self, uri: &Uri, version: Version) -> Cookies {\n        let host = match uri.host() {\n            Some(h) => normalize_domain(h),\n            None => return Cookies::Empty,\n        };\n\n        let store = self.0.read();\n        let iter = store\n            .iter()\n            .filter(|(domain, _)| domain_match(host, domain))\n            .flat_map(|(_, path_map)| {\n                path_map\n                    .iter()\n                    .filter(|(path, _)| path_match(uri.path(), path))\n                    .flat_map(|(_, name_map)| {\n                        name_map.iter().filter(|cookie| {\n                            if cookie.secure() == Some(true) && uri.is_http() {\n                                return false;\n                            }\n\n                            if cookie\n                                .expires_datetime()\n                                .is_some_and(|dt| dt <= SystemTime::now())\n                            {\n                                return false;\n                            }\n\n                            true\n                        })\n                    })\n            });\n\n        if matches!(version, Version::HTTP_2 | Version::HTTP_3) {\n            let cookies = iter\n                .map(|cookie| {\n                    let name = cookie.name();\n                    let value = cookie.value();\n\n                    let mut cookie_str = String::with_capacity(name.len() + 1 + value.len());\n                    cookie_str.push_str(name);\n                    cookie_str.push('=');\n                    cookie_str.push_str(value);\n\n                    HeaderValue::from_maybe_shared(Bytes::from(cookie_str))\n                })\n                .filter_map(Result::ok)\n                .collect();\n\n            Cookies::Uncompressed(cookies)\n        } else {\n            let cookies = iter.fold(String::new(), |mut cookies, cookie| {\n         
       if !cookies.is_empty() {\n                    cookies.push_str(\"; \");\n                }\n                cookies.push_str(cookie.name());\n                cookies.push('=');\n                cookies.push_str(cookie.value());\n                cookies\n            });\n\n            if cookies.is_empty() {\n                return Cookies::Empty;\n            }\n\n            HeaderValue::from_maybe_shared(Bytes::from(cookies))\n                .map(Cookies::Compressed)\n                .unwrap_or(Cookies::Empty)\n        }\n    }\n}\n\nconst DEFAULT_PATH: &str = \"/\";\n\n/// Determines if the given `host` matches the cookie `domain` according to\n/// [RFC 6265 section 5.1.3](https://datatracker.ietf.org/doc/html/rfc6265#section-5.1.3).\n///\n/// - Returns true if the host and domain are identical.\n/// - Returns true if the host is a subdomain of the domain (host ends with \".domain\").\n/// - Returns false otherwise.\nfn domain_match(host: &str, domain: &str) -> bool {\n    if domain.is_empty() {\n        return false;\n    }\n    if host == domain {\n        return true;\n    }\n    host.len() > domain.len()\n        && host.as_bytes()[host.len() - domain.len() - 1] == b'.'\n        && host.ends_with(domain)\n}\n\n/// Determines if the request path matches the cookie path according to\n/// [RFC 6265 section 5.1.4](https://datatracker.ietf.org/doc/html/rfc6265#section-5.1.4).\n///\n/// - Returns true if the request path and cookie path are identical.\n/// - Returns true if the request path starts with the cookie path, and\n///   - the cookie path ends with '/', or\n///   - the next character in the request path after the cookie path is '/'.\n/// - Returns false otherwise.\nfn path_match(req_path: &str, cookie_path: &str) -> bool {\n    req_path == cookie_path\n        || req_path.starts_with(cookie_path)\n            && (cookie_path.ends_with(DEFAULT_PATH)\n                || req_path[cookie_path.len()..].starts_with(DEFAULT_PATH))\n}\n\n/// Normalizes a 
domain by stripping any port information.\n///\n/// According to [RFC 6265 section 5.2.3](https://datatracker.ietf.org/doc/html/rfc6265#section-5.2.3),\n/// the domain attribute of a cookie must not include a port. If a port is present (non-standard),\n/// it will be ignored for domain matching purposes.\nfn normalize_domain(domain: &str) -> &str {\n    let host_without_port = domain.split(':').next().unwrap_or(domain);\n    let without_leading = host_without_port\n        .strip_prefix(\".\")\n        .unwrap_or(host_without_port);\n    without_leading.strip_suffix(\".\").unwrap_or(without_leading)\n}\n\n/// Computes the normalized default path for a cookie as specified in\n/// [RFC 6265 section 5.1.4](https://datatracker.ietf.org/doc/html/rfc6265#section-5.1.4).\n///\n/// This function normalizes the path for a cookie, ensuring it matches\n/// browser and server expectations for default cookie scope.\nfn normalize_path(path: &str) -> &str {\n    if !path.starts_with(DEFAULT_PATH) {\n        return DEFAULT_PATH;\n    }\n    if let Some(pos) = path.rfind(DEFAULT_PATH) {\n        if pos == 0 {\n            return DEFAULT_PATH;\n        }\n        return &path[..pos];\n    }\n    DEFAULT_PATH\n}\n\n#[cfg(test)]\nmod tests {\n    use http::{Uri, Version};\n\n    use super::{CookieStore, Cookies, Jar};\n\n    #[test]\n    fn jar_get_all_backfills_domain_and_path() {\n        let jar = Jar::default();\n        jar.add(\"session=abc\", \"http://example.com/foo/bar\");\n\n        let cookies = jar.get_all().collect::<Vec<_>>();\n        assert_eq!(cookies.len(), 1);\n\n        let cookie = &cookies[0];\n        assert_eq!(cookie.name(), \"session\");\n        assert_eq!(cookie.value(), \"abc\");\n        assert_eq!(cookie.domain(), Some(\"example.com\"));\n        assert_eq!(cookie.path(), Some(\"/foo\"));\n    }\n\n    #[test]\n    fn jar_get_all_keeps_existing_domain_and_path() {\n        let jar = Jar::default();\n        jar.add(\n            \"session=abc; 
Domain=example.com; Path=/custom\",\n            \"http://example.com/foo/bar\",\n        );\n\n        let cookies = jar.get_all().collect::<Vec<_>>();\n        assert_eq!(cookies.len(), 1);\n\n        let cookie = &cookies[0];\n        assert_eq!(cookie.name(), \"session\");\n        assert_eq!(cookie.value(), \"abc\");\n        assert_eq!(cookie.domain(), Some(\"example.com\"));\n        assert_eq!(cookie.path(), Some(\"/custom\"));\n    }\n\n    #[test]\n    fn jar_get_all_backfills_only_missing_field() {\n        let jar = Jar::default();\n        jar.add(\"a=1; Domain=example.com\", \"http://example.com/foo/bar\");\n        jar.add(\"b=2; Path=/fixed\", \"http://example.com/foo/bar\");\n\n        let mut cookies = jar.get_all().collect::<Vec<_>>();\n        cookies.sort_by(|left, right| left.name().cmp(right.name()));\n\n        let a = &cookies[0];\n        assert_eq!(a.name(), \"a\");\n        assert_eq!(a.domain(), Some(\"example.com\"));\n        assert_eq!(a.path(), Some(\"/foo\"));\n\n        let b = &cookies[1];\n        assert_eq!(b.name(), \"b\");\n        assert_eq!(b.domain(), Some(\"example.com\"));\n        assert_eq!(b.path(), Some(\"/fixed\"));\n    }\n\n    #[test]\n    fn jar_add_rejects_mismatched_domain() {\n        let jar = Jar::default();\n        jar.add(\"session=abc; Domain=other.com\", \"http://example.com/foo\");\n\n        assert_eq!(jar.get_all().count(), 0);\n    }\n\n    #[test]\n    fn jar_add_accepts_matching_parent_domain() {\n        let jar = Jar::default();\n        jar.add(\n            \"session=abc; Domain=example.com\",\n            \"http://api.example.com/foo\",\n        );\n\n        let cookies = jar.get_all().collect::<Vec<_>>();\n        assert_eq!(cookies.len(), 1);\n        assert_eq!(cookies[0].domain(), Some(\"example.com\"));\n    }\n\n    #[test]\n    fn jar_get_all_export_import_keeps_effective_path() {\n        let source = Jar::default();\n        source.add(\"session=abc\", 
\"http://example.com/foo/bar\");\n\n        let exported = source.get_all().collect::<Vec<_>>();\n        assert_eq!(exported.len(), 1);\n        assert_eq!(exported[0].path(), Some(\"/foo\"));\n\n        let target = Jar::default();\n        for cookie in exported {\n            target.add(cookie, \"http://example.com/another/deeper\");\n        }\n\n        let imported = target.get_all().collect::<Vec<_>>();\n        assert_eq!(imported.len(), 1);\n        assert_eq!(imported[0].path(), Some(\"/foo\"));\n    }\n\n    #[test]\n    fn cookie_store_invalid_explicit_path_falls_back_to_default_path() {\n        let jar = Jar::default();\n        jar.add(\"key=val; Path=noslash\", \"http://example.com/foo/bar\");\n\n        assert!(jar.get(\"key\", \"http://example.com/foo\").is_some());\n        assert!(jar.get(\"key\", \"http://example.com/noslash\").is_none());\n\n        let cookies = jar.get_all().collect::<Vec<_>>();\n        assert_eq!(cookies.len(), 1);\n        assert_eq!(cookies[0].path(), Some(\"/foo\"));\n    }\n\n    #[test]\n    fn jar_sends_parent_domain_cookie_to_subdomain() {\n        let jar = Jar::default();\n        jar.add(\n            \"session=abc; Domain=example.com; Path=/\",\n            \"http://example.com/login\",\n        );\n\n        let should_receive = [\n            \"http://example.com/dashboard\",\n            \"http://api.example.com/dashboard\",\n            \"http://sub.api.example.com/dashboard\",\n        ];\n        for uri_str in &should_receive {\n            let uri = Uri::from_static(uri_str);\n            match jar.cookies(&uri, Version::HTTP_11) {\n                Cookies::Compressed(v) => assert_eq!(\n                    v.to_str().unwrap(),\n                    \"session=abc\",\n                    \"expected cookie to be sent to {uri_str}\"\n                ),\n                other => panic!(\"expected Compressed cookie for {uri_str}, got {other:?}\"),\n            }\n        }\n\n        let should_not_receive = 
[\n            \"http://notexample.com/dashboard\",\n            \"http://fakeexample.com/dashboard\",\n        ];\n        for uri_str in &should_not_receive {\n            let uri = Uri::from_static(uri_str);\n            assert!(\n                matches!(jar.cookies(&uri, Version::HTTP_11), Cookies::Empty),\n                \"cookie must NOT be sent to {uri_str}\"\n            );\n        }\n    }\n\n    #[test]\n    fn jar_subdomain_cookie_does_not_leak_to_parent_or_sibling() {\n        let jar = Jar::default();\n        jar.add(\n            \"token=xyz; Domain=api.example.com; Path=/\",\n            \"http://api.example.com/\",\n        );\n\n        let uri = Uri::from_static(\"http://api.example.com/\");\n        assert!(\n            matches!(jar.cookies(&uri, Version::HTTP_11), Cookies::Compressed(_)),\n            \"cookie must be sent to api.example.com\"\n        );\n\n        let must_not_receive = [\n            \"http://example.com/\",\n            \"http://other.example.com/\",\n            \"http://notapi.example.com/\",\n        ];\n        for uri_str in &must_not_receive {\n            let uri = Uri::from_static(uri_str);\n            assert!(\n                matches!(jar.cookies(&uri, Version::HTTP_11), Cookies::Empty),\n                \"cookie must NOT leak to {uri_str}\"\n            );\n        }\n    }\n}\n"
  },
  {
    "path": "src/dns/gai.rs",
    "content": "use std::{\n    future::Future,\n    io,\n    net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6, ToSocketAddrs},\n    pin::Pin,\n    task::{self, Poll},\n    vec,\n};\n\nuse tokio::task::JoinHandle;\nuse tower::Service;\n\nuse super::{Addrs, Name, Resolve, Resolving};\n\n/// A resolver using blocking `getaddrinfo` calls in a threadpool.\n#[derive(Clone, Default)]\npub struct GaiResolver {\n    _priv: (),\n}\n\n/// An iterator of IP addresses returned from `getaddrinfo`.\npub struct GaiAddrs {\n    inner: SocketAddrs,\n}\n\n/// A future to resolve a name returned by `GaiResolver`.\npub struct GaiFuture {\n    inner: JoinHandle<Result<SocketAddrs, io::Error>>,\n}\n\n/// A wrapper around `SocketAddrs` to implement the `Iterator` trait.\npub(crate) struct SocketAddrs {\n    iter: vec::IntoIter<SocketAddr>,\n}\n\n// ==== impl GaiResolver ====\n\nimpl GaiResolver {\n    /// Creates a new [`GaiResolver`].\n    pub fn new() -> Self {\n        GaiResolver { _priv: () }\n    }\n}\n\nimpl Service<Name> for GaiResolver {\n    type Response = GaiAddrs;\n    type Error = io::Error;\n    type Future = GaiFuture;\n\n    fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> Poll<Result<(), io::Error>> {\n        Poll::Ready(Ok(()))\n    }\n\n    fn call(&mut self, name: Name) -> Self::Future {\n        let blocking = tokio::task::spawn_blocking(move || {\n            debug!(\"resolving {}\", name);\n            (name.as_str(), 0)\n                .to_socket_addrs()\n                .map(|i| SocketAddrs { iter: i })\n        });\n\n        GaiFuture { inner: blocking }\n    }\n}\n\nimpl Resolve for GaiResolver {\n    fn resolve(&self, name: Name) -> Resolving {\n        let mut this = self.clone();\n        Box::pin(async move {\n            this.call(name)\n                .await\n                .map(|addrs| Box::new(addrs) as Addrs)\n                .map_err(Into::into)\n        })\n    }\n}\n\n// ==== impl GaiFuture ====\n\nimpl Future for 
GaiFuture {\n    type Output = Result<GaiAddrs, io::Error>;\n\n    fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {\n        Pin::new(&mut self.inner).poll(cx).map(|res| match res {\n            Ok(Ok(addrs)) => Ok(GaiAddrs { inner: addrs }),\n            Ok(Err(err)) => Err(err),\n            Err(join_err) => {\n                if join_err.is_cancelled() {\n                    Err(io::Error::new(io::ErrorKind::Interrupted, join_err))\n                } else {\n                    panic!(\"gai background task failed: {join_err:?}\")\n                }\n            }\n        })\n    }\n}\n\nimpl Drop for GaiFuture {\n    fn drop(&mut self) {\n        self.inner.abort();\n    }\n}\n\n// ==== impl GaiAddrs ====\n\nimpl Iterator for GaiAddrs {\n    type Item = SocketAddr;\n\n    fn next(&mut self) -> Option<Self::Item> {\n        self.inner.next()\n    }\n}\n\n// ==== impl SocketAddrs ====\n\nimpl SocketAddrs {\n    pub(crate) fn new(addrs: Vec<SocketAddr>) -> Self {\n        SocketAddrs {\n            iter: addrs.into_iter(),\n        }\n    }\n\n    pub(crate) fn try_parse(host: &str, port: u16) -> Option<SocketAddrs> {\n        if let Ok(addr) = host.parse::<Ipv4Addr>() {\n            let addr = SocketAddrV4::new(addr, port);\n            return Some(SocketAddrs {\n                iter: vec![SocketAddr::V4(addr)].into_iter(),\n            });\n        }\n        if let Ok(addr) = host.parse::<Ipv6Addr>() {\n            let addr = SocketAddrV6::new(addr, port, 0, 0);\n            return Some(SocketAddrs {\n                iter: vec![SocketAddr::V6(addr)].into_iter(),\n            });\n        }\n        None\n    }\n\n    #[inline]\n    fn filter(self, predicate: impl FnMut(&SocketAddr) -> bool) -> SocketAddrs {\n        SocketAddrs::new(self.iter.filter(predicate).collect())\n    }\n\n    pub(crate) fn split_by_preference(\n        self,\n        local_addr_ipv4: Option<Ipv4Addr>,\n        local_addr_ipv6: Option<Ipv6Addr>,\n    
) -> (SocketAddrs, SocketAddrs) {\n        match (local_addr_ipv4, local_addr_ipv6) {\n            (Some(_), None) => (self.filter(SocketAddr::is_ipv4), SocketAddrs::new(vec![])),\n            (None, Some(_)) => (self.filter(SocketAddr::is_ipv6), SocketAddrs::new(vec![])),\n            _ => {\n                let preferring_v6 = self\n                    .iter\n                    .as_slice()\n                    .first()\n                    .map(SocketAddr::is_ipv6)\n                    .unwrap_or(false);\n\n                let (preferred, fallback) = self\n                    .iter\n                    .partition::<Vec<_>, _>(|addr| addr.is_ipv6() == preferring_v6);\n\n                (SocketAddrs::new(preferred), SocketAddrs::new(fallback))\n            }\n        }\n    }\n\n    pub(crate) fn is_empty(&self) -> bool {\n        self.iter.as_slice().is_empty()\n    }\n\n    pub(crate) fn len(&self) -> usize {\n        self.iter.as_slice().len()\n    }\n}\n\nimpl Iterator for SocketAddrs {\n    type Item = SocketAddr;\n    #[inline]\n    fn next(&mut self) -> Option<SocketAddr> {\n        self.iter.next()\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::net::{Ipv4Addr, Ipv6Addr};\n\n    use super::*;\n\n    #[test]\n    fn test_ip_addrs_split_by_preference() {\n        let ip_v4 = Ipv4Addr::new(127, 0, 0, 1);\n        let ip_v6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1);\n        let v4_addr = (ip_v4, 80).into();\n        let v6_addr = (ip_v6, 80).into();\n\n        let (mut preferred, mut fallback) = SocketAddrs {\n            iter: vec![v4_addr, v6_addr].into_iter(),\n        }\n        .split_by_preference(None, None);\n        assert!(preferred.next().unwrap().is_ipv4());\n        assert!(fallback.next().unwrap().is_ipv6());\n\n        let (mut preferred, mut fallback) = SocketAddrs {\n            iter: vec![v6_addr, v4_addr].into_iter(),\n        }\n        .split_by_preference(None, None);\n        assert!(preferred.next().unwrap().is_ipv6());\n        
assert!(fallback.next().unwrap().is_ipv4());\n\n        let (mut preferred, mut fallback) = SocketAddrs {\n            iter: vec![v4_addr, v6_addr].into_iter(),\n        }\n        .split_by_preference(Some(ip_v4), Some(ip_v6));\n        assert!(preferred.next().unwrap().is_ipv4());\n        assert!(fallback.next().unwrap().is_ipv6());\n\n        let (mut preferred, mut fallback) = SocketAddrs {\n            iter: vec![v6_addr, v4_addr].into_iter(),\n        }\n        .split_by_preference(Some(ip_v4), Some(ip_v6));\n        assert!(preferred.next().unwrap().is_ipv6());\n        assert!(fallback.next().unwrap().is_ipv4());\n\n        let (mut preferred, fallback) = SocketAddrs {\n            iter: vec![v4_addr, v6_addr].into_iter(),\n        }\n        .split_by_preference(Some(ip_v4), None);\n        assert!(preferred.next().unwrap().is_ipv4());\n        assert!(fallback.is_empty());\n\n        let (mut preferred, fallback) = SocketAddrs {\n            iter: vec![v4_addr, v6_addr].into_iter(),\n        }\n        .split_by_preference(None, Some(ip_v6));\n        assert!(preferred.next().unwrap().is_ipv6());\n        assert!(fallback.is_empty());\n    }\n\n    #[test]\n    fn test_name_from_str() {\n        const DOMAIN: &str = \"test.example.com\";\n        let name = Name::from(DOMAIN);\n        assert_eq!(name.as_str(), DOMAIN);\n        assert_eq!(name.to_string(), DOMAIN);\n    }\n}\n"
  },
  {
    "path": "src/dns/hickory.rs",
    "content": "//! DNS resolution via the [hickory-resolver](https://github.com/hickory-dns/hickory-dns) crate\n\nuse std::{net::SocketAddr, sync::LazyLock};\n\nuse hickory_resolver::{\n    TokioResolver,\n    config::{self, LookupIpStrategy, ResolverConfig},\n    net::runtime::TokioRuntimeProvider,\n};\n\nuse super::{Addrs, Name, Resolve, Resolving};\n\n/// Wrapper around a [`TokioResolver`], which implements the `Resolve` trait.\n#[derive(Debug, Clone)]\npub struct HickoryDnsResolver {\n    /// Shared, lazily-initialized Tokio-based DNS resolver.\n    ///\n    /// Backed by [`LazyLock`] to guarantee thread-safe, one-time creation.\n    /// On initialization, it attempts to load the system's DNS configuration;\n    /// if unavailable, it falls back to sensible default settings.\n    resolver: &'static LazyLock<TokioResolver>,\n}\n\nimpl HickoryDnsResolver {\n    /// Create a new resolver with the default configuration,\n    /// which reads from `/etc/resolv.conf`. The options are\n    /// overridden to look up both IPv4 and IPv6 addresses\n    /// to support the \"happy eyeballs\" algorithm.\n    ///\n    /// SAFETY: `build` only fails if DNS-over-TLS is enabled and default TLS config creation fails.\n    pub fn new() -> HickoryDnsResolver {\n        static RESOLVER: LazyLock<TokioResolver> = LazyLock::new(|| {\n            let mut builder = match TokioResolver::builder_tokio() {\n                Ok(resolver) => {\n                    debug!(\"using system DNS configuration\");\n                    resolver\n                }\n                Err(_err) => {\n                    debug!(\"error reading DNS system conf: {}, using defaults\", _err);\n                    TokioResolver::builder_with_config(\n                        ResolverConfig::udp_and_tcp(&config::GOOGLE),\n                        TokioRuntimeProvider::default(),\n                    )\n                }\n            };\n            builder.options_mut().ip_strategy = 
LookupIpStrategy::Ipv4AndIpv6;\n            builder.build().expect(\"failed to create DNS resolver\")\n        });\n\n        HickoryDnsResolver {\n            resolver: &RESOLVER,\n        }\n    }\n}\n\nimpl Resolve for HickoryDnsResolver {\n    fn resolve(&self, name: Name) -> Resolving {\n        let resolver = self.clone();\n        Box::pin(async move {\n            let lookup = resolver.resolver.lookup_ip(name.as_str()).await?;\n            let addrs: Addrs = Box::new(\n                lookup\n                    .iter()\n                    .map(|ip_addr| SocketAddr::new(ip_addr, 0))\n                    .collect::<Vec<_>>()\n                    .into_iter(),\n            );\n            Ok(addrs)\n        })\n    }\n}\n"
  },
  {
    "path": "src/dns/resolve.rs",
    "content": "use std::{\n    borrow::Cow,\n    collections::HashMap,\n    fmt,\n    future::Future,\n    net::SocketAddr,\n    pin::Pin,\n    sync::Arc,\n    task::{Context, Poll},\n};\n\nuse tower::{BoxError, Service};\n\n/// A domain name to resolve into IP addresses.\n#[derive(Clone, Hash, Eq, PartialEq)]\npub struct Name {\n    host: Box<str>,\n}\n\nimpl Name {\n    /// Creates a new [`Name`] from a string slice.\n    #[inline]\n    pub fn new(host: Box<str>) -> Name {\n        Name { host }\n    }\n\n    /// View the hostname as a string slice.\n    #[inline]\n    pub fn as_str(&self) -> &str {\n        &self.host\n    }\n}\n\nimpl From<&str> for Name {\n    fn from(value: &str) -> Self {\n        Name::new(value.into())\n    }\n}\n\nimpl fmt::Debug for Name {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        fmt::Debug::fmt(&self.host, f)\n    }\n}\n\nimpl fmt::Display for Name {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        fmt::Display::fmt(&self.host, f)\n    }\n}\n\n/// Alias for an `Iterator` trait object over `SocketAddr`.\npub type Addrs = Box<dyn Iterator<Item = SocketAddr> + Send>;\n\n/// Alias for the `Future` type returned by a DNS resolver.\npub type Resolving = Pin<Box<dyn Future<Output = Result<Addrs, BoxError>> + Send>>;\n\n/// Trait for customizing DNS resolution in wreq.\npub trait Resolve: Send + Sync {\n    /// Performs DNS resolution on a `Name`.\n    /// The return type is a future containing an iterator of `SocketAddr`.\n    ///\n    /// It differs from `tower::Service<Name>` in several ways:\n    ///  * It is assumed that `resolve` will always be ready to poll.\n    ///  * It does not need a mutable reference to `self`.\n    ///  * Since trait objects cannot make use of associated types, it requires wrapping the\n    ///    returned `Future` and its contained `Iterator` with `Box`.\n    ///\n    /// Explicitly specified port in the URI will override any port in the resolved 
`SocketAddr`s.\n    /// Otherwise, port `0` will be replaced by the conventional port for the given scheme (e.g. 80\n    /// for http).\n    fn resolve(&self, name: Name) -> Resolving;\n}\n\nimpl_into_shared!(\n    /// Trait for converting types into a shared DNS resolver ([`Arc<dyn Resolve>`]).\n    ///\n    /// Implemented for any [`Resolve`] type, [`Arc<T>`] where `T: Resolve`, and [`Arc<dyn Resolve>`].\n    /// Enables ergonomic conversion to a trait object for use in APIs without manual Arc wrapping.\n    pub trait IntoResolve => Resolve\n);\n\n/// Adapter that wraps a [`Resolve`] trait object to work with Tower's `Service` trait.\n///\n/// This allows custom DNS resolvers implementing `Resolve` to be used in contexts\n/// that expect a `Service<Name>` implementation.\n#[derive(Clone)]\npub(crate) struct DynResolver {\n    resolver: Arc<dyn Resolve>,\n}\n\nimpl DynResolver {\n    /// Creates a new [`DynResolver`] with the provided resolver.\n    pub(crate) fn new(resolver: Arc<dyn Resolve>) -> Self {\n        Self { resolver }\n    }\n}\n\nimpl Service<Name> for DynResolver {\n    type Response = Addrs;\n    type Error = BoxError;\n    type Future = Resolving;\n\n    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {\n        Poll::Ready(Ok(()))\n    }\n\n    fn call(&mut self, name: Name) -> Self::Future {\n        self.resolver.resolve(name)\n    }\n}\n\n/// DNS resolver that supports hostname overrides.\n///\n/// This resolver first checks for manual hostname-to-IP mappings before\n/// falling back to the underlying DNS resolver. 
Useful for testing or\n/// bypassing DNS for specific domains.\npub(crate) struct DnsResolverWithOverrides {\n    dns_resolver: Arc<dyn Resolve>,\n    overrides: Arc<HashMap<Cow<'static, str>, Vec<SocketAddr>>>,\n}\n\nimpl DnsResolverWithOverrides {\n    /// Creates a new [`DnsResolverWithOverrides`] with the provided DNS resolver and overrides.\n    pub(crate) fn new(\n        dns_resolver: Arc<dyn Resolve>,\n        overrides: HashMap<Cow<'static, str>, Vec<SocketAddr>>,\n    ) -> Self {\n        DnsResolverWithOverrides {\n            dns_resolver,\n            overrides: Arc::new(overrides),\n        }\n    }\n}\n\nimpl Resolve for DnsResolverWithOverrides {\n    fn resolve(&self, name: Name) -> Resolving {\n        match self.overrides.get(name.as_str()) {\n            Some(dest) => {\n                let addrs: Addrs = Box::new(dest.clone().into_iter());\n                Box::pin(std::future::ready(Ok(addrs)))\n            }\n            None => self.dns_resolver.resolve(name),\n        }\n    }\n}\n"
  },
  {
    "path": "src/dns.rs",
    "content": "//! DNS resolution\n\npub(crate) mod gai;\n#[cfg(feature = \"hickory-dns\")]\npub(crate) mod hickory;\npub(crate) mod resolve;\n\npub use resolve::{Addrs, IntoResolve, Name, Resolve, Resolving};\n\npub(crate) use self::{\n    gai::{GaiResolver, SocketAddrs},\n    resolve::{DnsResolverWithOverrides, DynResolver},\n    sealed::{InternalResolve, resolve},\n};\n\nmod sealed {\n    use std::{\n        future::Future,\n        net::SocketAddr,\n        task::{self, Poll},\n    };\n\n    use tower::{BoxError, Service};\n\n    use super::Name;\n\n    /// Internal adapter trait for DNS resolvers.\n    ///\n    /// This trait provides a unified interface for different resolver implementations,\n    /// allowing both custom [`super::Resolve`] types and Tower [`Service`] implementations\n    /// to be used interchangeably within the connector.\n    pub trait InternalResolve {\n        type Addrs: Iterator<Item = SocketAddr>;\n        type Error: Into<BoxError>;\n        type Future: Future<Output = Result<Self::Addrs, Self::Error>>;\n\n        fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>>;\n        fn resolve(&mut self, name: Name) -> Self::Future;\n    }\n\n    /// Automatic implementation for any Tower [`Service`] that resolves names to socket addresses.\n    impl<S> InternalResolve for S\n    where\n        S: Service<Name>,\n        S::Response: Iterator<Item = SocketAddr>,\n        S::Error: Into<BoxError>,\n    {\n        type Addrs = S::Response;\n        type Error = S::Error;\n        type Future = S::Future;\n\n        fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {\n            Service::poll_ready(self, cx)\n        }\n\n        fn resolve(&mut self, name: Name) -> Self::Future {\n            Service::call(self, name)\n        }\n    }\n\n    pub async fn resolve<R>(resolver: &mut R, name: Name) -> Result<R::Addrs, R::Error>\n    where\n        R: InternalResolve,\n    
{\n        std::future::poll_fn(|cx| resolver.poll_ready(cx)).await?;\n        resolver.resolve(name).await\n    }\n}\n"
  },
  {
    "path": "src/error.rs",
    "content": "use std::{error::Error as StdError, fmt, io};\n\nuse http::Uri;\n\nuse crate::{StatusCode, client::http1::ext::ReasonPhrase, util::Escape};\n\n/// A `Result` alias where the `Err` case is `wreq::Error`.\npub type Result<T> = std::result::Result<T, Error>;\n\n/// A boxed error type that can be used for dynamic error handling.\npub type BoxError = Box<dyn StdError + Send + Sync>;\n\n/// The Errors that may occur when processing a `Request`.\n///\n/// Note: Errors may include the full URI used to make the `Request`. If the URI\n/// contains sensitive information (e.g. an API key as a query parameter), be\n/// sure to remove it ([`without_uri`](Error::without_uri))\npub struct Error {\n    inner: Box<Inner>,\n}\n\nstruct Inner {\n    kind: Kind,\n    source: Option<BoxError>,\n    uri: Option<Uri>,\n}\n\n#[derive(Debug)]\nenum Kind {\n    Builder,\n    Request,\n    Tls,\n    Redirect,\n    Status(StatusCode, Option<ReasonPhrase>),\n    Body,\n    Decode,\n    Upgrade,\n    #[cfg(feature = \"ws\")]\n    WebSocket,\n}\n\nimpl Error {\n    fn new<E>(kind: Kind, source: Option<E>) -> Error\n    where\n        E: Into<BoxError>,\n    {\n        Error {\n            inner: Box::new(Inner {\n                kind,\n                source: source.map(Into::into),\n                uri: None,\n            }),\n        }\n    }\n\n    #[inline]\n    pub(crate) fn builder<E: Into<BoxError>>(e: E) -> Error {\n        Error::new(Kind::Builder, Some(e))\n    }\n\n    #[inline]\n    pub(crate) fn body<E: Into<BoxError>>(e: E) -> Error {\n        Error::new(Kind::Body, Some(e))\n    }\n\n    #[inline]\n    pub(crate) fn tls<E: Into<BoxError>>(e: E) -> Error {\n        Error::new(Kind::Tls, Some(e))\n    }\n\n    #[inline]\n    pub(crate) fn decode<E: Into<BoxError>>(e: E) -> Error {\n        Error::new(Kind::Decode, Some(e))\n    }\n\n    #[inline]\n    pub(crate) fn request<E: Into<BoxError>>(e: E) -> Error {\n        Error::new(Kind::Request, Some(e))\n    }\n\n    
#[inline]\n    pub(crate) fn redirect<E: Into<BoxError>>(e: E, uri: Uri) -> Error {\n        Error::new(Kind::Redirect, Some(e)).with_uri(uri)\n    }\n\n    #[inline]\n    pub(crate) fn upgrade<E: Into<BoxError>>(e: E) -> Error {\n        Error::new(Kind::Upgrade, Some(e))\n    }\n\n    #[inline]\n    #[cfg(feature = \"ws\")]\n    pub(crate) fn websocket<E: Into<BoxError>>(e: E) -> Error {\n        Error::new(Kind::WebSocket, Some(e))\n    }\n\n    #[inline]\n    pub(crate) fn status_code(uri: Uri, status: StatusCode, reason: Option<ReasonPhrase>) -> Error {\n        Error::new(Kind::Status(status, reason), None::<Error>).with_uri(uri)\n    }\n\n    #[inline]\n    pub(crate) fn uri_bad_scheme(uri: Uri) -> Error {\n        Error::new(Kind::Builder, Some(BadScheme)).with_uri(uri)\n    }\n}\n\nimpl Error {\n    /// Returns a possible URI related to this error.\n    ///\n    /// # Examples\n    ///\n    /// ```\n    /// # async fn run() {\n    /// // displays last stop of a redirect loop\n    /// let response = wreq::get(\"http://site.with.redirect.loop\")\n    ///     .send()\n    ///     .await;\n    /// if let Err(e) = response {\n    ///     if e.is_redirect() {\n    ///         if let Some(final_stop) = e.uri() {\n    ///             println!(\"redirect loop at {}\", final_stop);\n    ///         }\n    ///     }\n    /// }\n    /// # }\n    /// ```\n    #[inline]\n    pub fn uri(&self) -> Option<&Uri> {\n        self.inner.uri.as_ref()\n    }\n\n    /// Returns a mutable reference to the URI related to this error\n    ///\n    /// This is useful if you need to remove sensitive information from the URI\n    /// (e.g. 
an API key in the query), but do not want to remove the URI\n    /// entirely.\n    #[inline]\n    pub fn uri_mut(&mut self) -> Option<&mut Uri> {\n        self.inner.uri.as_mut()\n    }\n\n    /// Add a uri related to this error (overwriting any existing)\n    #[inline]\n    pub fn with_uri(mut self, uri: Uri) -> Self {\n        self.inner.uri = Some(uri);\n        self\n    }\n\n    /// Strip the related uri from this error (if, for example, it contains\n    /// sensitive information)\n    #[inline]\n    pub fn without_uri(mut self) -> Self {\n        self.inner.uri = None;\n        self\n    }\n\n    /// Returns true if the error is from a type Builder.\n    #[inline]\n    pub fn is_builder(&self) -> bool {\n        matches!(self.inner.kind, Kind::Builder)\n    }\n\n    /// Returns true if the error is from a `RedirectPolicy`.\n    #[inline]\n    pub fn is_redirect(&self) -> bool {\n        matches!(self.inner.kind, Kind::Redirect)\n    }\n\n    /// Returns true if the error is from `Response::error_for_status`.\n    #[inline]\n    pub fn is_status(&self) -> bool {\n        matches!(self.inner.kind, Kind::Status(_, _))\n    }\n\n    /// Returns true if the error is related to a timeout.\n    pub fn is_timeout(&self) -> bool {\n        let mut source = self.source();\n\n        while let Some(err) = source {\n            if err.is::<TimedOut>() {\n                return true;\n            }\n\n            if let Some(core_err) = err.downcast_ref::<crate::client::CoreError>() {\n                if core_err.is_timeout() {\n                    return true;\n                }\n            }\n\n            if let Some(io) = err.downcast_ref::<io::Error>() {\n                if io.kind() == io::ErrorKind::TimedOut {\n                    return true;\n                }\n            }\n\n            source = err.source();\n        }\n\n        false\n    }\n\n    /// Returns true if the error is related to the request\n    #[inline]\n    pub fn is_request(&self) -> bool 
{\n        matches!(self.inner.kind, Kind::Request)\n    }\n\n    /// Returns true if the error is related to connect\n    pub fn is_connect(&self) -> bool {\n        use crate::client::layer::client::Error;\n\n        let mut source = self.source();\n\n        while let Some(err) = source {\n            if let Some(err) = err.downcast_ref::<Error>() {\n                if err.is_connect() {\n                    return true;\n                }\n            }\n\n            source = err.source();\n        }\n\n        false\n    }\n\n    /// Returns true if the error is related to proxy connect\n    pub fn is_proxy_connect(&self) -> bool {\n        use crate::client::layer::client::Error;\n\n        let mut source = self.source();\n\n        while let Some(err) = source {\n            if let Some(err) = err.downcast_ref::<Error>() {\n                if err.is_proxy_connect() {\n                    return true;\n                }\n            }\n\n            source = err.source();\n        }\n\n        false\n    }\n\n    /// Returns true if the error is related to a connection reset.\n    pub fn is_connection_reset(&self) -> bool {\n        let mut source = self.source();\n\n        while let Some(err) = source {\n            if let Some(io) = err.downcast_ref::<io::Error>() {\n                if io.kind() == io::ErrorKind::ConnectionReset {\n                    return true;\n                }\n            }\n            source = err.source();\n        }\n\n        false\n    }\n\n    /// Returns true if the error is related to the request or response body\n    #[inline]\n    pub fn is_body(&self) -> bool {\n        matches!(self.inner.kind, Kind::Body)\n    }\n\n    /// Returns true if the error is related to TLS\n    #[inline]\n    pub fn is_tls(&self) -> bool {\n        matches!(self.inner.kind, Kind::Tls)\n    }\n\n    /// Returns true if the error is related to decoding the response's body\n    #[inline]\n    pub fn is_decode(&self) -> bool {\n        
matches!(self.inner.kind, Kind::Decode)\n    }\n\n    /// Returns true if the error is related to upgrading the connection\n    #[inline]\n    pub fn is_upgrade(&self) -> bool {\n        matches!(self.inner.kind, Kind::Upgrade)\n    }\n\n    /// Returns true if the error is related to WebSocket operations\n    #[inline]\n    #[cfg(feature = \"ws\")]\n    pub fn is_websocket(&self) -> bool {\n        matches!(self.inner.kind, Kind::WebSocket)\n    }\n\n    /// Returns the status code, if the error was generated from a response.\n    pub fn status(&self) -> Option<StatusCode> {\n        match self.inner.kind {\n            Kind::Status(code, _) => Some(code),\n            _ => None,\n        }\n    }\n}\n\n/// Maps external timeout errors (such as `tower::timeout::error::Elapsed`)\n/// to the internal `TimedOut` error type used for connector operations.\n/// Returns the original error if it is not a timeout.\n#[inline]\npub(crate) fn map_timeout_to_connector_error(error: BoxError) -> BoxError {\n    if error.is::<tower::timeout::error::Elapsed>() {\n        Box::new(TimedOut)\n    } else {\n        error\n    }\n}\n\n/// Maps external timeout errors (such as `tower::timeout::error::Elapsed`)\n/// to the internal request-level `Error` type.\n/// Returns the original error if it is not a timeout.\n#[inline]\npub(crate) fn map_timeout_to_request_error(error: BoxError) -> BoxError {\n    if error.is::<tower::timeout::error::Elapsed>() {\n        Box::new(Error::request(TimedOut))\n    } else {\n        error\n    }\n}\n\nimpl fmt::Debug for Error {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        let mut builder = f.debug_struct(\"wreq::Error\");\n\n        builder.field(\"kind\", &self.inner.kind);\n\n        if let Some(ref uri) = self.inner.uri {\n            builder.field(\"uri\", uri);\n        }\n\n        if let Some(ref source) = self.inner.source {\n            builder.field(\"source\", source);\n        }\n\n        builder.finish()\n    
}\n}\n\nimpl fmt::Display for Error {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        match self.inner.kind {\n            Kind::Builder => f.write_str(\"builder error\")?,\n            Kind::Request => f.write_str(\"error sending request\")?,\n            Kind::Body => f.write_str(\"request or response body error\")?,\n            Kind::Tls => f.write_str(\"tls error\")?,\n            Kind::Decode => f.write_str(\"error decoding response body\")?,\n            Kind::Redirect => f.write_str(\"error following redirect\")?,\n            Kind::Upgrade => f.write_str(\"error upgrading connection\")?,\n            #[cfg(feature = \"ws\")]\n            Kind::WebSocket => f.write_str(\"websocket error\")?,\n            Kind::Status(ref code, ref reason) => {\n                let prefix = if code.is_client_error() {\n                    \"HTTP status client error\"\n                } else {\n                    debug_assert!(code.is_server_error());\n                    \"HTTP status server error\"\n                };\n                if let Some(reason) = reason {\n                    write!(\n                        f,\n                        \"{prefix} ({} {})\",\n                        code.as_str(),\n                        Escape::new(reason.as_ref())\n                    )?;\n                } else {\n                    write!(f, \"{prefix} ({code})\")?;\n                }\n            }\n        };\n\n        if let Some(uri) = &self.inner.uri {\n            write!(f, \" for uri ({})\", uri)?;\n        }\n\n        if let Some(e) = &self.inner.source {\n            write!(f, \": {e}\")?;\n        }\n\n        Ok(())\n    }\n}\n\nimpl StdError for Error {\n    #[inline]\n    fn source(&self) -> Option<&(dyn StdError + 'static)> {\n        self.inner.source.as_ref().map(|e| &**e as _)\n    }\n}\n\n#[derive(Debug)]\npub(crate) struct TimedOut;\n\n#[derive(Debug)]\npub(crate) struct BadScheme;\n\n#[derive(Debug)]\npub(crate) struct 
ProxyConnect(pub(crate) BoxError);\n\n// ==== impl TimedOut ====\n\nimpl StdError for TimedOut {}\n\nimpl fmt::Display for TimedOut {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        f.write_str(\"operation timed out\")\n    }\n}\n\n// ==== impl BadScheme ====\n\nimpl StdError for BadScheme {}\n\nimpl fmt::Display for BadScheme {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        f.write_str(\"URI scheme is not allowed\")\n    }\n}\n\n// ==== impl ProxyConnect ====\n\nimpl StdError for ProxyConnect {\n    #[inline]\n    fn source(&self) -> Option<&(dyn StdError + 'static)> {\n        Some(&*self.0)\n    }\n}\n\nimpl fmt::Display for ProxyConnect {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        write!(f, \"proxy connect error: {}\", self.0)\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    fn assert_send<T: Send>() {}\n    fn assert_sync<T: Sync>() {}\n\n    impl super::Error {\n        fn into_io(self) -> io::Error {\n            io::Error::other(self)\n        }\n    }\n\n    fn decode_io(e: io::Error) -> Error {\n        if e.get_ref().map(|r| r.is::<Error>()).unwrap_or(false) {\n            *e.into_inner()\n                .expect(\"io::Error::get_ref was Some(_)\")\n                .downcast::<Error>()\n                .expect(\"StdError::is() was true\")\n        } else {\n            Error::decode(e)\n        }\n    }\n\n    #[test]\n    fn test_source_chain() {\n        let root = Error::new(Kind::Request, None::<Error>);\n        assert!(root.source().is_none());\n\n        let link = Error::body(root);\n        assert!(link.source().is_some());\n        assert_send::<Error>();\n        assert_sync::<Error>();\n    }\n\n    #[test]\n    fn mem_size_of() {\n        use std::mem::size_of;\n        assert_eq!(size_of::<Error>(), size_of::<usize>());\n    }\n\n    #[test]\n    fn roundtrip_io_error() {\n        let orig = Error::request(\"orig\");\n        // Convert wreq::Error into an 
io::Error...\n        let io = orig.into_io();\n        // Convert that io::Error back into a wreq::Error...\n        let err = decode_io(io);\n        // It should have pulled out the original, not nested it...\n        match err.inner.kind {\n            Kind::Request => (),\n            _ => panic!(\"{err:?}\"),\n        }\n    }\n\n    #[test]\n    fn from_unknown_io_error() {\n        let orig = io::Error::other(\"orly\");\n        let err = decode_io(orig);\n        match err.inner.kind {\n            Kind::Decode => (),\n            _ => panic!(\"{err:?}\"),\n        }\n    }\n\n    #[test]\n    fn is_timeout() {\n        let err = Error::request(super::TimedOut);\n        assert!(err.is_timeout());\n\n        let io = io::Error::from(io::ErrorKind::TimedOut);\n        let nested = Error::request(io);\n        assert!(nested.is_timeout());\n    }\n\n    #[test]\n    fn is_connection_reset() {\n        let err = Error::request(io::Error::new(\n            io::ErrorKind::ConnectionReset,\n            \"connection reset\",\n        ));\n        assert!(err.is_connection_reset());\n\n        let io = io::Error::other(err);\n        let nested = Error::request(io);\n        assert!(nested.is_connection_reset());\n    }\n}\n"
  },
  {
    "path": "src/ext.rs",
    "content": "//! Extension utilities.\n\nuse bytes::Bytes;\nuse http::uri::{Authority, Scheme, Uri};\nuse percent_encoding::{AsciiSet, CONTROLS};\n\nuse crate::Body;\n\n/// See: <https://url.spec.whatwg.org/#fragment-percent-encode-set>\nconst FRAGMENT: &AsciiSet = &CONTROLS.add(b' ').add(b'\"').add(b'<').add(b'>').add(b'`');\n\n/// See: <https://url.spec.whatwg.org/#path-percent-encode-set>\nconst PATH: &AsciiSet = &FRAGMENT.add(b'#').add(b'?').add(b'{').add(b'}');\n\n/// See: <https://url.spec.whatwg.org/#userinfo-percent-encode-set>\nconst USERINFO: &AsciiSet = &PATH\n    .add(b'/')\n    .add(b':')\n    .add(b';')\n    .add(b'=')\n    .add(b'@')\n    .add(b'[')\n    .add(b'\\\\')\n    .add(b']')\n    .add(b'^')\n    .add(b'|');\n\nmacro_rules! impl_into_shared {\n    ($(#[$meta:meta])* $vis:vis trait $name:ident => $target:path) => {\n        $(#[$meta])*\n        $vis trait $name {\n            #[doc = concat!(\"Converts this type into a shared [`\", stringify!($target), \"`].\")]\n            fn into_shared(self) -> Arc<dyn $target>;\n        }\n\n        impl $name for Arc<dyn $target> {\n            #[inline]\n            fn into_shared(self) -> Arc<dyn $target> { self }\n        }\n\n        impl<R: $target + 'static> $name for Arc<R> {\n            #[inline]\n            fn into_shared(self) -> Arc<dyn $target> { self }\n        }\n\n        impl<R: $target + 'static> $name for R {\n            #[inline]\n            fn into_shared(self) -> Arc<dyn $target> { Arc::new(self) }\n        }\n    };\n}\n\n/// Extension trait for http::Response objects\n///\n/// Provides methods to extract URI information from HTTP responses\npub trait ResponseExt {\n    /// Returns a reference to the `Uri` associated with this response, if available.\n    fn uri(&self) -> Option<&Uri>;\n}\n\n/// Extension trait for http::response::Builder objects\n///\n/// Allows the user to add a `Uri` to the http::Response\npub trait ResponseBuilderExt {\n    /// A builder method for the 
`http::response::Builder` type that allows the user to add a `Uri`\n    /// to the `http::Response`\n    fn uri(self, uri: Uri) -> Self;\n}\n\n/// Extension type to store the request URI in a response's extensions.\n#[derive(Clone)]\npub(crate) struct RequestUri(pub Uri);\n\n/// Extension trait for `Uri` helpers.\npub(crate) trait UriExt {\n    /// Returns true if the URI scheme is HTTP.\n    fn is_http(&self) -> bool;\n\n    /// Returns true if the URI scheme is HTTPS.\n    fn is_https(&self) -> bool;\n\n    /// Returns the port of the URI, or the default port for the scheme if none is specified.\n    fn port_or_default(&self) -> u16;\n\n    /// Sets the query component of the URI, replacing any existing query.\n    #[cfg(feature = \"query\")]\n    fn set_query(&mut self, query: String);\n\n    /// Returns the username and password from the URI's userinfo, if present.\n    fn userinfo(&self) -> (Option<&str>, Option<&str>);\n\n    /// Sets the username and password in the URI's userinfo component.\n    fn set_userinfo(&mut self, username: &str, password: Option<&str>);\n}\n\n// ===== impl ResponseExt =====\n\nimpl ResponseExt for http::Response<Body> {\n    fn uri(&self) -> Option<&Uri> {\n        self.extensions().get::<RequestUri>().map(|r| &r.0)\n    }\n}\n\n// ===== impl ResponseBuilderExt =====\n\nimpl ResponseBuilderExt for http::response::Builder {\n    fn uri(self, uri: Uri) -> Self {\n        self.extension(RequestUri(uri))\n    }\n}\n\n// ===== impl UriExt =====\n\nimpl UriExt for Uri {\n    #[inline]\n    fn is_http(&self) -> bool {\n        self.scheme() == Some(&Scheme::HTTP)\n    }\n\n    #[inline]\n    fn is_https(&self) -> bool {\n        self.scheme() == Some(&Scheme::HTTPS)\n    }\n\n    fn port_or_default(&self) -> u16 {\n        match Uri::port(self) {\n            Some(p) => p.as_u16(),\n            None if self.is_https() => 443u16,\n            _ => 80u16,\n        }\n    }\n\n    #[cfg(feature = \"query\")]\n    fn set_query(&mut self, 
query: String) {\n        use http::uri::PathAndQuery;\n\n        if query.is_empty() {\n            return;\n        }\n\n        let path = self.path();\n        let parts = match PathAndQuery::from_maybe_shared(Bytes::from(format!(\"{path}?{query}\"))) {\n            Ok(path_and_query) => {\n                let mut parts = self.clone().into_parts();\n                parts.path_and_query.replace(path_and_query);\n                parts\n            }\n            Err(_err) => {\n                debug!(\"Failed to set query in URI: {_err}\");\n                return;\n            }\n        };\n\n        if let Ok(uri) = Uri::from_parts(parts) {\n            *self = uri;\n        }\n    }\n\n    fn userinfo(&self) -> (Option<&str>, Option<&str>) {\n        self.authority()\n            .and_then(|auth| auth.as_str().rsplit_once('@'))\n            .map_or((None, None), |(userinfo, _)| {\n                match userinfo.split_once(':') {\n                    Some((u, p)) => ((!u.is_empty()).then_some(u), (!p.is_empty()).then_some(p)),\n                    None => (Some(userinfo), None),\n                }\n            })\n    }\n\n    fn set_userinfo(&mut self, username: &str, password: Option<&str>) {\n        let mut parts = self.clone().into_parts();\n\n        let authority = match self.authority() {\n            Some(authority) => authority,\n            None => return,\n        };\n\n        let host_and_port = authority\n            .as_str()\n            .rsplit_once('@')\n            .map(|(_, host)| host)\n            .unwrap_or_else(|| authority.as_str());\n\n        let authority = match (username.is_empty(), password) {\n            (true, None) => Bytes::from(host_and_port.to_owned()),\n            (true, Some(password)) => {\n                let pass = percent_encoding::utf8_percent_encode(password, USERINFO);\n                Bytes::from(format!(\":{pass}@{host_and_port}\"))\n            }\n            (false, Some(password)) => {\n                let 
username = percent_encoding::utf8_percent_encode(username, USERINFO);\n                let password = percent_encoding::utf8_percent_encode(password, USERINFO);\n                Bytes::from(format!(\"{username}:{password}@{host_and_port}\"))\n            }\n            (false, None) => {\n                let username = percent_encoding::utf8_percent_encode(username, USERINFO);\n                Bytes::from(format!(\"{username}@{host_and_port}\"))\n            }\n        };\n\n        match Authority::from_maybe_shared(authority) {\n            Ok(authority) => {\n                parts.authority.replace(authority);\n            }\n            Err(_err) => {\n                debug!(\"Failed to set userinfo in URI: {_err}\");\n                return;\n            }\n        };\n\n        if let Ok(uri) = Uri::from_parts(parts) {\n            *self = uri;\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use http::{Uri, response::Builder};\n\n    use super::{RequestUri, ResponseBuilderExt, ResponseExt, UriExt};\n    use crate::Body;\n\n    #[test]\n    fn test_uri_ext_is_https() {\n        let https_uri: Uri = \"https://example.com\".parse().unwrap();\n        let http_uri: Uri = \"http://example.com\".parse().unwrap();\n\n        assert!(https_uri.is_https());\n        assert!(!http_uri.is_https());\n        assert!(http_uri.is_http());\n        assert!(!https_uri.is_http());\n    }\n\n    #[test]\n    fn test_userinfo_with_username_and_password() {\n        let uri: Uri = \"http://user:pass@example.com\".parse().unwrap();\n        let (username, password) = uri.userinfo();\n\n        assert_eq!(username, Some(\"user\"));\n        assert_eq!(password, Some(\"pass\"));\n    }\n\n    #[test]\n    fn test_userinfo_with_empty_username() {\n        let uri: Uri = \"http://:pass@example.com\".parse().unwrap();\n        let (username, password) = uri.userinfo();\n\n        assert_eq!(username, None);\n        assert_eq!(password, Some(\"pass\"));\n    }\n\n    #[test]\n  
  fn test_userinfo_with_empty_password() {\n        let uri: Uri = \"http://user:@example.com\".parse().unwrap();\n        let (username, password) = uri.userinfo();\n\n        assert_eq!(username, Some(\"user\"));\n        assert_eq!(password, None);\n\n        let uri: Uri = \"http://user@example.com\".parse().unwrap();\n        let (username, password) = uri.userinfo();\n\n        assert_eq!(username, Some(\"user\"));\n        assert_eq!(password, None);\n    }\n\n    #[test]\n    fn test_userinfo_without_colon() {\n        let uri: Uri = \"http://something@example.com\".parse().unwrap();\n        let (username, password) = uri.userinfo();\n\n        assert_eq!(username, Some(\"something\"));\n        assert_eq!(password, None);\n    }\n\n    #[test]\n    fn test_userinfo_without_at() {\n        let uri: Uri = \"http://example.com\".parse().unwrap();\n        let (username, password) = uri.userinfo();\n\n        assert_eq!(username, None);\n        assert_eq!(password, None);\n    }\n\n    #[test]\n    fn test_set_userinfo_both() {\n        let mut uri: Uri = \"http://example.com/path\".parse().unwrap();\n        uri.set_userinfo(\"user\", Some(\"pass\"));\n\n        let (username, password) = uri.userinfo();\n        assert_eq!(username, Some(\"user\"));\n        assert_eq!(password, Some(\"pass\"));\n        assert_eq!(uri.to_string(), \"http://user:pass@example.com/path\");\n    }\n\n    #[test]\n    fn test_set_userinfo_empty_username() {\n        let mut uri: Uri = \"http://user:pass@example.com/path\".parse().unwrap();\n        uri.set_userinfo(\"\", Some(\"pass\"));\n\n        let (username, password) = uri.userinfo();\n        assert_eq!(username, None);\n        assert_eq!(password, Some(\"pass\"));\n        assert_eq!(uri.to_string(), \"http://:pass@example.com/path\");\n    }\n\n    #[test]\n    fn test_set_userinfo_none_password() {\n        let mut uri: Uri = \"http://user:pass@example.com/path\".parse().unwrap();\n        uri.set_userinfo(\"user\", 
None);\n\n        let (username, password) = uri.userinfo();\n        assert_eq!(username, Some(\"user\"));\n        assert_eq!(password, None);\n        assert_eq!(uri.to_string(), \"http://user@example.com/path\");\n    }\n\n    #[test]\n    fn test_set_userinfo_empty_username_and_password() {\n        let mut uri: Uri = \"http://user:pass@example.com/path\".parse().unwrap();\n        uri.set_userinfo(\"\", None);\n\n        let (username, password) = uri.userinfo();\n        assert_eq!(username, None);\n        assert_eq!(password, None);\n        assert_eq!(uri.to_string(), \"http://example.com/path\");\n    }\n\n    #[test]\n    fn test_set_userinfo_with_encoding() {\n        use http::Uri;\n\n        use crate::ext::UriExt;\n\n        let mut uri: Uri = \"http://example.com/path\".parse().unwrap();\n        uri.set_userinfo(\"us er\", Some(\"p@ss:word!\"));\n\n        let (username, password) = uri.userinfo();\n        assert_eq!(username, Some(\"us%20er\"));\n        assert_eq!(password, Some(\"p%40ss%3Aword!\"));\n\n        assert_eq!(\n            uri.to_string(),\n            \"http://us%20er:p%40ss%3Aword!@example.com/path\"\n        );\n    }\n\n    #[test]\n    fn test_set_userinfo_only_username_with_encoding() {\n        use http::Uri;\n\n        use crate::ext::UriExt;\n\n        let mut uri: Uri = \"http://example.com/\".parse().unwrap();\n        uri.set_userinfo(\"user name\", None);\n\n        let (username, password) = uri.userinfo();\n        assert_eq!(username, Some(\"user%20name\"));\n        assert_eq!(password, None);\n\n        assert_eq!(uri.to_string(), \"http://user%20name@example.com/\");\n    }\n\n    #[test]\n    fn test_set_userinfo_only_password_with_encoding() {\n        use http::Uri;\n\n        use crate::ext::UriExt;\n\n        let mut uri: Uri = \"http://example.com/\".parse().unwrap();\n        uri.set_userinfo(\"\", Some(\"p@ss word\"));\n\n        let (username, password) = uri.userinfo();\n        assert_eq!(username, 
None);\n        assert_eq!(password, Some(\"p%40ss%20word\"));\n\n        assert_eq!(uri.to_string(), \"http://:p%40ss%20word@example.com/\");\n    }\n\n    #[cfg(feature = \"query\")]\n    #[test]\n    fn test_set_query() {\n        let mut uri: Uri = \"http://example.com/path\".parse().unwrap();\n        uri.set_query(\"key=value&foo=bar\".to_string());\n\n        assert_eq!(uri.to_string(), \"http://example.com/path?key=value&foo=bar\");\n\n        let mut uri: Uri = \"http://example.com/path?existing=param\".parse().unwrap();\n        uri.set_query(\"newkey=newvalue\".to_string());\n\n        assert_eq!(uri.to_string(), \"http://example.com/path?newkey=newvalue\");\n\n        let mut uri: Uri = \"http://example.com/path\".parse().unwrap();\n        uri.set_query(\"\".to_string());\n\n        assert_eq!(uri.to_string(), \"http://example.com/path\");\n    }\n\n    #[test]\n    fn test_response_builder_ext() {\n        let uri = Uri::try_from(\"http://example.com\").unwrap();\n        let response = Builder::new()\n            .status(200)\n            .uri(uri.clone())\n            .body(Body::empty())\n            .unwrap();\n\n        assert_eq!(response.uri(), Some(&uri));\n    }\n\n    #[test]\n    fn test_response_ext() {\n        let uri = Uri::try_from(\"http://example.com\").unwrap();\n        let response = http::Response::builder()\n            .status(200)\n            .extension(RequestUri(uri.clone()))\n            .body(Body::empty())\n            .unwrap();\n\n        assert_eq!(response.uri(), Some(&uri));\n    }\n}\n"
  },
  {
    "path": "src/header.rs",
    "content": "//! HTTP header types\n//!\n//! This module provides [`HeaderName`], [`HeaderMap`], [`OrigHeaderMap`], [`OrigHeaderName`], and a\n//! number of types used for interacting with `HeaderMap`. These types allow representing both\n//! HTTP/1 and HTTP/2 headers.\n\npub use http::header::*;\npub use name::OrigHeaderName;\n\n/// Trait for types that can be converted into an [`OrigHeaderName`] (case-preserved header).\n///\n/// This trait is sealed, so only known types can implement it.\n/// Supported types:\n/// - `&'static str`\n/// - `String`\n/// - `Bytes`\n/// - `HeaderName`\n/// - `&HeaderName`\n/// - `OrigHeaderName`\n/// - `&OrigHeaderName`\npub trait IntoOrigHeaderName: sealed::Sealed {\n    /// Converts the type into an [`OrigHeaderName`].\n    fn into_orig_header_name(self) -> OrigHeaderName;\n}\n\n/// A map from header names to their original casing as received in an HTTP message.\n///\n/// [`OrigHeaderMap`] not only preserves the original case of each header name as it appeared\n/// in the request or response, but also maintains the insertion order of headers. 
This makes\n/// it suitable for use cases where the order of headers matters, such as HTTP/1.x message\n/// serialization, proxying, or reproducing requests/responses exactly as received.\n#[derive(Debug, Clone, Default)]\npub struct OrigHeaderMap(HeaderMap<OrigHeaderName>);\n\n// ===== impl OrigHeaderMap =====\n\nimpl OrigHeaderMap {\n    /// Creates a new, empty [`OrigHeaderMap`].\n    #[inline]\n    pub fn new() -> Self {\n        Self(HeaderMap::default())\n    }\n\n    /// Creates an empty [`OrigHeaderMap`] with the specified capacity.\n    #[inline]\n    pub fn with_capacity(size: usize) -> Self {\n        Self(HeaderMap::with_capacity(size))\n    }\n\n    /// Insert a new header name into the collection.\n    ///\n    /// If the map did not previously have this key present, then `false` is\n    /// returned.\n    ///\n    /// If the map did have this key present, the new value is pushed to the end\n    /// of the list of values currently associated with the key. The key is not\n    /// updated, though; this matters for types that can be `==` without being\n    /// identical.\n    #[inline]\n    pub fn insert<N>(&mut self, orig: N) -> bool\n    where\n        N: IntoOrigHeaderName,\n    {\n        let orig_header_name = orig.into_orig_header_name();\n        match &orig_header_name.kind {\n            name::Kind::Cased(bytes) => HeaderName::from_bytes(bytes)\n                .map(|name| self.0.append(name, orig_header_name))\n                .unwrap_or(false),\n            name::Kind::Standard(header_name) => {\n                self.0.append(header_name.clone(), orig_header_name)\n            }\n        }\n    }\n\n    /// Extends the map with all entries from another [`OrigHeaderMap`], preserving order.\n    #[inline]\n    pub fn extend(&mut self, iter: OrigHeaderMap) {\n        self.0.extend(iter.0);\n    }\n\n    /// Returns the number of headers stored in the map.\n    ///\n    /// This number represents the total number of **values** stored in the map.\n 
   /// This number can be greater than or equal to the number of **keys**\n    /// stored given that a single key may have more than one associated value.\n    #[inline]\n    pub fn len(&self) -> usize {\n        self.0.len()\n    }\n\n    /// Returns true if the map contains no elements.\n    #[inline]\n    pub fn is_empty(&self) -> bool {\n        self.0.is_empty()\n    }\n\n    /// Returns an iterator over all header names and their original spellings, in insertion order.\n    #[inline]\n    pub fn iter(&self) -> impl Iterator<Item = (&HeaderName, &OrigHeaderName)> {\n        self.0.iter()\n    }\n}\n\nimpl OrigHeaderMap {\n    /// Sorts headers by this map, preserving original casing.\n    /// Headers in the map come first, others follow.\n    pub(crate) fn sort_headers(&self, headers: &mut HeaderMap) {\n        if headers.len() <= 1 || self.0.is_empty() {\n            return;\n        }\n\n        // Create a new header map to store the sorted headers\n        let mut sorted_headers = HeaderMap::with_capacity(headers.keys_len());\n\n        // First insert headers in the specified order\n        for name in self.0.keys() {\n            for value in headers.get_all(name) {\n                sorted_headers.append(name.clone(), value.clone());\n            }\n            headers.remove(name);\n        }\n\n        // Then insert any remaining headers that were not ordered\n        let mut prev_name: Option<HeaderName> = None;\n        for (name, value) in headers.drain() {\n            match (name, &prev_name) {\n                (Some(name), _) => {\n                    prev_name.replace(name.clone());\n                    sorted_headers.insert(name, value);\n                }\n                (None, Some(prev_name)) => {\n                    sorted_headers.append(prev_name, value);\n                }\n                _ => {}\n            }\n        }\n\n        std::mem::swap(headers, &mut sorted_headers);\n    }\n\n    /// Calls the given function for each 
header in this map's order, preserving original casing.\n    /// Headers in the map are processed first, others follow.\n    pub(crate) fn sort_headers_for_each<F>(&self, headers: &mut HeaderMap, mut dst: F)\n    where\n        F: FnMut(&[u8], &HeaderValue),\n    {\n        // First, sort headers according to the order defined in this map\n        for (name, orig_name) in self.iter() {\n            for value in headers.get_all(name) {\n                dst(orig_name.as_ref(), value);\n            }\n\n            headers.remove(name);\n        }\n\n        // After processing all ordered headers, append any remaining headers\n        let mut prev_name: Option<OrigHeaderName> = None;\n        for (name, value) in headers.drain() {\n            match (name, &prev_name) {\n                (Some(name), _) => {\n                    dst(name.as_ref(), &value);\n                    prev_name.replace(name.into_orig_header_name());\n                }\n                (None, Some(prev_name)) => {\n                    dst(prev_name.as_ref(), &value);\n                }\n                _ => (),\n            };\n        }\n    }\n}\n\nimpl<'a> IntoIterator for &'a OrigHeaderMap {\n    type Item = (&'a HeaderName, &'a OrigHeaderName);\n    type IntoIter = <&'a HeaderMap<OrigHeaderName> as IntoIterator>::IntoIter;\n\n    #[inline]\n    fn into_iter(self) -> Self::IntoIter {\n        self.0.iter()\n    }\n}\n\nimpl IntoIterator for OrigHeaderMap {\n    type Item = (Option<HeaderName>, OrigHeaderName);\n    type IntoIter = <HeaderMap<OrigHeaderName> as IntoIterator>::IntoIter;\n\n    #[inline]\n    fn into_iter(self) -> Self::IntoIter {\n        self.0.into_iter()\n    }\n}\n\nimpl_request_config_value!(OrigHeaderMap);\n\nmod name {\n    use bytes::Bytes;\n    use http::HeaderName;\n\n    use super::IntoOrigHeaderName;\n\n    /// An HTTP header name with both normalized and original casing.\n    ///\n    /// While HTTP headers are case-insensitive, this type stores both\n    /// 
the canonical `HeaderName` and the original casing as received,\n    /// useful for preserving header order and formatting in proxies,\n    /// debugging, or exact HTTP message reproduction.\n    #[derive(Debug, Clone, PartialEq, Eq)]\n    pub struct OrigHeaderName {\n        pub(super) kind: Kind,\n    }\n\n    #[derive(Debug, Clone, PartialEq, Eq)]\n    pub(super) enum Kind {\n        /// The original casing of the header name as received.\n        Cased(Bytes),\n        /// The canonical (normalized, lowercased) header name.\n        Standard(HeaderName),\n    }\n\n    impl AsRef<[u8]> for OrigHeaderName {\n        #[inline]\n        fn as_ref(&self) -> &[u8] {\n            match &self.kind {\n                Kind::Standard(name) => name.as_ref(),\n                Kind::Cased(orig) => orig.as_ref(),\n            }\n        }\n    }\n\n    impl IntoOrigHeaderName for &'static str {\n        #[inline]\n        fn into_orig_header_name(self) -> OrigHeaderName {\n            Bytes::from_static(self.as_bytes()).into_orig_header_name()\n        }\n    }\n\n    impl IntoOrigHeaderName for String {\n        #[inline]\n        fn into_orig_header_name(self) -> OrigHeaderName {\n            Bytes::from(self).into_orig_header_name()\n        }\n    }\n\n    impl IntoOrigHeaderName for Bytes {\n        #[inline]\n        fn into_orig_header_name(self) -> OrigHeaderName {\n            OrigHeaderName {\n                kind: Kind::Cased(self),\n            }\n        }\n    }\n\n    impl IntoOrigHeaderName for &HeaderName {\n        #[inline]\n        fn into_orig_header_name(self) -> OrigHeaderName {\n            OrigHeaderName {\n                kind: Kind::Standard(self.clone()),\n            }\n        }\n    }\n\n    impl IntoOrigHeaderName for HeaderName {\n        #[inline]\n        fn into_orig_header_name(self) -> OrigHeaderName {\n            OrigHeaderName {\n                kind: Kind::Standard(self),\n            }\n        }\n    }\n\n    impl IntoOrigHeaderName 
for OrigHeaderName {\n        #[inline]\n        fn into_orig_header_name(self) -> OrigHeaderName {\n            self\n        }\n    }\n\n    impl IntoOrigHeaderName for &OrigHeaderName {\n        #[inline]\n        fn into_orig_header_name(self) -> OrigHeaderName {\n            self.clone()\n        }\n    }\n}\n\nmod sealed {\n\n    use bytes::Bytes;\n    use http::HeaderName;\n\n    use crate::header::OrigHeaderName;\n\n    pub trait Sealed {}\n\n    impl Sealed for &'static str {}\n    impl Sealed for String {}\n    impl Sealed for Bytes {}\n    impl Sealed for &HeaderName {}\n    impl Sealed for HeaderName {}\n    impl Sealed for &OrigHeaderName {}\n    impl Sealed for OrigHeaderName {}\n}\n\n#[cfg(test)]\nmod test {\n    use http::{HeaderMap, HeaderName, HeaderValue};\n\n    use super::OrigHeaderMap;\n\n    /// Returns a view of all spellings associated with that header name,\n    /// in the order they were found.\n    #[inline]\n    pub(crate) fn get_all<'a>(\n        orig_headers: &'a OrigHeaderMap,\n        name: &HeaderName,\n    ) -> impl Iterator<Item = impl AsRef<[u8]> + 'a> + 'a {\n        orig_headers.0.get_all(name).into_iter()\n    }\n\n    #[test]\n    fn test_header_order() {\n        let mut headers = OrigHeaderMap::new();\n\n        // Insert headers with different cases and order\n        headers.insert(\"X-Test\");\n        headers.insert(\"X-Another\");\n        headers.insert(\"x-test2\");\n\n        // Check order and case\n        let mut iter = headers.iter();\n        assert_eq!(iter.next().unwrap().1.as_ref(), b\"X-Test\");\n        assert_eq!(iter.next().unwrap().1.as_ref(), b\"X-Another\");\n        assert_eq!(iter.next().unwrap().1.as_ref(), b\"x-test2\");\n    }\n\n    #[test]\n    fn test_extend_preserves_order() {\n        use super::OrigHeaderMap;\n\n        let mut map1 = OrigHeaderMap::new();\n        map1.insert(\"A-Header\");\n        map1.insert(\"B-Header\");\n\n        let mut map2 = OrigHeaderMap::new();\n        
map2.insert(\"C-Header\");\n        map2.insert(\"D-Header\");\n\n        map1.extend(map2);\n\n        let names: Vec<_> = map1.iter().map(|(_, orig)| orig.as_ref()).collect();\n        assert_eq!(\n            names,\n            vec![b\"A-Header\", b\"B-Header\", b\"C-Header\", b\"D-Header\"]\n        );\n    }\n\n    #[test]\n    fn test_header_case() {\n        let mut headers = OrigHeaderMap::new();\n\n        // Insert headers with different cases\n        headers.insert(\"X-Test\");\n        headers.insert(\"x-test\");\n\n        // Check that both headers are stored\n        let all_x_test: Vec<_> = get_all(&headers, &\"X-Test\".parse().unwrap()).collect();\n        assert_eq!(all_x_test.len(), 2);\n        assert!(all_x_test.iter().any(|v| v.as_ref() == b\"X-Test\"));\n        assert!(all_x_test.iter().any(|v| v.as_ref() == b\"x-test\"));\n    }\n\n    #[test]\n    fn test_header_multiple_cases() {\n        let mut headers = OrigHeaderMap::new();\n\n        // Insert multiple headers with the same name but different cases\n        headers.insert(\"X-test\");\n        headers.insert(\"x-test\");\n        headers.insert(\"X-test\");\n\n        // Check that all variations are stored\n        let all_x_test: Vec<_> = get_all(&headers, &\"x-test\".parse().unwrap()).collect();\n        assert_eq!(all_x_test.len(), 3);\n        assert!(all_x_test.iter().any(|v| v.as_ref() == b\"X-test\"));\n        assert!(all_x_test.iter().any(|v| v.as_ref() == b\"x-test\"));\n        assert!(all_x_test.iter().any(|v| v.as_ref() == b\"X-test\"));\n    }\n\n    #[test]\n    fn test_sort_headers_preserves_multiple_cookie_values() {\n        // Create original header map for ordering\n        let mut orig_headers = OrigHeaderMap::new();\n        orig_headers.insert(\"Cookie\");\n        orig_headers.insert(\"User-Agent\");\n        orig_headers.insert(\"Accept\");\n\n        // Create headers with multiple Cookie values\n        let mut headers = HeaderMap::new();\n\n        // 
Add multiple Cookie headers (this simulates how cookies are often sent)\n        headers.append(\"cookie\", HeaderValue::from_static(\"session=abc123\"));\n        headers.append(\"cookie\", HeaderValue::from_static(\"theme=dark\"));\n        headers.append(\"cookie\", HeaderValue::from_static(\"lang=en\"));\n\n        // Add other headers\n        headers.insert(\"user-agent\", HeaderValue::from_static(\"Mozilla/5.0\"));\n        headers.insert(\"accept\", HeaderValue::from_static(\"text/html\"));\n        headers.insert(\"host\", HeaderValue::from_static(\"example.com\"));\n\n        // Record original cookie values for comparison\n        let original_cookies: Vec<_> = headers\n            .get_all(\"cookie\")\n            .iter()\n            .map(|v| v.to_str().unwrap().to_string())\n            .collect();\n\n        // Sort headers according to orig_headers order\n        orig_headers.sort_headers(&mut headers);\n\n        // Verify all cookie values are preserved\n        let sorted_cookies: Vec<_> = headers\n            .get_all(\"cookie\")\n            .iter()\n            .map(|v| v.to_str().unwrap().to_string())\n            .collect();\n\n        assert_eq!(\n            original_cookies.len(),\n            sorted_cookies.len(),\n            \"Cookie count should be preserved\"\n        );\n        assert_eq!(original_cookies.len(), 3, \"Should have 3 cookie values\");\n\n        // Verify all original cookies are still present (order might change but content preserved)\n        for original_cookie in &original_cookies {\n            assert!(\n                sorted_cookies.contains(original_cookie),\n                \"Cookie '{original_cookie}' should be preserved\"\n            );\n        }\n\n        // Verify header ordering - Cookie should come first\n        let header_names: Vec<_> = headers.keys().collect();\n        assert_eq!(\n            header_names[0].as_str(),\n            \"cookie\",\n            \"Cookie should be first header\"\n     
   );\n\n        // Verify all headers are preserved\n        assert_eq!(\n            headers.len(),\n            6,\n            \"Should have 6 total header values (3 cookies + 3 others)\"\n        );\n        assert!(headers.contains_key(\"user-agent\"));\n        assert!(headers.contains_key(\"accept\"));\n        assert!(headers.contains_key(\"host\"));\n    }\n\n    #[test]\n    fn test_sort_headers_multiple_values_different_headers() {\n        let mut orig_headers = OrigHeaderMap::new();\n        orig_headers.insert(\"Accept\");\n        orig_headers.insert(\"Cookie\");\n\n        let mut headers = HeaderMap::new();\n\n        // Multiple Accept headers\n        headers.append(\"accept\", HeaderValue::from_static(\"text/html\"));\n        headers.append(\"accept\", HeaderValue::from_static(\"application/json\"));\n\n        // Multiple Cookie headers\n        headers.append(\"cookie\", HeaderValue::from_static(\"a=1\"));\n        headers.append(\"cookie\", HeaderValue::from_static(\"b=2\"));\n\n        // Single header\n        headers.insert(\"host\", HeaderValue::from_static(\"example.com\"));\n\n        let total_before = headers.len();\n\n        orig_headers.sort_headers(&mut headers);\n\n        // Verify all values preserved\n        assert_eq!(\n            headers.len(),\n            total_before,\n            \"Total header count should be preserved\"\n        );\n        assert_eq!(\n            headers.get_all(\"accept\").iter().count(),\n            2,\n            \"Accept headers should be preserved\"\n        );\n        assert_eq!(\n            headers.get_all(\"cookie\").iter().count(),\n            2,\n            \"Cookie headers should be preserved\"\n        );\n        assert_eq!(\n            headers.get_all(\"host\").iter().count(),\n            1,\n            \"Host header should be preserved\"\n        );\n    }\n}\n"
  },
  {
    "path": "src/into_uri.rs",
    "content": "//! URI conversion utilities.\n//!\n//! This module provides the [`IntoUri`] trait, allowing various types\n//! (such as `&str`, `String`, `Vec<u8>`, etc.) to be fallibly converted into an [`http::Uri`].\n//! The conversion is based on `TryFrom<T> for Uri` and ensures the resulting URI is valid and\n//! contains a host.\n//!\n//! Internally, the trait is sealed to prevent\n\nuse bytes::Bytes;\nuse http::Uri;\n\nuse crate::{Error, Result};\n\n/// Converts a value into a [`Uri`] with error handling.\n///\n/// This trait is implemented for common types such as [`Uri`], [`String`], [`&str`], and byte\n/// slices, as well as any type that can be fallibly converted into a [`Uri`] via [`TryFrom`].\npub trait IntoUri: IntoUriSealed {}\n\nimpl IntoUri for Uri {}\nimpl IntoUri for &Uri {}\nimpl IntoUri for &str {}\nimpl IntoUri for String {}\nimpl IntoUri for &String {}\nimpl IntoUri for Vec<u8> {}\nimpl IntoUri for &[u8] {}\n\npub trait IntoUriSealed {\n    // Besides parsing as a valid `Uri`.\n    fn into_uri(self) -> Result<Uri>;\n}\n\nimpl IntoUriSealed for &[u8] {\n    fn into_uri(self) -> Result<Uri> {\n        Uri::try_from(self)\n            .or_else(|_| internal::parse(internal::Kind::Bytes(self)))\n            .and_then(IntoUriSealed::into_uri)\n    }\n}\n\nimpl IntoUriSealed for Vec<u8> {\n    fn into_uri(self) -> Result<Uri> {\n        let bytes = Bytes::from(self);\n        Uri::from_maybe_shared(bytes.clone())\n            .or_else(|_| internal::parse(internal::Kind::Bytes(&bytes)))\n            .and_then(IntoUriSealed::into_uri)\n    }\n}\n\nimpl IntoUriSealed for &str {\n    fn into_uri(self) -> Result<Uri> {\n        Uri::try_from(self)\n            .or_else(|_| internal::parse(internal::Kind::Str(self)))\n            .and_then(IntoUriSealed::into_uri)\n    }\n}\n\nimpl IntoUriSealed for String {\n    #[inline]\n    fn into_uri(self) -> Result<Uri> {\n        self.into_bytes().into_uri()\n    }\n}\n\nimpl IntoUriSealed for &String {\n    
#[inline]\n    fn into_uri(self) -> Result<Uri> {\n        IntoUriSealed::into_uri(self.as_str())\n    }\n}\n\nimpl IntoUriSealed for Uri {\n    fn into_uri(self) -> Result<Uri> {\n        match (self.scheme(), self.authority()) {\n            (Some(_), Some(_)) => Ok(self),\n            _ => Err(Error::uri_bad_scheme(self)),\n        }\n    }\n}\n\nimpl IntoUriSealed for &Uri {\n    fn into_uri(self) -> Result<Uri> {\n        match (self.scheme(), self.authority()) {\n            (Some(_), Some(_)) => Ok(self.clone()),\n            _ => Err(Error::uri_bad_scheme(self.clone())),\n        }\n    }\n}\n\nmod internal {\n    use http::Uri;\n    use url::Url;\n\n    use crate::{Error, Result};\n\n    pub(super) enum Kind<'a> {\n        Bytes(&'a [u8]),\n        Str(&'a str),\n    }\n\n    pub(super) fn parse(s: Kind) -> Result<Uri> {\n        let s = match s {\n            Kind::Bytes(bytes) => std::str::from_utf8(bytes).map_err(Error::decode),\n            Kind::Str(s) => Ok(s),\n        }?;\n\n        Url::parse(s)\n            .map(String::from)\n            .map_err(Error::builder)\n            .and_then(|s| Uri::try_from(s).map_err(Error::builder))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::IntoUriSealed;\n\n    #[test]\n    fn into_uri_bad_scheme() {\n        let err = \"/hello/world\".into_uri().unwrap_err();\n        assert_eq!(\n            err.to_string(),\n            \"builder error for uri (/hello/world): URI scheme is not allowed\"\n        );\n\n        let err = \"127.0.0.1\".into_uri().unwrap_err();\n        assert_eq!(\n            err.to_string(),\n            \"builder error for uri (127.0.0.1): URI scheme is not allowed\"\n        );\n    }\n\n    #[test]\n    fn into_uri_with_space_in_path() {\n        let uri = \"http://example.com/hello world\".into_uri().unwrap();\n        assert_eq!(uri, \"http://example.com/hello%20world\");\n    }\n\n    #[test]\n    fn into_uri_with_unicode_in_path() {\n        let uri = 
\"http://example.com/文件/测试\".into_uri().unwrap();\n        assert_eq!(uri, \"http://example.com/文件/测试\");\n    }\n\n    #[test]\n    fn into_uri_with_special_chars_in_path() {\n        let uri = \"http://example.com/path<>{}\".into_uri().unwrap();\n        assert_eq!(uri, \"http://example.com/path%3C%3E%7B%7D\");\n    }\n\n    #[test]\n    fn into_uri_with_query_preserved() {\n        let uri = \"http://example.com/path?key=value&foo=bar\"\n            .into_uri()\n            .unwrap();\n        assert_eq!(uri, \"http://example.com/path?key=value&foo=bar\");\n    }\n\n    #[test]\n    fn into_uri_bytes_with_encoding() {\n        let bytes = b\"http://example.com/hello world\";\n        let uri = bytes.into_uri().unwrap();\n        assert_eq!(uri, \"http://example.com/hello%20world\");\n    }\n\n    #[test]\n    fn test_bytes_with_query() {\n        let bytes = b\"http://example.com/path?key=hello%20world\";\n        let uri = bytes.into_uri().unwrap();\n        assert_eq!(uri.to_string(), \"http://example.com/path?key=hello%20world\");\n    }\n\n    #[test]\n    fn test_bytes_with_unicode() {\n        let bytes = b\"http://example.com/\\xE6\\xB5\\x8B\\xE8\\xAF\\x95\";\n        let uri = bytes.into_uri().unwrap();\n        assert_eq!(uri, \"http://example.com/测试\");\n    }\n\n    #[test]\n    fn test_bytes_minimal() {\n        let bytes = b\"http://example.com\";\n        let uri = bytes.into_uri().unwrap();\n        assert_eq!(uri, \"http://example.com\");\n    }\n}\n"
  },
  {
    "path": "src/lib.rs",
    "content": "#![deny(unused)]\n#![deny(unsafe_code)]\n#![deny(missing_docs)]\n#![cfg_attr(test, deny(warnings))]\n#![cfg_attr(not(test), warn(unused_crate_dependencies))]\n\n//! # wreq\n//!\n//! An ergonomic and modular Rust HTTP Client for high-fidelity protocol matching, featuring\n//! customizable TLS, JA3/JA4, and HTTP/2 signature capabilities.\n//!\n//! - Plain bodies, [JSON](#json), [urlencoded](#forms), [multipart]\n//! - HTTP Trailer\n//! - Cookie Store\n//! - [Redirect Policy](#redirect-policies)\n//! - Original Header\n//! - Rotating [Proxies](#proxies)\n//! - [Tower](https://docs.rs/tower/latest/tower) Middleware\n//! - [WebSocket](#websocket) Upgrade\n//! - HTTPS via [BoringSSL](#tls)\n//! - HTTP/2 over TLS Parity\n//! - [Certificate Store (CAs & mTLS)](#certificate-store)\n//!\n//! Additional learning resources include:\n//!\n//! - [The Rust Cookbook](https://doc.rust-lang.org/stable/book/ch00-00-introduction.html)\n//! - [Repository Examples](https://github.com/0x676e67/wreq/tree/main/examples)\n//!\n//! ## Making a GET request\n//!\n//! Making a GET request is simple.\n//!\n//! ```rust\n//! # async fn run() -> wreq::Result<()> {\n//! let body = wreq::get(\"https://www.rust-lang.org\")\n//!     .send()\n//!     .await?\n//!     .text()\n//!     .await?;\n//!\n//! println!(\"body = {:?}\", body);\n//! # Ok(())\n//! # }\n//! ```\n//!\n//! **NOTE**: If you plan to perform multiple requests, it is best to create a\n//! [`Client`][client] and reuse it, taking advantage of keep-alive connection\n//! pooling.\n//!\n//! ## Making POST requests (or setting request bodies)\n//!\n//! There are several ways you can set the body of a request. The basic one is\n//! by using the `body()` method of a [`RequestBuilder`][builder]. This lets you set the\n//! exact raw bytes of what the body should be. It accepts various types,\n//! including `String` and `Vec<u8>`. If you wish to pass a custom\n//! type, you can use the `wreq::Body` constructors.\n//!\n//! 
```rust\n//! # use wreq::Error;\n//! #\n//! # async fn run() -> Result<(), Error> {\n//! let client = wreq::Client::new();\n//! let res = client\n//!     .post(\"http://httpbin.org/post\")\n//!     .body(\"the exact body that is sent\")\n//!     .send()\n//!     .await?;\n//! # Ok(())\n//! # }\n//! ```\n//!\n//! ### Forms\n//!\n//! It's very common to want to send form data in a request body. This can be\n//! done with any type that can be serialized into form data.\n//!\n//! This can be an array of tuples, or a `HashMap`, or a custom type that\n//! implements [`Serialize`][serde].\n//!\n//! The feature `form` is required.\n//!\n//! ```rust\n//! # use wreq::Error;\n//! # #[cfg(feature = \"form\")]\n//! # async fn run() -> Result<(), Error> {\n//! // This will POST a body of `foo=bar&baz=quux`\n//! let params = [(\"foo\", \"bar\"), (\"baz\", \"quux\")];\n//! let client = wreq::Client::new();\n//! let res = client\n//!     .post(\"http://httpbin.org/post\")\n//!     .form(&params)\n//!     .send()\n//!     .await?;\n//! # Ok(())\n//! # }\n//! ```\n//!\n//! ### JSON\n//!\n//! There is also a `json` method helper on the [`RequestBuilder`][builder] that works in\n//! a similar fashion the `form` method. It can take any value that can be\n//! serialized into JSON. The feature `json` is required.\n//!\n//! ```rust\n//! # use wreq::Error;\n//! # use std::collections::HashMap;\n//! #\n//! # #[cfg(feature = \"json\")]\n//! # async fn run() -> Result<(), Error> {\n//! // This will POST a body of `{\"lang\":\"rust\",\"body\":\"json\"}`\n//! let mut map = HashMap::new();\n//! map.insert(\"lang\", \"rust\");\n//! map.insert(\"body\", \"json\");\n//!\n//! let client = wreq::Client::new();\n//! let res = client\n//!     .post(\"http://httpbin.org/post\")\n//!     .json(&map)\n//!     .send()\n//!     .await?;\n//! # Ok(())\n//! # }\n//! ```\n//!\n//! ## Websocket\n//!\n//! The `websocket` module provides a way to upgrade a connection to a websocket.\n//!\n//! ```rust,no_run\n//! 
use futures_util::{SinkExt, StreamExt, TryStreamExt};\n//! use wreq::{header, ws::message::Message};\n//!\n//! #[tokio::main]\n//! async fn main() -> wreq::Result<()> {\n//!     // Use the API you're already familiar with\n//!     let websocket = wreq::websocket(\"wss://echo.websocket.org\")\n//!         .header(header::USER_AGENT, env!(\"CARGO_PKG_NAME\"))\n//!         .send()\n//!         .await?;\n//!\n//!     assert_eq!(websocket.version(), http::Version::HTTP_11);\n//!\n//!     let (mut tx, mut rx) = websocket.into_websocket().await?.split();\n//!\n//!     tokio::spawn(async move {\n//!         for i in 1..11 {\n//!             if let Err(err) = tx.send(Message::text(format!(\"Hello, World! {i}\"))).await {\n//!                 eprintln!(\"failed to send message: {err}\");\n//!             }\n//!         }\n//!     });\n//!\n//!     while let Some(message) = rx.try_next().await? {\n//!         if let Message::Text(text) = message {\n//!             println!(\"received: {text}\");\n//!         }\n//!     }\n//!\n//!     Ok(())\n//! }\n//! ```\n//!\n//! ## Redirect Policies\n//!\n//! By default, the client does not handle HTTP redirects.\n//! To customize this behavior, you can use [`redirect::Policy`][redirect] with ClientBuilder.\n//!\n//! ## Cookies\n//!\n//! The automatic storing and sending of session cookies can be enabled with\n//! the [`cookie_store`][ClientBuilder::cookie_store] method on `ClientBuilder`.\n//!\n//! ## Proxies\n//!\n//! **NOTE**: System proxies are enabled by default.\n//!\n//! System proxies look in environment variables to set HTTP or HTTPS proxies.\n//!\n//! `HTTP_PROXY` or `http_proxy` provide HTTP proxies for HTTP connections while\n//! `HTTPS_PROXY` or `https_proxy` provide HTTPS proxies for HTTPS connections.\n//! `ALL_PROXY` or `all_proxy` provide proxies for both HTTP and HTTPS connections.\n//! If both the all proxy and HTTP or HTTPS proxy variables are set the more specific\n//! HTTP or HTTPS proxies take precedence.\n//!\n//! 
These can be overwritten by adding a [`Proxy`] to `ClientBuilder`\n//! i.e. `let proxy = wreq::Proxy::http(\"https://secure.example\")?;`\n//! or disabled by calling `ClientBuilder::no_proxy()`.\n//!\n//! `socks` feature is required if you have configured socks proxy like this:\n//!\n//! ```bash\n//! export https_proxy=socks5://127.0.0.1:1086\n//! ```\n//!\n//! * `http://` is the scheme for http proxy\n//! * `https://` is the scheme for https proxy\n//! * `socks4://` is the scheme for socks4 proxy\n//! * `socks4a://` is the scheme for socks4a proxy\n//! * `socks5://` is the scheme for socks5 proxy\n//! * `socks5h://` is the scheme for socks5h proxy\n//!  \n//! ## TLS\n//!\n//! By default, clients will utilize BoringSSL transport layer security to connect to HTTPS targets.\n//!\n//! - Various parts of TLS can also be configured or even disabled on the `ClientBuilder`.\n//!\n//! ## Certificate Store\n//!\n//! By default, wreq uses Mozilla's root certificates through the webpki-roots crate.\n//! This static root certificate bundle is not automatically updated and ignores any root\n//! certificates installed on the host. You can disable default-features to use the system's default\n//! certificate path. Additionally, wreq provides a certificate store for users to customize and\n//! update certificates.\n//!\n//! Custom Certificate Store verification supports Root CA certificates, peer certificates, and\n//! self-signed certificate SSL pinning.\n//!\n//! ## Optional Features\n//!\n//! The following are a list of [Cargo features][cargo-features] that can be\n//! enabled or disabled:\n//!\n//! - **cookies**: Provides cookie session support.\n//! - **gzip**: Provides response body gzip decompression.\n//! - **brotli**: Provides response body brotli decompression.\n//! - **zstd**: Provides response body zstd decompression.\n//! - **deflate**: Provides response body deflate decompression.\n//! - **query**: Provides query parameter serialization.\n//! 
- **form**: Provides form data serialization.\n//! - **json**: Provides serialization and deserialization for JSON bodies.\n//! - **multipart**: Provides functionality for multipart forms.\n//! - **charset**: Improved support for decoding text.\n//! - **stream**: Adds support for `futures::Stream`.\n//! - **socks**: Provides SOCKS5 and SOCKS4 proxy support.\n//! - **ws**: Provides websocket support.\n//! - **hickory-dns**: Enables a hickory-dns async resolver instead of default threadpool using\n//!   `getaddrinfo`.\n//! - **webpki-roots** *(enabled by default)*: Use the webpki-roots crate for root certificates.\n//! - **system-proxy**: Enable system proxy support.\n//! - **tracing**: Enable tracing logging support.\n//! - **prefix-symbols**: Prefix BoringSSL symbols to avoid OpenSSL conflicts.\n//!\n//! [client]: ./struct.Client.html\n//! [response]: ./struct.Response.html\n//! [get]: ./fn.get.html\n//! [builder]: ./struct.RequestBuilder.html\n//! [serde]: http://serde.rs\n//! [redirect]: crate::redirect\n//! [Proxy]: ./struct.Proxy.html\n//! [cargo-features]: https://doc.rust-lang.org/stable/cargo/reference/manifest.html#the-features-section\n\n#[macro_use]\nmod trace;\n#[macro_use]\nmod config;\n#[macro_use]\nmod ext;\nmod client;\nmod error;\nmod into_uri;\nmod proxy;\nmod sync;\nmod util;\n\n#[cfg(feature = \"cookies\")]\npub mod cookie;\npub mod dns;\npub mod header;\npub mod redirect;\npub mod retry;\npub mod tls;\n\npub use http::{Method, StatusCode, Uri, Version};\n#[cfg(unix)]\nuse libc as _;\n\n#[cfg(feature = \"multipart\")]\npub use self::client::multipart;\n#[cfg(feature = \"ws\")]\npub use self::client::ws;\npub use self::{\n    client::{\n        Body, Client, ClientBuilder, Emulation, EmulationBuilder, Group, IntoEmulation, Request,\n        RequestBuilder, Response, Upgraded,\n    },\n    error::{Error, Result},\n    ext::{ResponseBuilderExt, ResponseExt},\n    into_uri::IntoUri,\n    proxy::{NoProxy, Proxy},\n};\n\npub mod http1 {\n    //! 
HTTP/1 protocol implementation and utilities.\n\n    pub use super::client::http1::{Http1Options, Http1OptionsBuilder};\n}\n\npub mod http2 {\n    //! HTTP/2 protocol implementation and utilities.\n\n    pub use http2::frame::{\n        Priorities, PrioritiesBuilder, Priority, PseudoId, PseudoOrder, Setting, SettingId,\n        SettingsOrder, SettingsOrderBuilder, StreamDependency, StreamId,\n    };\n\n    pub use super::client::http2::{Http2Options, Http2OptionsBuilder};\n}\n\nfn _assert_impls() {\n    fn assert_send<T: Send>() {}\n    fn assert_sync<T: Sync>() {}\n    fn assert_clone<T: Clone>() {}\n\n    assert_send::<Client>();\n    assert_sync::<Client>();\n    assert_clone::<Client>();\n\n    assert_send::<Request>();\n    assert_send::<RequestBuilder>();\n    #[cfg(feature = \"ws\")]\n    assert_send::<ws::WebSocketRequestBuilder>();\n\n    assert_send::<Response>();\n    #[cfg(feature = \"ws\")]\n    assert_send::<ws::WebSocketResponse>();\n    #[cfg(feature = \"ws\")]\n    assert_send::<ws::WebSocket>();\n\n    assert_send::<Error>();\n    assert_sync::<Error>();\n}\n\n/// Shortcut method to quickly make a `GET` request.\n///\n/// See also the methods on the [`wreq::RequestBuilder`](./struct.RequestBuilder.html)\n/// type.\n///\n/// **NOTE**: This function creates a new internal `Client` on each call,\n/// and so should not be used if making many requests. 
Create a\n/// [`Client`](./struct.Client.html) instead.\n///\n/// # Examples\n///\n/// ```rust\n/// # async fn run() -> wreq::Result<()> {\n/// let body = wreq::get(\"https://www.rust-lang.org\")\n///     .send()\n///     .await?\n///     .text()\n///     .await?;\n/// # Ok(())\n/// # }\n/// ```\n#[inline]\npub fn get<T: IntoUri>(uri: T) -> RequestBuilder {\n    Client::new().get(uri)\n}\n\n/// Shortcut method to quickly make a `POST` request.\n///\n/// See also the methods on the [`wreq::RequestBuilder`](./struct.RequestBuilder.html)\n/// type.\n///\n/// **NOTE**: This function creates a new internal `Client` on each call,\n/// and so should not be used if making many requests. Create a\n/// [`Client`](./struct.Client.html) instead.\n///\n/// # Examples\n///\n/// ```rust\n/// # async fn run() -> wreq::Result<()> {\n/// let res = wreq::post(\"https://httpbin.org/post\")\n///     .body(\"example body\")\n///     .send()\n///     .await?;\n/// # Ok(())\n/// # }\n/// ```\n#[inline]\npub fn post<T: IntoUri>(uri: T) -> RequestBuilder {\n    Client::new().post(uri)\n}\n\n/// Shortcut method to quickly make a `PUT` request.\n///\n/// See also the methods on the [`wreq::RequestBuilder`](./struct.RequestBuilder.html)\n/// type.\n///\n/// **NOTE**: This function creates a new internal `Client` on each call,\n/// and so should not be used if making many requests. 
Create a\n/// [`Client`](./struct.Client.html) instead.\n///\n/// # Examples\n///\n/// ```rust\n/// # async fn run() -> wreq::Result<()> {\n/// let res = wreq::put(\"https://httpbin.org/put\")\n///     .body(\"update content\")\n///     .send()\n///     .await?;\n/// # Ok(())\n/// # }\n/// ```\n#[inline]\npub fn put<T: IntoUri>(uri: T) -> RequestBuilder {\n    Client::new().put(uri)\n}\n\n/// Shortcut method to quickly make a `DELETE` request.\n///\n/// See also the methods on the [`wreq::RequestBuilder`](./struct.RequestBuilder.html)\n/// type.\n///\n/// **NOTE**: This function creates a new internal `Client` on each call,\n/// and so should not be used if making many requests. Create a\n/// [`Client`](./struct.Client.html) instead.\n///\n/// # Examples\n///\n/// ```rust\n/// # async fn run() -> wreq::Result<()> {\n/// let res = wreq::delete(\"https://httpbin.org/delete\")\n///     .send()\n///     .await?;\n/// # Ok(())\n/// # }\n/// ```\n#[inline]\npub fn delete<T: IntoUri>(uri: T) -> RequestBuilder {\n    Client::new().delete(uri)\n}\n\n/// Shortcut method to quickly make a `HEAD` request.\n///\n/// See also the methods on the [`wreq::RequestBuilder`](./struct.RequestBuilder.html)\n/// type.\n///\n/// **NOTE**: This function creates a new internal `Client` on each call,\n/// and so should not be used if making many requests. 
Create a\n/// [`Client`](./struct.Client.html) instead.\n///\n/// # Examples\n///\n/// ```rust\n/// # async fn run() -> wreq::Result<()> {\n/// let res = wreq::head(\"https://httpbin.org/get\")\n///     .send()\n///     .await?;\n/// # Ok(())\n/// # }\n/// ```\n#[inline]\npub fn head<T: IntoUri>(uri: T) -> RequestBuilder {\n    Client::new().head(uri)\n}\n\n/// Shortcut method to quickly make a `PATCH` request.\n///\n/// See also the methods on the [`wreq::RequestBuilder`](./struct.RequestBuilder.html)\n/// type.\n///\n/// **NOTE**: This function creates a new internal `Client` on each call,\n/// and so should not be used if making many requests. Create a\n/// [`Client`](./struct.Client.html) instead.\n///\n/// # Examples\n///\n/// ```rust\n/// # async fn run() -> wreq::Result<()> {\n/// let res = wreq::patch(\"https://httpbin.org/patch\")\n///     .body(\"patch content\")\n///     .send()\n///     .await?;\n/// # Ok(())\n/// # }\n/// ```\n#[inline]\npub fn patch<T: IntoUri>(uri: T) -> RequestBuilder {\n    Client::new().patch(uri)\n}\n\n/// Shortcut method to quickly make an `OPTIONS` request.\n///\n/// See also the methods on the [`wreq::RequestBuilder`](./struct.RequestBuilder.html)\n/// type.\n///\n/// **NOTE**: This function creates a new internal `Client` on each call,\n/// and so should not be used if making many requests. 
Create a\n/// [`Client`](./struct.Client.html) instead.\n///\n/// # Examples\n///\n/// ```rust\n/// # async fn run() -> wreq::Result<()> {\n/// let res = wreq::options(\"https://httpbin.org/get\")\n///     .send()\n///     .await?;\n/// # Ok(())\n/// # }\n/// ```\n#[inline]\npub fn options<T: IntoUri>(uri: T) -> RequestBuilder {\n    Client::new().options(uri)\n}\n\n/// Shortcut method to quickly make a request with a custom HTTP method.\n///\n/// See also the methods on the [`wreq::RequestBuilder`](./struct.RequestBuilder.html)\n/// type.\n///\n/// **NOTE**: This function creates a new internal `Client` on each call,\n/// and so should not be used if making many requests. Create a\n/// [`Client`](./struct.Client.html) instead.\n///\n/// # Examples\n///\n/// ```rust\n/// # async fn run() -> wreq::Result<()> {\n/// use http::Method;\n/// let res = wreq::request(Method::TRACE, \"https://httpbin.org/trace\")\n///     .send()\n///     .await?;\n/// # Ok(())\n/// # }\n/// ```\n#[inline]\npub fn request<T: IntoUri>(method: Method, uri: T) -> RequestBuilder {\n    Client::new().request(method, uri)\n}\n\n/// Shortcut method to quickly make a WebSocket request.\n///\n/// See also the methods on the\n/// [`wreq::ws::WebSocketRequestBuilder`](./ws/struct.WebSocketRequestBuilder.html) type.\n///\n/// **NOTE**: This function creates a new internal `Client` on each call,\n/// and so should not be used if making many requests. 
Create a\n/// [`Client`](./struct.Client.html) instead.\n///\n/// # Examples\n///\n/// ```rust\n/// # async fn run() -> wreq::Result<()> {\n/// use futures_util::{SinkExt, StreamExt, TryStreamExt};\n/// use wreq::{header, ws::message::Message};\n///\n/// let resp = wreq::websocket(\"wss://echo.websocket.org\")\n///     .header(header::USER_AGENT, env!(\"CARGO_PKG_NAME\"))\n///     .read_buffer_size(1024 * 1024)\n///     .send()\n///     .await?;\n///\n/// assert_eq!(resp.version(), http::Version::HTTP_11);\n///\n/// let websocket = resp.into_websocket().await?;\n/// if let Some(protocol) = websocket.protocol() {\n///     println!(\"WebSocket subprotocol: {:?}\", protocol);\n/// }\n///\n/// let (mut tx, mut rx) = websocket.split();\n///\n/// tokio::spawn(async move {\n///     for i in 1..11 {\n///         if let Err(err) = tx.send(Message::text(format!(\"Hello, World! {i}\"))).await {\n///             eprintln!(\"failed to send message: {err}\");\n///         }\n///     }\n/// });\n///\n/// while let Some(message) = rx.try_next().await? {\n///     if let Message::Text(text) = message {\n///         println!(\"received: {text}\");\n///     }\n/// }\n/// # Ok(())\n/// # }\n/// ```\n#[inline]\n#[cfg(feature = \"ws\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"ws\")))]\npub fn websocket<T: IntoUri>(uri: T) -> ws::WebSocketRequestBuilder {\n    Client::new().websocket(uri)\n}\n"
  },
  {
    "path": "src/proxy/mac.rs",
    "content": "use system_configuration::{\n    core_foundation::{\n        base::CFType,\n        dictionary::CFDictionary,\n        number::CFNumber,\n        string::{CFString, CFStringRef},\n    },\n    dynamic_store::SCDynamicStoreBuilder,\n    sys::schema_definitions::{\n        kSCPropNetProxiesHTTPEnable, kSCPropNetProxiesHTTPPort, kSCPropNetProxiesHTTPProxy,\n        kSCPropNetProxiesHTTPSEnable, kSCPropNetProxiesHTTPSPort, kSCPropNetProxiesHTTPSProxy,\n    },\n};\n\n#[allow(unsafe_code)]\npub(super) fn with_system(builder: &mut super::matcher::Builder) {\n    let Some(proxies_map) = SCDynamicStoreBuilder::new(\"\")\n        .build()\n        .and_then(|store| store.get_proxies())\n    else {\n        return;\n    };\n\n    if builder.http.is_empty() {\n        let http_proxy_config = parse_setting_from_dynamic_store(\n            &proxies_map,\n            unsafe { kSCPropNetProxiesHTTPEnable },\n            unsafe { kSCPropNetProxiesHTTPProxy },\n            unsafe { kSCPropNetProxiesHTTPPort },\n        );\n        if let Some(http) = http_proxy_config {\n            builder.http = http;\n        }\n    }\n\n    if builder.https.is_empty() {\n        let https_proxy_config = parse_setting_from_dynamic_store(\n            &proxies_map,\n            unsafe { kSCPropNetProxiesHTTPSEnable },\n            unsafe { kSCPropNetProxiesHTTPSProxy },\n            unsafe { kSCPropNetProxiesHTTPSPort },\n        );\n\n        if let Some(https) = https_proxy_config {\n            builder.https = https;\n        }\n    }\n}\n\nfn parse_setting_from_dynamic_store(\n    proxies_map: &CFDictionary<CFString, CFType>,\n    enabled_key: CFStringRef,\n    host_key: CFStringRef,\n    port_key: CFStringRef,\n) -> Option<String> {\n    let proxy_enabled = proxies_map\n        .find(enabled_key)\n        .and_then(|flag| flag.downcast::<CFNumber>())\n        .and_then(|flag| flag.to_i32())\n        .unwrap_or(0)\n        == 1;\n\n    if proxy_enabled {\n        let proxy_host 
= proxies_map\n            .find(host_key)\n            .and_then(|host| host.downcast::<CFString>())\n            .map(|host| host.to_string());\n        let proxy_port = proxies_map\n            .find(port_key)\n            .and_then(|port| port.downcast::<CFNumber>())\n            .and_then(|port| port.to_i32());\n\n        return match (proxy_host, proxy_port) {\n            (Some(proxy_host), Some(proxy_port)) => Some(format!(\"{proxy_host}:{proxy_port}\")),\n            (Some(proxy_host), None) => Some(proxy_host),\n            (None, Some(_)) => None,\n            (None, None) => None,\n        };\n    }\n\n    None\n}\n"
  },
  {
    "path": "src/proxy/matcher.rs",
    "content": "//! Proxy matchers\n//!\n//! This module contains different matchers to configure rules for when a proxy\n//! should be used, and if so, with what arguments.\n//!\n//! A [`Matcher`] can be constructed either using environment variables, or\n//! a [`Matcher::builder()`].\n//!\n//! Once constructed, the `Matcher` can be asked if it intercepts a `Uri` by\n//! calling [`Matcher::intercept()`].\n//!\n//! An [`Intercept`] includes the destination for the proxy, and any parsed\n//! authentication to be used.\n\nuse std::net::IpAddr;\n#[cfg(unix)]\nuse std::{path::Path, sync::Arc};\n\nuse bytes::Bytes;\nuse http::{\n    HeaderMap, Uri,\n    header::HeaderValue,\n    uri::{Authority, Scheme},\n};\nuse ipnet::IpNet;\nuse percent_encoding::percent_decode_str;\n\nuse self::builder::IntoValue;\nuse super::{Extra, Intercepted};\nuse crate::ext::UriExt;\n\n/// A proxy matcher, usually built from environment variables.\n#[derive(Debug, Clone, PartialEq, Eq, Hash)]\npub struct Matcher {\n    http: Option<Intercept>,\n    https: Option<Intercept>,\n    no: NoProxy,\n    #[cfg(unix)]\n    unix: Option<Arc<Path>>,\n}\n\n/// A matched proxy,\n///\n/// This is returned by a matcher if a proxy should be used.\n#[derive(Debug, Clone, PartialEq, Eq, Hash)]\npub struct Intercept {\n    uri: Uri,\n    auth: Auth,\n    extra: Extra,\n}\n\n/// A builder to create a [`Matcher`].\n///\n/// Construct with [`Matcher::builder()`].\n#[derive(Default)]\npub struct Builder {\n    pub(super) is_cgi: bool,\n    pub(super) all: String,\n    pub(super) http: String,\n    pub(super) https: String,\n    pub(super) no: String,\n    #[cfg(unix)]\n    pub(super) unix: Option<Arc<Path>>,\n}\n\n#[derive(Debug, Clone, PartialEq, Eq, Hash)]\npub enum Auth {\n    Empty,\n    Basic(HeaderValue),\n    Raw(Bytes, Bytes),\n}\n\n/// A filter for proxy matchers.\n///\n/// This type is based off the `NO_PROXY` rules used by curl.\n#[derive(Clone, Debug, Default, PartialEq, Eq, Hash)]\nstruct NoProxy {\n    
ips: IpMatcher,\n    domains: DomainMatcher,\n}\n\n#[derive(Clone, Debug, Default, PartialEq, Eq, Hash)]\nstruct DomainMatcher(Vec<String>);\n\n#[derive(Clone, Debug, Default, PartialEq, Eq, Hash)]\nstruct IpMatcher(Vec<Ip>);\n\n#[derive(Clone, Debug, PartialEq, Eq, Hash)]\nenum Ip {\n    Address(IpAddr),\n    Network(IpNet),\n}\n\n// ===== impl Matcher =====\n\nimpl Matcher {\n    /// Create a matcher from the environment or system.\n    ///\n    /// This checks the same environment variables as `from_env()`, and if not\n    /// set, checks the system configuration for values for the OS.\n    ///\n    /// This constructor is always available, but if the `client-proxy-system`\n    /// feature is enabled, it will check more configuration. Use this\n    /// constructor if you want to allow users to optionally enable more, or\n    /// use `from_env` if you do not want the values to change based on an\n    /// enabled feature.\n    pub fn from_system() -> Self {\n        Builder::from_system().build(Extra::default())\n    }\n\n    /// Start a builder to configure a matcher.\n    pub fn builder() -> Builder {\n        Builder::default()\n    }\n\n    /// Check if the destination should be intercepted by a proxy.\n    ///\n    /// If the proxy rules match the destination, a new `Uri` will be returned\n    /// to connect to.\n    pub fn intercept(&self, dst: &Uri) -> Option<Intercepted> {\n        // if unix sockets are configured, check them first\n        #[cfg(unix)]\n        if let Some(unix) = &self.unix {\n            return Some(Intercepted::Unix(unix.clone()));\n        }\n\n        // TODO(perf): don't need to check `no` if below doesn't match...\n        if self.no.contains(dst.host()?) 
{\n            return None;\n        }\n        if dst.is_http() {\n            return self.http.clone().map(Intercepted::Proxy);\n        }\n\n        if dst.is_https() {\n            return self.https.clone().map(Intercepted::Proxy);\n        }\n\n        None\n    }\n}\n\n// ===== impl Intercept =====\n\nimpl Intercept {\n    #[inline]\n    pub(crate) fn uri(&self) -> &Uri {\n        &self.uri\n    }\n\n    pub(crate) fn basic_auth(&self) -> Option<&HeaderValue> {\n        if let Some(ref val) = self.extra.auth {\n            return Some(val);\n        }\n\n        if let Auth::Basic(ref val) = self.auth {\n            Some(val)\n        } else {\n            None\n        }\n    }\n\n    #[inline]\n    pub(crate) fn custom_headers(&self) -> Option<&HeaderMap> {\n        self.extra.misc.as_ref()\n    }\n\n    #[cfg(feature = \"socks\")]\n    pub(crate) fn raw_auth(&self) -> Option<(Bytes, Bytes)> {\n        if let Auth::Raw(ref u, ref p) = self.auth {\n            Some((u.clone(), p.clone()))\n        } else {\n            None\n        }\n    }\n}\n\n// ===== impl Builder =====\n\nimpl Builder {\n    fn from_env() -> Self {\n        Builder {\n            is_cgi: std::env::var_os(\"REQUEST_METHOD\").is_some(),\n            all: get_first_env(&[\"ALL_PROXY\", \"all_proxy\"]),\n            http: get_first_env(&[\"HTTP_PROXY\", \"http_proxy\"]),\n            https: get_first_env(&[\"HTTPS_PROXY\", \"https_proxy\"]),\n            no: get_first_env(&[\"NO_PROXY\", \"no_proxy\"]),\n            #[cfg(unix)]\n            unix: None,\n        }\n    }\n\n    fn from_system() -> Self {\n        #[allow(unused_mut)]\n        let mut builder = Self::from_env();\n\n        #[cfg(all(target_os = \"macos\", feature = \"system-proxy\"))]\n        super::mac::with_system(&mut builder);\n\n        #[cfg(all(windows, feature = \"system-proxy\"))]\n        super::win::with_system(&mut builder);\n\n        builder\n    }\n\n    /// Set the target proxy for all destinations.\n    
pub fn all<S>(mut self, val: S) -> Self\n    where\n        S: IntoValue,\n    {\n        self.all = val.into_value();\n        self\n    }\n\n    /// Set the target proxy for HTTP destinations.\n    pub fn http<S>(mut self, val: S) -> Self\n    where\n        S: IntoValue,\n    {\n        self.http = val.into_value();\n        self\n    }\n\n    /// Set the target proxy for HTTPS destinations.\n    pub fn https<S>(mut self, val: S) -> Self\n    where\n        S: IntoValue,\n    {\n        self.https = val.into_value();\n        self\n    }\n\n    /// Set the \"no\" proxy filter.\n    ///\n    /// The rules are as follows:\n    /// * Entries are expected to be comma-separated (whitespace between entries is ignored)\n    /// * IP addresses (both IPv4 and IPv6) are allowed, as are optional subnet masks (by adding\n    ///   /size, for example \"`192.168.1.0/24`\").\n    /// * An entry \"`*`\" matches all hostnames (this is the only wildcard allowed)\n    /// * Any other entry is considered a domain name (and may contain a leading dot, for example\n    ///   `google.com` and `.google.com` are equivalent) and would match both that domain AND all\n    ///   subdomains.\n    ///\n    /// For example, if `\"NO_PROXY=google.com, 192.168.1.0/24\"` was set, all of the following would\n    /// match (and therefore would bypass the proxy):\n    /// * `http://google.com/`\n    /// * `http://www.google.com/`\n    /// * `http://192.168.1.42/`\n    ///\n    /// The URI `http://notgoogle.com/` would not match.\n    pub fn no<S>(mut self, val: S) -> Self\n    where\n        S: IntoValue,\n    {\n        self.no = val.into_value();\n        self\n    }\n\n    /// Set the unix socket target proxy for all destinations.\n    #[cfg(unix)]\n    pub fn unix<S>(mut self, val: S) -> Self\n    where\n        S: super::uds::IntoUnixSocket,\n    {\n        self.unix = Some(val.unix_socket());\n        self\n    }\n\n    /// Construct a [`Matcher`] using the configured values.\n    pub(super) 
fn build(self, extra: Extra) -> Matcher {\n        if self.is_cgi {\n            return Matcher {\n                http: None,\n                https: None,\n                no: NoProxy::empty(),\n                #[cfg(unix)]\n                unix: None,\n            };\n        }\n\n        let mut all = parse_env_uri(&self.all);\n        let mut http = parse_env_uri(&self.http);\n        let mut https = parse_env_uri(&self.https);\n\n        if let Some(http) = http.as_mut() {\n            http.extra = extra.clone();\n        }\n        if let Some(https) = https.as_mut() {\n            https.extra = extra.clone();\n        }\n        if http.is_none() || https.is_none() {\n            if let Some(all) = all.as_mut() {\n                all.extra = extra;\n            }\n        }\n\n        Matcher {\n            http: http.or_else(|| all.clone()),\n            https: https.or(all),\n            no: NoProxy::from_string(&self.no),\n            #[cfg(unix)]\n            unix: self.unix,\n        }\n    }\n}\n\nfn get_first_env(names: &[&str]) -> String {\n    for name in names {\n        if let Ok(val) = std::env::var(name) {\n            return val;\n        }\n    }\n\n    String::new()\n}\n\nfn parse_env_uri(val: &str) -> Option<Intercept> {\n    let uri = val.parse::<Uri>().ok()?;\n    let mut builder = Uri::builder();\n    let mut is_httpish = false;\n    let mut is_socks = false;\n    let mut auth = Auth::Empty;\n\n    builder = builder.scheme(match uri.scheme() {\n        Some(s) => {\n            if s == &Scheme::HTTP || s == &Scheme::HTTPS {\n                is_httpish = true;\n                s.clone()\n            } else if matches!(s.as_str(), \"socks4\" | \"socks4a\" | \"socks5\" | \"socks5h\") {\n                is_socks = true;\n                s.clone()\n            } else {\n                // can't use this proxy scheme\n                return None;\n            }\n        }\n        // if no scheme provided, assume they meant 'http'\n        
None => {\n            is_httpish = true;\n            Scheme::HTTP\n        }\n    });\n\n    let authority = {\n        let authority = uri.authority()?;\n        // default SOCKS port to 1080 if missing\n        if is_socks && authority.port().is_none() {\n            Authority::from_maybe_shared(Bytes::from(format!(\"{authority}:1080\"))).ok()?\n        } else {\n            authority.clone()\n        }\n    };\n\n    if let Some((userinfo, host_port)) = authority.as_str().rsplit_once('@') {\n        let (user, pass) = match userinfo.split_once(':') {\n            Some((user, pass)) => (user, Some(pass)),\n            None => (userinfo, None),\n        };\n\n        let user = percent_decode_str(user).decode_utf8_lossy();\n        let pass = pass.map(|pass| percent_decode_str(pass).decode_utf8_lossy());\n        if is_httpish {\n            auth = Auth::Basic(crate::util::basic_auth(&user, pass.as_deref()));\n        } else {\n            auth = Auth::Raw(\n                Bytes::from(user.into_owned()),\n                Bytes::from(pass.map_or_else(String::new, std::borrow::Cow::into_owned)),\n            );\n        }\n        builder = builder.authority(host_port);\n    } else {\n        builder = builder.authority(authority);\n    }\n\n    // removing any path, but we MUST specify one or the builder errors\n    builder = builder.path_and_query(\"/\");\n\n    Some(Intercept {\n        auth,\n        extra: Extra::default(),\n        uri: builder.build().ok()?,\n    })\n}\n\nimpl NoProxy {\n    fn empty() -> NoProxy {\n        NoProxy {\n            ips: IpMatcher(Vec::new()),\n            domains: DomainMatcher(Vec::new()),\n        }\n    }\n\n    /// Returns a new no-proxy configuration based on a `no_proxy` string (or `None` if no variables\n    /// are set)\n    /// The rules are as follows:\n    /// * The environment variable `NO_PROXY` is checked, if it is not set, `no_proxy` is checked\n    /// * If neither environment variable is set, `None` is 
returned\n    /// * Entries are expected to be comma-separated (whitespace between entries is ignored)\n    /// * IP addresses (both IPv4 and IPv6) are allowed, as are optional subnet masks (by adding\n    ///   /size, for example \"`192.168.1.0/24`\").\n    /// * An entry \"`*`\" matches all hostnames (this is the only wildcard allowed)\n    /// * Any other entry is considered a domain name (and may contain a leading dot, for example\n    ///   `google.com` and `.google.com` are equivalent) and would match both that domain AND all\n    ///   subdomains.\n    ///\n    /// For example, if `\"NO_PROXY=google.com, 192.168.1.0/24\"` was set, all of the following would\n    /// match (and therefore would bypass the proxy):\n    /// * `http://google.com/`\n    /// * `http://www.google.com/`\n    /// * `http://192.168.1.42/`\n    ///\n    /// The URI `http://notgoogle.com/` would not match.\n    pub fn from_string(no_proxy_list: &str) -> Self {\n        let mut ips = Vec::new();\n        let mut domains = Vec::new();\n        let parts = no_proxy_list.split(',').map(str::trim);\n        for part in parts {\n            match part.parse::<IpNet>() {\n                // If we can parse an IP net or address, then use it, otherwise, assume it is a\n                // domain\n                Ok(ip) => ips.push(Ip::Network(ip)),\n                Err(_) => match part.parse::<IpAddr>() {\n                    Ok(addr) => ips.push(Ip::Address(addr)),\n                    Err(_) => {\n                        if !part.trim().is_empty() {\n                            domains.push(part.to_owned())\n                        }\n                    }\n                },\n            }\n        }\n        NoProxy {\n            ips: IpMatcher(ips),\n            domains: DomainMatcher(domains),\n        }\n    }\n\n    /// Return true if this matches the host (domain or IP).\n    pub fn contains(&self, host: &str) -> bool {\n        // According to RFC3986, raw IPv6 hosts will be wrapped in 
[]. So we need to strip those off\n        // the end in order to parse correctly\n        let host = if host.starts_with('[') {\n            let x: &[_] = &['[', ']'];\n            host.trim_matches(x)\n        } else {\n            host\n        };\n        match host.parse::<IpAddr>() {\n            // If we can parse an IP addr, then use it, otherwise, assume it is a domain\n            Ok(ip) => self.ips.contains(ip),\n            Err(_) => self.domains.contains(host),\n        }\n    }\n}\n\nimpl IpMatcher {\n    fn contains(&self, addr: IpAddr) -> bool {\n        for ip in &self.0 {\n            match ip {\n                Ip::Address(address) => {\n                    if &addr == address {\n                        return true;\n                    }\n                }\n                Ip::Network(net) => {\n                    if net.contains(&addr) {\n                        return true;\n                    }\n                }\n            }\n        }\n        false\n    }\n}\n\nimpl DomainMatcher {\n    // The following links may be useful to understand the origin of these rules:\n    // * https://curl.se/libcurl/c/CURLOPT_NOPROXY.html\n    // * https://github.com/curl/curl/issues/1208\n    fn contains(&self, domain: &str) -> bool {\n        let domain_len = domain.len();\n        for d in &self.0 {\n            if d.eq_ignore_ascii_case(domain)\n                || d.strip_prefix('.')\n                    .is_some_and(|s| s.eq_ignore_ascii_case(domain))\n            {\n                return true;\n            } else if domain\n                .get(domain_len.saturating_sub(d.len())..)\n                .is_some_and(|s| s.eq_ignore_ascii_case(d))\n            {\n                if d.starts_with('.') {\n                    // If the first character of d is a dot, that means the first character of\n                    // domain must also be a dot, so we are looking at a\n                    // subdomain of d and that matches\n                    return 
true;\n                } else if domain\n                    .as_bytes()\n                    .get(domain_len.saturating_sub(d.len() + 1))\n                    == Some(&b'.')\n                {\n                    // Given that d is a prefix of domain, if the prior character in domain is a dot\n                    // then that means we must be matching a subdomain of d, and that matches\n                    return true;\n                }\n            } else if d == \"*\" {\n                return true;\n            }\n        }\n        false\n    }\n}\n\nmod builder {\n    /// A type that can used as a `Builder` value.\n    ///\n    /// Private and sealed, only visible in docs.\n    pub trait IntoValue {\n        #[doc(hidden)]\n        fn into_value(self) -> String;\n    }\n\n    impl IntoValue for String {\n        #[doc(hidden)]\n        fn into_value(self) -> String {\n            self\n        }\n    }\n\n    impl IntoValue for &String {\n        #[doc(hidden)]\n        fn into_value(self) -> String {\n            self.into()\n        }\n    }\n\n    impl IntoValue for &str {\n        #[doc(hidden)]\n        fn into_value(self) -> String {\n            self.into()\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn test_domain_matcher() {\n        let domains = vec![\".foo.bar\".into(), \"bar.foo\".into()];\n        let matcher = DomainMatcher(domains);\n\n        // domains match with leading `.`\n        assert!(matcher.contains(\"foo.bar\"));\n        assert!(matcher.contains(\"FOO.BAR\"));\n        // subdomains match with leading `.`\n        assert!(matcher.contains(\"www.foo.bar\"));\n        assert!(matcher.contains(\"WWW.FOO.BAR\"));\n\n        // domains match with no leading `.`\n        assert!(matcher.contains(\"bar.foo\"));\n        assert!(matcher.contains(\"Bar.foo\"));\n        // subdomains match with no leading `.`\n        assert!(matcher.contains(\"www.bar.foo\"));\n        
assert!(matcher.contains(\"WWW.BAR.FOO\"));\n\n        // non-subdomain string prefixes don't match\n        assert!(!matcher.contains(\"notfoo.bar\"));\n        assert!(!matcher.contains(\"notbar.foo\"));\n    }\n\n    #[test]\n    fn test_no_proxy_wildcard() {\n        let no_proxy = NoProxy::from_string(\"*\");\n        assert!(no_proxy.contains(\"any.where\"));\n    }\n\n    #[test]\n    fn test_no_proxy_ip_ranges() {\n        let no_proxy =\n            NoProxy::from_string(\".foo.bar, bar.baz,10.42.1.1/24,::1,10.124.7.8,2001::/17\");\n\n        let should_not_match = [\n            // random uri, not in no_proxy\n            \"hyper.rs\",\n            // make sure that random non-subdomain string prefixes don't match\n            \"notfoo.bar\",\n            // make sure that random non-subdomain string prefixes don't match\n            \"notbar.baz\",\n            // ipv4 address out of range\n            \"10.43.1.1\",\n            // ipv4 address out of range\n            \"10.124.7.7\",\n            // ipv6 address out of range\n            \"[ffff:db8:a0b:12f0::1]\",\n            // ipv6 address out of range\n            \"[2005:db8:a0b:12f0::1]\",\n        ];\n\n        for host in &should_not_match {\n            assert!(!no_proxy.contains(host), \"should not contain {host:?}\");\n        }\n\n        let should_match = [\n            // make sure subdomains (with leading .) match\n            \"hello.foo.bar\",\n            // make sure exact matches (without leading .) match (also makes sure spaces between\n            // entries work)\n            \"bar.baz\",\n            // make sure subdomains (without leading . in no_proxy) match\n            \"foo.bar.baz\",\n            // make sure subdomains (without leading . 
in no_proxy) match - this differs from cURL\n            \"foo.bar\",\n            // ipv4 address match within range\n            \"10.42.1.100\",\n            // ipv6 address exact match\n            \"[::1]\",\n            // ipv6 address match within range\n            \"[2001:db8:a0b:12f0::1]\",\n            // ipv4 address exact match\n            \"10.124.7.8\",\n        ];\n\n        for host in &should_match {\n            assert!(no_proxy.contains(host), \"should contain {host:?}\");\n        }\n    }\n\n    macro_rules! p {\n        ($($n:ident = $v:expr,)*) => ({Builder {\n            $($n: $v.into(),)*\n            ..Builder::default()\n        }.build(Extra::default())});\n    }\n\n    fn intercept(p: &Matcher, u: &str) -> Intercept {\n        match p.intercept(&u.parse().unwrap()).unwrap() {\n            Intercepted::Proxy(intercept) => intercept,\n            #[cfg(unix)]\n            Intercepted::Unix(path) => {\n                unreachable!(\"should not intercept unix socket: {path:?}\")\n            }\n        }\n    }\n\n    #[test]\n    fn test_all_proxy() {\n        let p = p! {\n            all = \"http://om.nom\",\n        };\n\n        assert_eq!(\"http://om.nom\", intercept(&p, \"http://example.com\").uri());\n\n        assert_eq!(\"http://om.nom\", intercept(&p, \"https://example.com\").uri());\n    }\n\n    #[test]\n    fn test_specific_overrides_all() {\n        let p = p! {\n            all = \"http://no.pe\",\n            http = \"http://y.ep\",\n        };\n\n        assert_eq!(\"http://no.pe\", intercept(&p, \"https://example.com\").uri());\n\n        // the http rule is \"more specific\" than the all rule\n        assert_eq!(\"http://y.ep\", intercept(&p, \"http://example.com\").uri());\n    }\n\n    #[test]\n    fn test_parse_no_scheme_defaults_to_http() {\n        let p = p! 
{\n            https = \"y.ep\",\n            http = \"127.0.0.1:8887\",\n        };\n\n        assert_eq!(intercept(&p, \"https://example.local\").uri(), \"http://y.ep\");\n        assert_eq!(\n            intercept(&p, \"http://example.local\").uri(),\n            \"http://127.0.0.1:8887\"\n        );\n    }\n\n    #[test]\n    fn test_parse_http_auth() {\n        let p = p! {\n            all = \"http://Aladdin:opensesame@y.ep\",\n        };\n\n        let proxy = intercept(&p, \"https://example.local\");\n        assert_eq!(proxy.uri(), \"http://y.ep\");\n        assert_eq!(\n            proxy.basic_auth().expect(\"basic_auth\"),\n            \"Basic QWxhZGRpbjpvcGVuc2VzYW1l\"\n        );\n    }\n\n    #[test]\n    fn test_parse_http_auth_without_password() {\n        let p = p! {\n            all = \"http://Aladdin@y.ep\",\n        };\n        let proxy = intercept(&p, \"https://example.local\");\n        assert_eq!(proxy.uri(), \"http://y.ep\");\n        assert_eq!(\n            proxy.basic_auth().expect(\"basic_auth\"),\n            \"Basic QWxhZGRpbjo=\"\n        );\n    }\n\n    #[test]\n    fn test_parse_http_auth_without_scheme() {\n        let p = p! {\n            all = \"Aladdin:opensesame@y.ep\",\n        };\n\n        let proxy = intercept(&p, \"https://example.local\");\n        assert_eq!(proxy.uri(), \"http://y.ep\");\n        assert_eq!(\n            proxy.basic_auth().expect(\"basic_auth\"),\n            \"Basic QWxhZGRpbjpvcGVuc2VzYW1l\"\n        );\n    }\n\n    #[test]\n    fn test_dont_parse_http_when_is_cgi() {\n        let mut builder = Matcher::builder();\n        builder.is_cgi = true;\n        builder.http = \"http://never.gonna.let.you.go\".into();\n        let m = builder.build(Extra::default());\n\n        assert!(m.intercept(&\"http://rick.roll\".parse().unwrap()).is_none());\n    }\n\n    fn test_parse_socks(uri: &str) {\n        let p = p! 
{\n            all = uri,\n        };\n\n        let proxy = intercept(&p, \"https://example.local\");\n        assert_eq!(proxy.uri(), uri);\n    }\n\n    #[test]\n    fn test_parse_socks4() {\n        test_parse_socks(\"socks4://localhost:8887\");\n        test_parse_socks(\"socks4a://localhost:8887\");\n    }\n\n    #[test]\n    fn test_parse_socks5() {\n        test_parse_socks(\"socks5://localhost:8887\");\n        test_parse_socks(\"socks5h://localhost:8887\");\n    }\n\n    #[test]\n    fn test_domain_matcher_case_insensitive() {\n        let domains = vec![\".foo.bar\".into()];\n        let matcher = DomainMatcher(domains);\n\n        assert!(matcher.contains(\"foo.bar\"));\n        assert!(matcher.contains(\"FOO.BAR\"));\n        assert!(matcher.contains(\"Foo.Bar\"));\n\n        assert!(matcher.contains(\"www.foo.bar\"));\n        assert!(matcher.contains(\"WWW.FOO.BAR\"));\n        assert!(matcher.contains(\"Www.Foo.Bar\"));\n    }\n\n    #[test]\n    fn test_no_proxy_case_insensitive() {\n        let p = p! 
{\n            all = \"http://proxy.local\",\n            no = \".example.com\",\n        };\n\n        // should bypass proxy (case insensitive match)\n        assert!(\n            p.intercept(&\"http://example.com\".parse().unwrap())\n                .is_none()\n        );\n        assert!(\n            p.intercept(&\"http://EXAMPLE.COM\".parse().unwrap())\n                .is_none()\n        );\n        assert!(\n            p.intercept(&\"http://Example.com\".parse().unwrap())\n                .is_none()\n        );\n\n        // subdomain should bypass proxy (case insensitive match)\n        assert!(\n            p.intercept(&\"http://www.example.com\".parse().unwrap())\n                .is_none()\n        );\n        assert!(\n            p.intercept(&\"http://WWW.EXAMPLE.COM\".parse().unwrap())\n                .is_none()\n        );\n        assert!(\n            p.intercept(&\"http://Www.Example.Com\".parse().unwrap())\n                .is_none()\n        );\n    }\n}\n"
  },
  {
    "path": "src/proxy/uds.rs",
    "content": "use std::{\n    path::{Path, PathBuf},\n    sync::Arc,\n};\n\n/// Trait for converting various types into a shared Unix Domain Socket path (`Arc<Path>`).\n///\n/// This trait is sealed to allow future extension while controlling which types can implement it.\n/// It enables ergonomic conversion from common path types such as `String`, `&str`, `PathBuf`,\n/// `&Path`, and `Arc<Path>` into a unified `Arc<Path>` representation for Unix socket usage.\n///\n/// # Supported types\n/// - `String`\n/// - `&str`\n/// - `PathBuf`\n/// - `&Path`\n/// - `Arc<Path>`\npub trait IntoUnixSocket: sealed::Sealed {\n    /// Returns the Unix Domain Socket path as an [`Arc<Path>`].\n    fn unix_socket(self) -> Arc<Path>;\n}\n\nimpl IntoUnixSocket for String {\n    fn unix_socket(self) -> Arc<Path> {\n        Arc::from(PathBuf::from(self))\n    }\n}\n\nimpl IntoUnixSocket for &'_ str {\n    fn unix_socket(self) -> Arc<Path> {\n        Arc::from(PathBuf::from(self))\n    }\n}\n\nimpl IntoUnixSocket for &'_ Path {\n    fn unix_socket(self) -> Arc<Path> {\n        Arc::from(self)\n    }\n}\nimpl IntoUnixSocket for PathBuf {\n    fn unix_socket(self) -> Arc<Path> {\n        Arc::from(self)\n    }\n}\n\nimpl IntoUnixSocket for Arc<Path> {\n    fn unix_socket(self) -> Arc<Path> {\n        self\n    }\n}\n\nmod sealed {\n    use std::{\n        path::{Path, PathBuf},\n        sync::Arc,\n    };\n\n    /// Sealed trait to prevent external implementations of `IntoUnixSocket`.\n    pub trait Sealed {}\n\n    impl Sealed for String {}\n    impl Sealed for &'_ str {}\n    impl Sealed for &'_ Path {}\n    impl Sealed for PathBuf {}\n    impl Sealed for Arc<Path> {}\n}\n"
  },
  {
    "path": "src/proxy/win.rs",
    "content": "pub(super) fn with_system(builder: &mut super::matcher::Builder) {\n    let Ok(settings) = windows_registry::CURRENT_USER\n        .open(\"Software\\\\Microsoft\\\\Windows\\\\CurrentVersion\\\\Internet Settings\")\n    else {\n        return;\n    };\n\n    if settings.get_u32(\"ProxyEnable\").unwrap_or(0) == 0 {\n        return;\n    }\n\n    if let Ok(val) = settings.get_string(\"ProxyServer\") {\n        if builder.http.is_empty() {\n            builder.http = val.clone();\n        }\n        if builder.https.is_empty() {\n            builder.https = val;\n        }\n    }\n\n    if builder.no.is_empty() {\n        if let Ok(val) = settings.get_string(\"ProxyOverride\") {\n            builder.no = val\n                .split(';')\n                .map(|s| s.trim())\n                .collect::<Vec<&str>>()\n                .join(\",\")\n                .replace(\"*.\", \"\");\n        }\n    }\n}\n"
  },
  {
    "path": "src/proxy.rs",
    "content": "#[cfg(all(target_os = \"macos\", feature = \"system-proxy\"))]\nmod mac;\n#[cfg(unix)]\nmod uds;\n#[cfg(all(windows, feature = \"system-proxy\"))]\nmod win;\n\npub(crate) mod matcher;\n\nuse std::hash::{Hash, Hasher};\n#[cfg(unix)]\nuse std::{path::Path, sync::Arc};\n\nuse http::{HeaderMap, Uri, header::HeaderValue};\n\nuse crate::{IntoUri, ext::UriExt};\n\n// # Internals\n//\n// This module is a couple pieces:\n//\n// - The public builder API\n// - The internal built types that our Connector knows how to use.\n//\n// The user creates a builder (`wreq::Proxy`), and configures any extras.\n// Once that type is passed to the `ClientBuilder`, we convert it into the\n// built matcher types, making use of `core`'s matchers.\n\n/// Configuration of a proxy that a `Client` should pass requests to.\n///\n/// A `Proxy` has a couple pieces to it:\n///\n/// - a URI of how to talk to the proxy\n/// - rules on what `Client` requests should be directed to the proxy\n///\n/// For instance, let's look at `Proxy::http`:\n///\n/// ```rust\n/// # fn run() -> Result<(), Box<dyn std::error::Error>> {\n/// let proxy = wreq::Proxy::http(\"https://secure.example\")?;\n/// # Ok(())\n/// # }\n/// ```\n///\n/// This proxy will intercept all HTTP requests, and make use of the proxy\n/// at `https://secure.example`. A request to `http://hyper.rs` will talk\n/// to your proxy. A request to `https://hyper.rs` will not.\n///\n/// Multiple `Proxy` rules can be configured for a `Client`. The `Client` will\n/// check each `Proxy` in the order it was added. 
This could mean that a\n/// `Proxy` added first with eager intercept rules, such as `Proxy::all`,\n/// would prevent a `Proxy` later in the list from ever working, so take care.\n///\n/// By enabling the `\"socks\"` feature it is possible to use a socks proxy:\n/// ```rust\n/// # fn run() -> Result<(), Box<dyn std::error::Error>> {\n/// let proxy = wreq::Proxy::http(\"socks5://192.168.1.1:9000\")?;\n/// # Ok(())\n/// # }\n/// ```\n#[derive(Clone, Debug)]\npub struct Proxy {\n    extra: Extra,\n    scheme: ProxyScheme,\n    no_proxy: Option<NoProxy>,\n}\n\n/// A configuration for filtering out requests that shouldn't be proxied\n#[derive(Clone, Debug, Default)]\npub struct NoProxy {\n    inner: String,\n}\n\n// ===== Internal =====\n\n#[allow(clippy::large_enum_variant)]\n#[derive(Clone, PartialEq, Eq)]\npub(crate) enum Intercepted {\n    Proxy(matcher::Intercept),\n    #[cfg(unix)]\n    Unix(Arc<Path>),\n}\n\n#[derive(Debug, Clone, PartialEq, Eq, Hash)]\npub(crate) struct Matcher {\n    inner: Box<matcher::Matcher>,\n}\n\n#[derive(Clone, Debug)]\nenum ProxyScheme {\n    All(Uri),\n    Http(Uri),\n    Https(Uri),\n    #[cfg(unix)]\n    Unix(Arc<Path>),\n}\n\n#[derive(Debug, Clone, Default, PartialEq, Eq)]\nstruct Extra {\n    auth: Option<HeaderValue>,\n    misc: Option<HeaderMap>,\n}\n\n// ===== impl Proxy =====\n\nimpl Proxy {\n    /// Proxy all HTTP traffic to the passed URI.\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// # extern crate wreq;\n    /// # fn run() -> Result<(), Box<dyn std::error::Error>> {\n    /// let client = wreq::Client::builder()\n    ///     .proxy(wreq::Proxy::http(\"https://my.prox\")?)\n    ///     .build()?;\n    /// # Ok(())\n    /// # }\n    /// # fn main() {}\n    /// ```\n    pub fn http<U: IntoUri>(uri: U) -> crate::Result<Proxy> {\n        uri.into_uri().map(ProxyScheme::Http).map(Proxy::new)\n    }\n\n    /// Proxy all HTTPS traffic to the passed URI.\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// # extern 
crate wreq;\n    /// # fn run() -> Result<(), Box<dyn std::error::Error>> {\n    /// let client = wreq::Client::builder()\n    ///     .proxy(wreq::Proxy::https(\"https://example.prox:4545\")?)\n    ///     .build()?;\n    /// # Ok(())\n    /// # }\n    /// # fn main() {}\n    /// ```\n    pub fn https<U: IntoUri>(uri: U) -> crate::Result<Proxy> {\n        uri.into_uri().map(ProxyScheme::Https).map(Proxy::new)\n    }\n\n    /// Proxy **all** traffic to the passed URI.\n    ///\n    /// \"All\" refers to `https` and `http` URIs. Other schemes are not\n    /// recognized by wreq.\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// # extern crate wreq;\n    /// # fn run() -> Result<(), Box<dyn std::error::Error>> {\n    /// let client = wreq::Client::builder()\n    ///     .proxy(wreq::Proxy::all(\"http://pro.xy\")?)\n    ///     .build()?;\n    /// # Ok(())\n    /// # }\n    /// # fn main() {}\n    /// ```\n    pub fn all<U: IntoUri>(uri: U) -> crate::Result<Proxy> {\n        uri.into_uri().map(ProxyScheme::All).map(Proxy::new)\n    }\n\n    /// Proxy all traffic to the passed Unix Domain Socket path.\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// # extern crate wreq;\n    /// # fn run() -> Result<(), Box<dyn std::error::Error>> {\n    /// let client = wreq::Client::builder()\n    ///     .proxy(wreq::Proxy::unix(\"/var/run/docker.sock\")?)\n    ///     .build()?;\n    /// # Ok(())\n    /// # }\n    /// # fn main() {}\n    /// ```\n    #[cfg(unix)]\n    pub fn unix<P: uds::IntoUnixSocket>(unix: P) -> crate::Result<Proxy> {\n        Ok(Proxy::new(ProxyScheme::Unix(unix.unix_socket())))\n    }\n\n    fn new(scheme: ProxyScheme) -> Proxy {\n        Proxy {\n            extra: Extra {\n                auth: None,\n                misc: None,\n            },\n            scheme,\n            no_proxy: None,\n        }\n    }\n\n    /// Set the `Proxy-Authorization` header using Basic auth.\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// # 
extern crate wreq;\n    /// # fn run() -> Result<(), Box<dyn std::error::Error>> {\n    /// let proxy = wreq::Proxy::https(\"http://localhost:1234\")?.basic_auth(\"Aladdin\", \"open sesame\");\n    /// # Ok(())\n    /// # }\n    /// # fn main() {}\n    /// ```\n    pub fn basic_auth(mut self, username: &str, password: &str) -> Proxy {\n        match self.scheme {\n            ProxyScheme::All(ref mut uri)\n            | ProxyScheme::Http(ref mut uri)\n            | ProxyScheme::Https(ref mut uri) => {\n                let header = crate::util::basic_auth(username, Some(password));\n                uri.set_userinfo(username, Some(password));\n                self.extra.auth = Some(header);\n            }\n            #[cfg(unix)]\n            ProxyScheme::Unix(_) => {\n                // For Unix sockets, we don't set the auth header.\n                // This is a no-op, but keeps the API consistent.\n            }\n        }\n\n        self\n    }\n\n    /// Set the `Proxy-Authorization` header to a specified value.\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// # extern crate wreq;\n    /// # use wreq::header::*;\n    /// # fn run() -> Result<(), Box<dyn std::error::Error>> {\n    /// let proxy = wreq::Proxy::https(\"http://localhost:1234\")?\n    ///     .custom_http_auth(HeaderValue::from_static(\"justletmeinalreadyplease\"));\n    /// # Ok(())\n    /// # }\n    /// # fn main() {}\n    /// ```\n    pub fn custom_http_auth(mut self, header_value: HeaderValue) -> Proxy {\n        self.extra.auth = Some(header_value);\n        self\n    }\n\n    /// Adds a Custom Headers to Proxy\n    /// Adds custom headers to this Proxy\n    ///\n    /// # Example\n    /// ```\n    /// # extern crate wreq;\n    /// # use wreq::header::*;\n    /// # fn run() -> Result<(), Box<dyn std::error::Error>> {\n    /// let mut headers = HeaderMap::new();\n    /// headers.insert(USER_AGENT, \"wreq\".parse().unwrap());\n    /// let proxy = 
wreq::Proxy::https(\"http://localhost:1234\")?.custom_http_headers(headers);\n    /// # Ok(())\n    /// # }\n    /// # fn main() {}\n    /// ```\n    pub fn custom_http_headers(mut self, headers: HeaderMap) -> Proxy {\n        match self.scheme {\n            ProxyScheme::All(_) | ProxyScheme::Http(_) | ProxyScheme::Https(_) => {\n                self.extra.misc = Some(headers);\n            }\n            #[cfg(unix)]\n            ProxyScheme::Unix(_) => {\n                // For Unix sockets, we don't set custom headers.\n                // This is a no-op, but keeps the API consistent.\n            }\n        }\n\n        self\n    }\n\n    /// Adds a `No Proxy` exclusion list to this Proxy\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// # extern crate wreq;\n    /// # fn run() -> Result<(), Box<dyn std::error::Error>> {\n    /// let proxy = wreq::Proxy::https(\"http://localhost:1234\")?\n    ///     .no_proxy(wreq::NoProxy::from_string(\"direct.tld, sub.direct2.tld\"));\n    /// # Ok(())\n    /// # }\n    /// # fn main() {}\n    /// ```\n    pub fn no_proxy(mut self, no_proxy: Option<NoProxy>) -> Proxy {\n        self.no_proxy = no_proxy;\n        self\n    }\n\n    pub(crate) fn into_matcher(self) -> Matcher {\n        let Proxy {\n            scheme,\n            extra,\n            no_proxy,\n        } = self;\n\n        let no_proxy = no_proxy.as_ref().map_or(\"\", |n| n.inner.as_ref());\n\n        let inner = match scheme {\n            ProxyScheme::All(uri) => matcher::Matcher::builder()\n                .all(uri.to_string())\n                .no(no_proxy)\n                .build(extra),\n            ProxyScheme::Http(uri) => matcher::Matcher::builder()\n                .http(uri.to_string())\n                .no(no_proxy)\n                .build(extra),\n            ProxyScheme::Https(uri) => matcher::Matcher::builder()\n                .https(uri.to_string())\n                .no(no_proxy)\n                .build(extra),\n            
#[cfg(unix)]\n            ProxyScheme::Unix(unix) => matcher::Matcher::builder()\n                .unix(unix)\n                .no(no_proxy)\n                .build(extra),\n        };\n\n        Matcher {\n            inner: Box::new(inner),\n        }\n    }\n}\n\n// ===== impl NoProxy =====\n\nimpl NoProxy {\n    /// Returns a new no-proxy configuration based on environment variables (or `None` if no\n    /// variables are set) see [self::NoProxy::from_string()] for the string format\n    pub fn from_env() -> Option<NoProxy> {\n        let raw = std::env::var(\"NO_PROXY\")\n            .or_else(|_| std::env::var(\"no_proxy\"))\n            .ok()?;\n\n        // Per the docs, this returns `None` if no environment variable is set. We can only reach\n        // here if an env var is set, so we return `Some(NoProxy::default)` if `from_string`\n        // returns None, which occurs with an empty string.\n        Some(Self::from_string(&raw).unwrap_or_default())\n    }\n\n    /// Returns a new no-proxy configuration based on a `no_proxy` string (or `None` if no variables\n    /// are set)\n    /// The rules are as follows:\n    /// * The environment variable `NO_PROXY` is checked, if it is not set, `no_proxy` is checked\n    /// * If neither environment variable is set, `None` is returned\n    /// * Entries are expected to be comma-separated (whitespace between entries is ignored)\n    /// * IP addresses (both IPv4 and IPv6) are allowed, as are optional subnet masks (by adding\n    ///   /size, for example \"`192.168.1.0/24`\").\n    /// * An entry \"`*`\" matches all hostnames (this is the only wildcard allowed)\n    /// * Any other entry is considered a domain name (and may contain a leading dot, for example\n    ///   `google.com` and `.google.com` are equivalent) and would match both that domain AND all\n    ///   subdomains.\n    ///\n    /// For example, if `\"NO_PROXY=google.com, 192.168.1.0/24\"` was set, all the following would\n    /// match (and therefore 
would bypass the proxy):\n    /// * `http://google.com/`\n    /// * `http://www.google.com/`\n    /// * `http://192.168.1.42/`\n    ///\n    /// The URI `http://notgoogle.com/` would not match.\n    pub fn from_string(no_proxy_list: &str) -> Option<Self> {\n        Some(NoProxy {\n            inner: no_proxy_list.into(),\n        })\n    }\n}\n\n// ===== impl Matcher =====\n\nimpl Matcher {\n    pub(crate) fn system() -> Self {\n        Self {\n            inner: Box::new(matcher::Matcher::from_system()),\n        }\n    }\n\n    /// Intercept the given destination URI, returning the intercepted\n    /// proxy configuration if there is a match.\n    #[inline]\n    pub(crate) fn intercept(&self, dst: &Uri) -> Option<Intercepted> {\n        self.inner.intercept(dst)\n    }\n}\n\n// ===== impl Extra =====\n\nimpl Hash for Extra {\n    fn hash<H: Hasher>(&self, state: &mut H) {\n        self.auth.hash(state);\n        if let Some(ref misc) = self.misc {\n            for (k, v) in misc.iter() {\n                k.as_str().hash(state);\n                v.as_bytes().hash(state);\n            }\n        } else {\n            1u8.hash(state);\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    fn uri(s: &str) -> Uri {\n        s.parse().unwrap()\n    }\n\n    fn intercept(p: &Matcher, s: &Uri) -> matcher::Intercept {\n        match p.intercept(s).unwrap() {\n            Intercepted::Proxy(proxy) => proxy,\n            #[cfg(unix)]\n            _ => {\n                unreachable!(\"intercepted_port should only be called with a Proxy matcher\")\n            }\n        }\n    }\n\n    fn intercepted_uri(p: &Matcher, s: &str) -> Uri {\n        match p.intercept(&s.parse().unwrap()).unwrap() {\n            Intercepted::Proxy(proxy) => proxy.uri().clone(),\n            #[cfg(unix)]\n            _ => {\n                unreachable!(\"intercepted_uri should only be called with a Proxy matcher\")\n            }\n        }\n    }\n\n    #[test]\n    fn 
test_http() {\n        let target = \"http://example.domain/\";\n        let p = Proxy::http(target).unwrap().into_matcher();\n\n        let http = \"http://hyper.rs\";\n        let other = \"https://hyper.rs\";\n\n        assert_eq!(intercepted_uri(&p, http), target);\n        assert!(p.intercept(&uri(other)).is_none());\n    }\n\n    #[test]\n    fn test_https() {\n        let target = \"http://example.domain/\";\n        let p = Proxy::https(target).unwrap().into_matcher();\n\n        let http = \"http://hyper.rs\";\n        let other = \"https://hyper.rs\";\n\n        assert!(p.intercept(&uri(http)).is_none());\n        assert_eq!(intercepted_uri(&p, other), target);\n    }\n\n    #[test]\n    fn test_all() {\n        let target = \"http://example.domain/\";\n        let p = Proxy::all(target).unwrap().into_matcher();\n\n        let http = \"http://hyper.rs\";\n        let https = \"https://hyper.rs\";\n        // no longer supported\n        //let other = \"x-youve-never-heard-of-me-mr-proxy://hyper.rs\";\n\n        assert_eq!(intercepted_uri(&p, http), target);\n        assert_eq!(intercepted_uri(&p, https), target);\n        //assert_eq!(intercepted_uri(&p, other), target);\n    }\n\n    #[test]\n    fn test_standard_with_custom_auth_header() {\n        let target = \"http://example.domain/\";\n        let p = Proxy::all(target)\n            .unwrap()\n            .custom_http_auth(http::HeaderValue::from_static(\"testme\"))\n            .into_matcher();\n\n        let got = intercept(&p, &uri(\"http://anywhere.local\"));\n        let auth = got.basic_auth().unwrap();\n        assert_eq!(auth, \"testme\");\n    }\n\n    #[test]\n    fn test_maybe_has_http_auth() {\n        let uri = uri(\"http://example.domain/\");\n\n        let m = Proxy::all(\"https://letme:in@yo.local\")\n            .unwrap()\n            .into_matcher();\n\n        let got = intercept(&m, &uri);\n        assert!(got.basic_auth().is_some(), \"https forwards\");\n\n        let m = 
Proxy::all(\"http://letme:in@yo.local\")\n            .unwrap()\n            .into_matcher();\n\n        let got = intercept(&m, &uri);\n        assert!(got.basic_auth().is_some(), \"http forwards\");\n    }\n\n    #[test]\n    fn test_maybe_has_http_custom_headers() {\n        let uri = uri(\"http://example.domain/\");\n\n        let mut headers = HeaderMap::new();\n        headers.insert(\"x-custom-header\", HeaderValue::from_static(\"custom-value\"));\n\n        let m = Proxy::all(\"https://yo.local\")\n            .unwrap()\n            .custom_http_headers(headers.clone())\n            .into_matcher();\n\n        match m.intercept(&uri).unwrap() {\n            Intercepted::Proxy(proxy) => {\n                let got_headers = proxy.custom_headers().unwrap();\n                assert_eq!(got_headers, &headers, \"https forwards\");\n            }\n            #[cfg(unix)]\n            _ => {\n                unreachable!(\"Expected a Proxy Intercepted\");\n            }\n        }\n\n        let m = Proxy::all(\"http://yo.local\")\n            .unwrap()\n            .custom_http_headers(headers.clone())\n            .into_matcher();\n\n        match m.intercept(&uri).unwrap() {\n            Intercepted::Proxy(proxy) => {\n                let got_headers = proxy.custom_headers().unwrap();\n                assert_eq!(got_headers, &headers, \"http forwards\");\n            }\n            #[cfg(unix)]\n            _ => {\n                unreachable!(\"Expected a Proxy Intercepted\");\n            }\n        }\n    }\n\n    fn test_socks_proxy_default_port(uri: &str, url2: &str, port: u16) {\n        let m = Proxy::all(uri).unwrap().into_matcher();\n\n        let http = \"http://hyper.rs\";\n        let https = \"https://hyper.rs\";\n\n        assert_eq!(intercepted_uri(&m, http).port_u16(), Some(1080));\n        assert_eq!(intercepted_uri(&m, https).port_u16(), Some(1080));\n\n        // custom port\n        let m = Proxy::all(url2).unwrap().into_matcher();\n\n       
 assert_eq!(intercepted_uri(&m, http).port_u16(), Some(port));\n        assert_eq!(intercepted_uri(&m, https).port_u16(), Some(port));\n    }\n\n    #[test]\n    fn test_socks4_proxy_default_port() {\n        test_socks_proxy_default_port(\"socks4://example.com\", \"socks4://example.com:1234\", 1234);\n        test_socks_proxy_default_port(\"socks4a://example.com\", \"socks4a://example.com:1234\", 1234);\n    }\n\n    #[test]\n    fn test_socks5_proxy_default_port() {\n        test_socks_proxy_default_port(\"socks5://example.com\", \"socks5://example.com:1234\", 1234);\n        test_socks_proxy_default_port(\"socks5h://example.com\", \"socks5h://example.com:1234\", 1234);\n    }\n}\n"
  },
  {
    "path": "src/redirect.rs",
    "content": "//! Redirect Handling\n//!\n//! By default, a `Client` does not follow HTTP redirects. To enable automatic\n//! redirect handling with a maximum redirect chain of 10 hops, use a [`Policy`]\n//! with [`ClientBuilder::redirect()`](crate::ClientBuilder::redirect).\n\nuse std::{borrow::Cow, error::Error as StdError, fmt, sync::Arc};\n\nuse bytes::Bytes;\nuse futures_util::FutureExt;\nuse http::{HeaderMap, HeaderName, HeaderValue, StatusCode, Uri};\n\nuse crate::{\n    client::{Body, layer::redirect},\n    config::RequestConfig,\n    error::{BoxError, Error},\n    ext::UriExt,\n    header::{AUTHORIZATION, COOKIE, PROXY_AUTHORIZATION, REFERER, WWW_AUTHENTICATE},\n};\n\n/// A type that controls the policy on how to handle the following of redirects.\n///\n/// The default value will catch redirect loops, and has a maximum of 10\n/// redirects it will follow in a chain before returning an error.\n///\n/// - `limited` can be used have the same as the default behavior, but adjust the allowed maximum\n///   redirect hops in a chain.\n/// - `none` can be used to disable all redirect behavior.\n/// - `custom` can be used to create a customized policy.\n#[derive(Debug, Clone)]\npub struct Policy {\n    inner: PolicyKind,\n}\n\n/// A type that holds information on the next request and previous requests\n/// in redirect chain.\n#[derive(Debug)]\n#[non_exhaustive]\npub struct Attempt<'a, const PENDING: bool = true> {\n    /// The status code of the redirect response.\n    pub status: StatusCode,\n\n    /// The headers of the redirect response.\n    pub headers: Cow<'a, HeaderMap>,\n\n    /// The URI to redirect to.\n    pub uri: Cow<'a, Uri>,\n\n    /// The list of previous URIs that have already been requested in this chain.\n    pub previous: Cow<'a, [Uri]>,\n}\n\n/// An action to perform when a redirect status code is found.\n#[derive(Debug)]\npub struct Action {\n    inner: redirect::Action,\n}\n\n/// Redirect history information for a response.\n#[derive(Debug, 
Clone)]\npub struct History(Vec<HistoryEntry>);\n\n/// An entry in the redirect history.\n#[derive(Debug, Clone)]\n#[non_exhaustive]\npub struct HistoryEntry {\n    /// The status code of the redirect response.\n    pub status: StatusCode,\n\n    /// The URI of the redirect response.\n    pub uri: Uri,\n\n    /// The previous URI before the redirect response.\n    pub previous: Uri,\n\n    /// The headers of the redirect response.\n    pub headers: HeaderMap,\n}\n\n#[derive(Clone)]\nenum PolicyKind {\n    Custom(Arc<dyn Fn(Attempt) -> Action + Send + Sync + 'static>),\n    Limit(usize),\n    None,\n}\n\n#[derive(Debug)]\nstruct TooManyRedirects;\n\n/// A redirect policy handler for HTTP clients.\n///\n/// [`FollowRedirectPolicy`] manages how HTTP redirects are handled by the client,\n/// including the maximum number of redirects, whether to set the `Referer` header,\n/// HTTPS-only enforcement, and redirect history tracking.\n///\n/// This type is used internally by the client to implement redirect logic according to\n/// the configured [`Policy`]. 
It ensures that only allowed redirects are followed,\n/// sensitive headers are removed when crossing hosts, and the `Referer` header is set\n/// when appropriate.\n#[derive(Clone)]\npub(crate) struct FollowRedirectPolicy {\n    policy: RequestConfig<Policy>,\n    referer: bool,\n    uris: Vec<Uri>,\n    https_only: bool,\n    history: Option<Vec<HistoryEntry>>,\n}\n\n// ===== impl Policy =====\n\nimpl Policy {\n    /// Create a [`Policy`] with a maximum number of redirects.\n    ///\n    /// An [`Error`] will be returned if the max is reached.\n    #[inline]\n    pub fn limited(max: usize) -> Self {\n        Self {\n            inner: PolicyKind::Limit(max),\n        }\n    }\n\n    /// Create a [`Policy`] that does not follow any redirect.\n    #[inline]\n    pub fn none() -> Self {\n        Self {\n            inner: PolicyKind::None,\n        }\n    }\n\n    /// Create a custom [`Policy`] using the passed function.\n    ///\n    /// # Note\n    ///\n    /// The default [`Policy`] handles a maximum loop\n    /// chain, but the custom variant does not do that for you automatically.\n    /// The custom policy should have some way of handling those.\n    ///\n    /// Information on the next request and previous requests can be found\n    /// on the [`Attempt`] argument passed to the closure.\n    ///\n    /// Actions can be conveniently created from methods on the\n    /// [`Attempt`].\n    ///\n    /// # Example\n    ///\n    /// ```rust\n    /// # use wreq::{Error, redirect};\n    /// #\n    /// # fn run() -> Result<(), Error> {\n    /// let custom = redirect::Policy::custom(|attempt| {\n    ///     if attempt.previous.len() > 5 {\n    ///         attempt.error(\"too many redirects\")\n    ///     } else if attempt.uri() == \"example.domain\" {\n    ///         // prevent redirects to 'example.domain'\n    ///         attempt.stop()\n    ///     } else {\n    ///         attempt.follow()\n    ///     }\n    /// });\n    /// let client = 
wreq::Client::builder().redirect(custom).build()?;\n    /// # Ok(())\n    /// # }\n    /// ```\n    #[inline]\n    pub fn custom<T>(policy: T) -> Self\n    where\n        T: Fn(Attempt) -> Action + Send + Sync + 'static,\n    {\n        Self {\n            inner: PolicyKind::Custom(Arc::new(policy)),\n        }\n    }\n\n    /// Apply this policy to a given [`Attempt`] to produce a [`Action`].\n    ///\n    /// # Note\n    ///\n    /// This method can be used together with [`Policy::custom()`]\n    /// to construct one [`Policy`] that wraps another.\n    ///\n    /// # Example\n    ///\n    /// ```rust\n    /// # use wreq::{Error, redirect};\n    /// #\n    /// # fn run() -> Result<(), Error> {\n    /// let custom = redirect::Policy::custom(|attempt| {\n    ///     eprintln!(\"{}, Location: {:?}\", attempt.status(), attempt.uri());\n    ///     redirect::Policy::default().redirect(attempt)\n    /// });\n    /// # Ok(())\n    /// # }\n    /// ```\n    pub fn redirect(&self, attempt: Attempt) -> Action {\n        match self.inner {\n            PolicyKind::Custom(ref custom) => custom(attempt),\n            PolicyKind::Limit(max) => {\n                // The first URI in the previous is the initial URI and not a redirection. 
It needs\n                // to be excluded.\n                if attempt.previous.len() > max {\n                    attempt.error(TooManyRedirects)\n                } else {\n                    attempt.follow()\n                }\n            }\n            PolicyKind::None => attempt.stop(),\n        }\n    }\n\n    #[inline]\n    fn check(\n        &self,\n        status: StatusCode,\n        headers: &HeaderMap,\n        next: &Uri,\n        previous: &[Uri],\n    ) -> redirect::Action {\n        self.redirect(Attempt {\n            status,\n            headers: Cow::Borrowed(headers),\n            uri: Cow::Borrowed(next),\n            previous: Cow::Borrowed(previous),\n        })\n        .inner\n    }\n}\n\nimpl Default for Policy {\n    #[inline]\n    fn default() -> Policy {\n        // Keep `is_default` in sync\n        Policy::limited(10)\n    }\n}\n\nimpl_request_config_value!(Policy);\n\n// ===== impl Attempt =====\n\nimpl<const PENDING: bool> Attempt<'_, PENDING> {\n    /// Returns an action meaning wreq should follow the next URI.\n    #[inline]\n    pub fn follow(self) -> Action {\n        Action {\n            inner: redirect::Action::Follow,\n        }\n    }\n\n    /// Returns an action meaning wreq should not follow the next URI.\n    ///\n    /// The 30x response will be returned as the `Ok` result.\n    #[inline]\n    pub fn stop(self) -> Action {\n        Action {\n            inner: redirect::Action::Stop,\n        }\n    }\n\n    /// Returns an [`Action`] failing the redirect with an error.\n    ///\n    /// The [`Error`] will be returned for the result of the sent request.\n    #[inline]\n    pub fn error<E: Into<BoxError>>(self, error: E) -> Action {\n        Action {\n            inner: redirect::Action::Error(error.into()),\n        }\n    }\n}\n\nimpl Attempt<'_, true> {\n    /// Returns an action meaning wreq should perform the redirect asynchronously.\n    ///\n    /// The provided async closure receives an owned 
[`Attempt<'static>`] and should\n    /// return an [`Action`] to determine the final redirect behavior.\n    ///\n    /// # Example\n    ///\n    /// ```rust\n    /// # use wreq::redirect;\n    /// #\n    /// let policy = redirect::Policy::custom(|attempt| {\n    ///     attempt.pending(|attempt| async move {\n    ///         // Perform some async operation\n    ///         if attempt.uri().host() == Some(\"trusted.domain\") {\n    ///             attempt.follow()\n    ///         } else {\n    ///             attempt.stop()\n    ///         }\n    ///     })\n    /// });\n    /// ```\n    pub fn pending<F, Fut>(self, func: F) -> Action\n    where\n        F: FnOnce(Attempt<'static, false>) -> Fut + Send + 'static,\n        Fut: Future<Output = Action> + Send + 'static,\n    {\n        let attempt = Attempt {\n            status: self.status,\n            headers: Cow::Owned(self.headers.into_owned()),\n            uri: Cow::Owned(self.uri.into_owned()),\n            previous: Cow::Owned(self.previous.into_owned()),\n        };\n        let pending = Box::pin(func(attempt).map(|action| action.inner));\n        Action {\n            inner: redirect::Action::Pending(pending),\n        }\n    }\n}\n\n// ===== impl History =====\n\nimpl IntoIterator for History {\n    type Item = HistoryEntry;\n    type IntoIter = std::vec::IntoIter<HistoryEntry>;\n\n    #[inline]\n    fn into_iter(self) -> Self::IntoIter {\n        self.0.into_iter()\n    }\n}\n\nimpl<'a> IntoIterator for &'a History {\n    type Item = &'a HistoryEntry;\n    type IntoIter = std::slice::Iter<'a, HistoryEntry>;\n\n    #[inline]\n    fn into_iter(self) -> Self::IntoIter {\n        self.0.iter()\n    }\n}\n\n// ===== impl PolicyKind =====\n\nimpl fmt::Debug for PolicyKind {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        match *self {\n            PolicyKind::Custom(..) 
=> f.pad(\"Custom\"),\n            PolicyKind::Limit(max) => f.debug_tuple(\"Limit\").field(&max).finish(),\n            PolicyKind::None => f.pad(\"None\"),\n        }\n    }\n}\n\n// ===== impl TooManyRedirects =====\n\nimpl fmt::Display for TooManyRedirects {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        f.write_str(\"too many redirects\")\n    }\n}\n\nimpl StdError for TooManyRedirects {}\n\n// ===== impl FollowRedirectPolicy =====\n\nimpl FollowRedirectPolicy {\n    /// Creates a new redirect policy handler with the given [`Policy`].\n    pub fn new(policy: Policy) -> Self {\n        Self {\n            policy: RequestConfig::new(Some(policy)),\n            referer: false,\n            uris: Vec::new(),\n            https_only: false,\n            history: None,\n        }\n    }\n\n    /// Enables or disables automatic Referer header management.\n    #[inline]\n    pub fn with_referer(mut self, referer: bool) -> Self {\n        self.referer = referer;\n        self\n    }\n\n    /// Enables or disables HTTPS-only redirect enforcement.\n    #[inline]\n    pub fn with_https_only(mut self, https_only: bool) -> Self {\n        self.https_only = https_only;\n        self\n    }\n}\n\nimpl redirect::Policy<Body, BoxError> for FollowRedirectPolicy {\n    fn redirect(&mut self, attempt: redirect::Attempt<'_>) -> Result<redirect::Action, BoxError> {\n        // Parse the next URI from the attempt.\n        let previous_uri = attempt.previous;\n        let next_uri = attempt.location;\n\n        // Push the previous URI to the list of URLs.\n        self.uris.push(previous_uri.clone());\n\n        // Get policy from config\n        let policy = self\n            .policy\n            .as_ref()\n            .expect(\"[BUG] FollowRedirectPolicy should always have a policy set\");\n\n        // Check if the next URI is already in the list of URLs.\n        match policy.check(attempt.status, attempt.headers, next_uri, &self.uris) {\n            
redirect::Action::Follow => {\n                // Validate the redirect URI scheme\n                if !(next_uri.is_http() || next_uri.is_https()) {\n                    return Err(Error::uri_bad_scheme(next_uri.clone()).into());\n                }\n\n                // Check HTTPS-only policy\n                if self.https_only && !next_uri.is_https() {\n                    return Err(Error::redirect(\n                        Error::uri_bad_scheme(next_uri.clone()),\n                        next_uri.clone(),\n                    )\n                    .into());\n                }\n\n                // Record redirect history\n                if !matches!(policy.inner, PolicyKind::None) {\n                    self.history.get_or_insert_default().push(HistoryEntry {\n                        status: attempt.status,\n                        uri: attempt.location.clone(),\n                        previous: attempt.previous.clone(),\n                        headers: attempt.headers.clone(),\n                    });\n                }\n\n                Ok(redirect::Action::Follow)\n            }\n            redirect::Action::Stop => Ok(redirect::Action::Stop),\n            redirect::Action::Pending(task) => Ok(redirect::Action::Pending(task)),\n            redirect::Action::Error(err) => Err(Error::redirect(err, previous_uri.clone()).into()),\n        }\n    }\n\n    fn follow_redirects(&mut self, request: &mut http::Request<Body>) -> bool {\n        self.policy\n            .load(request.extensions_mut())\n            .is_some_and(|policy| !matches!(policy.inner, PolicyKind::None))\n    }\n\n    fn on_request(&mut self, req: &mut http::Request<Body>) {\n        let next_url = req.uri().clone();\n        remove_sensitive_headers(req.headers_mut(), &next_url, &self.uris);\n        if self.referer {\n            if let Some(previous_url) = self.uris.last() {\n                if let Some(v) = make_referer(next_url, previous_url) {\n                    
req.headers_mut().insert(REFERER, v);\n                }\n            }\n        }\n    }\n\n    fn on_response<Body>(&mut self, response: &mut http::Response<Body>) {\n        if let Some(history) = self.history.take() {\n            response.extensions_mut().insert(History(history));\n        }\n    }\n\n    #[inline]\n    fn clone_body(&self, body: &Body) -> Option<Body> {\n        body.try_clone()\n    }\n}\n\nfn make_referer(next: Uri, previous: &Uri) -> Option<HeaderValue> {\n    if next.is_http() && previous.is_https() {\n        return None;\n    }\n\n    let mut referer = previous.clone();\n    referer.set_userinfo(\"\", None);\n    HeaderValue::from_maybe_shared(Bytes::from(referer.to_string())).ok()\n}\n\nfn remove_sensitive_headers(headers: &mut HeaderMap, next: &Uri, previous: &[Uri]) {\n    if let Some(previous) = previous.last() {\n        let cross_host = next.host() != previous.host()\n            || next.port() != previous.port()\n            || next.scheme() != previous.scheme();\n        if cross_host {\n            /// Avoid dynamic allocation of `HeaderName` by using `from_static`.\n            /// https://github.com/hyperium/http/blob/e9de46c9269f0a476b34a02a401212e20f639df2/src/header/map.rs#L3794\n            const COOKIE2: HeaderName = HeaderName::from_static(\"cookie2\");\n\n            headers.remove(AUTHORIZATION);\n            headers.remove(COOKIE);\n            headers.remove(COOKIE2);\n            headers.remove(PROXY_AUTHORIZATION);\n            headers.remove(WWW_AUTHENTICATE);\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn test_redirect_policy_limit() {\n        let policy = Policy::default();\n        let next = Uri::try_from(\"http://x.y/z\").unwrap();\n        let mut previous = (0..=9)\n            .map(|i| Uri::try_from(&format!(\"http://a.b/c/{i}\")).unwrap())\n            .collect::<Vec<_>>();\n\n        match policy.check(StatusCode::FOUND, &HeaderMap::new(), &next, &previous) 
{\n            redirect::Action::Follow => (),\n            other => panic!(\"unexpected {other:?}\"),\n        }\n\n        previous.push(Uri::try_from(\"http://a.b.d/e/33\").unwrap());\n\n        match policy.check(StatusCode::FOUND, &HeaderMap::new(), &next, &previous) {\n            redirect::Action::Error(err) if err.is::<TooManyRedirects>() => (),\n            other => panic!(\"unexpected {other:?}\"),\n        }\n    }\n\n    #[test]\n    fn test_redirect_policy_limit_to_0() {\n        let policy = Policy::limited(0);\n        let next = Uri::try_from(\"http://x.y/z\").unwrap();\n        let previous = vec![Uri::try_from(\"http://a.b/c\").unwrap()];\n\n        match policy.check(StatusCode::FOUND, &HeaderMap::new(), &next, &previous) {\n            redirect::Action::Error(err) if err.is::<TooManyRedirects>() => (),\n            other => panic!(\"unexpected {other:?}\"),\n        }\n    }\n\n    #[test]\n    fn test_redirect_policy_custom() {\n        let policy = Policy::custom(|attempt| {\n            if attempt.uri.host() == Some(\"foo\") {\n                attempt.stop()\n            } else {\n                attempt.follow()\n            }\n        });\n\n        let next = Uri::try_from(\"http://bar/baz\").unwrap();\n        match policy.check(StatusCode::FOUND, &HeaderMap::new(), &next, &[]) {\n            redirect::Action::Follow => (),\n            other => panic!(\"unexpected {other:?}\"),\n        }\n\n        let next = Uri::try_from(\"http://foo/baz\").unwrap();\n        match policy.check(StatusCode::FOUND, &HeaderMap::new(), &next, &[]) {\n            redirect::Action::Stop => (),\n            other => panic!(\"unexpected {other:?}\"),\n        }\n    }\n\n    #[test]\n    fn test_remove_sensitive_headers() {\n        use http::header::{ACCEPT, AUTHORIZATION, COOKIE, HeaderValue};\n\n        let mut headers = HeaderMap::new();\n        headers.insert(ACCEPT, HeaderValue::from_static(\"*/*\"));\n        headers.insert(AUTHORIZATION, 
HeaderValue::from_static(\"let me in\"));\n        headers.insert(COOKIE, HeaderValue::from_static(\"foo=bar\"));\n\n        let next = Uri::try_from(\"http://initial-domain.com/path\").unwrap();\n        let mut prev = vec![Uri::try_from(\"http://initial-domain.com/new_path\").unwrap()];\n        let mut filtered_headers = headers.clone();\n\n        remove_sensitive_headers(&mut headers, &next, &prev);\n        assert_eq!(headers, filtered_headers);\n\n        prev.push(Uri::try_from(\"http://new-domain.com/path\").unwrap());\n        filtered_headers.remove(AUTHORIZATION);\n        filtered_headers.remove(COOKIE);\n\n        remove_sensitive_headers(&mut headers, &next, &prev);\n        assert_eq!(headers, filtered_headers);\n    }\n}\n"
  },
  {
    "path": "src/retry.rs",
    "content": "//! Retry requests\n//!\n//! A `Client` has the ability to retry requests, by sending additional copies\n//! to the server if a response is considered retryable.\n//!\n//! The [`Policy`] makes it easier to configure what requests to retry, along\n//! with including best practices by default, such as a retry budget.\n//!\n//! # Defaults\n//!\n//! The default retry behavior of a `Client` is to only retry requests where an\n//! error or low-level protocol NACK is encountered that is known to be safe to\n//! retry. Note however that providing a specific retry policy will override\n//! the default, and you will need to explicitly include that behavior.\n//!\n//! All policies default to including a retry budget that permits 20% extra\n//! requests to be sent.\n//!\n//! # Scoped\n//!\n//! A client's retry policy is scoped. That means that the policy doesn't\n//! apply to all requests, but only those within a user-defined scope.\n//!\n//! Since all policies include a budget by default, it doesn't make sense to\n//! apply it on _all_ requests. Rather, the retry history applied by a budget\n//! should likely only be applied to the same host.\n//!\n//! # Classifiers\n//!\n//! A retry policy needs to be configured with a classifier that determines\n//! if a request should be retried. Knowledge of the destination server's\n//! behavior is required to make a safe classifier. **Requests should not be\n//! retried** if the server cannot safely handle the same request twice, or if\n//! it causes side effects.\n//!\n//! Some common properties to check include if the request method is\n//! 
idempotent, or if the response status code indicates a transient error.\n\nuse std::sync::Arc;\n\nuse http::Request;\n\nuse crate::{\n    Body,\n    client::layer::retry::{Action, Classifier, ClassifyFn, ReqRep, ScopeFn, Scoped},\n};\n\n/// A retry policy.\npub struct Policy {\n    pub(crate) budget: Option<f32>,\n    pub(crate) classifier: Classifier,\n    pub(crate) max_retries_per_request: u32,\n    pub(crate) scope: Scoped,\n}\n\nimpl Policy {\n    /// Create a retry policy that will never retry any request.\n    ///\n    /// This is useful for disabling the `Client`s default behavior of retrying\n    /// protocol nacks.\n    #[inline]\n    pub fn never() -> Policy {\n        Self::scoped(|_| false).no_budget()\n    }\n\n    /// Create a retry policy scoped to requests for a specific host.\n    ///\n    /// This is a convenience method that creates a retry policy which only applies\n    /// to requests targeting the specified host. Requests to other hosts will not\n    /// be retried under this policy.\n    ///\n    /// # Arguments\n    /// * `host` - The hostname to match against request URIs (e.g., \"api.example.com\")\n    ///\n    /// # Example\n    /// ```rust\n    /// use wreq::retry::Policy;\n    ///\n    /// // Only retry requests to rust-lang.org\n    /// let policy = Policy::for_host(\"rust-lang.org\");\n    /// ```\n    #[inline]\n    pub fn for_host<S>(host: S) -> Policy\n    where\n        S: for<'a> PartialEq<&'a str> + Send + Sync + 'static,\n    {\n        Self::scoped(move |req| {\n            req.uri()\n                .host()\n                .is_some_and(|request_host| host == request_host)\n        })\n    }\n\n    /// Create a scoped retry policy.\n    ///\n    /// For a more convenient constructor, see [`Policy::for_host()`].\n    #[inline]\n    fn scoped<F>(func: F) -> Policy\n    where\n        F: Fn(&Request<Body>) -> bool + Send + Sync + 'static,\n    {\n        Self {\n            budget: Some(0.2),\n            classifier: 
Classifier::Never,\n            max_retries_per_request: 2,\n            scope: Scoped::Dyn(Arc::new(ScopeFn(func))),\n        }\n    }\n\n    /// Set no retry budget.\n    ///\n    /// Sets that no budget will be enforced. This could also be considered\n    /// to be an infinite budget.\n    ///\n    /// This is NOT recommended. Disabling the budget can make your system more\n    /// susceptible to retry storms.\n    #[inline]\n    pub fn no_budget(mut self) -> Self {\n        self.budget = None;\n        self\n    }\n\n    /// Sets the max extra load the budget will allow.\n    ///\n    /// Think of the amount of requests your client generates, and how much\n    /// load that puts on the server. This option configures as a percentage\n    /// how much extra load is allowed via retries.\n    ///\n    /// For example, if you send 1,000 requests per second, setting a maximum\n    /// extra load value of `0.3` would allow 300 more requests per second\n    /// in retries. A value of `2.5` would allow 2,500 more requests.\n    ///\n    /// # Panics\n    ///\n    /// The `extra_percent` value must be within reasonable values for a\n    /// percentage. This method will panic if it is less than `0.0`, or greater\n    /// than `1000.0`.\n    #[inline]\n    pub fn max_extra_load(mut self, extra_percent: f32) -> Self {\n        assert!(extra_percent >= 0.0);\n        assert!(extra_percent <= 1000.0);\n        self.budget = Some(extra_percent);\n        self\n    }\n\n    /// Set the max retries allowed per request.\n    ///\n    /// For each logical (initial) request, only retry up to `max` times.\n    ///\n    /// This value is used in combination with a token budget that is applied\n    /// to all requests. Even if the budget would allow more requests, this\n    /// limit will prevent. Likewise, the budget may prevent retrying up to\n    /// `max` times. 
This setting prevents a single request from consuming\n    /// the entire budget.\n    ///\n    /// Default is currently 2 retries.\n    #[inline]\n    pub fn max_retries_per_request(mut self, max: u32) -> Self {\n        self.max_retries_per_request = max;\n        self\n    }\n\n    /// Provide a classifier to determine if a request should be retried.\n    ///\n    /// # Example\n    ///\n    /// ```rust\n    /// # fn with_policy(policy: wreq::retry::Policy) -> wreq::retry::Policy {\n    /// policy.classify_fn(|req_rep| {\n    ///     match (req_rep.method(), req_rep.status()) {\n    ///         (&http::Method::GET, Some(http::StatusCode::SERVICE_UNAVAILABLE)) => {\n    ///             req_rep.retryable()\n    ///         },\n    ///         _ => req_rep.success()\n    ///     }\n    /// })\n    /// # }\n    /// ```\n    #[inline]\n    pub fn classify_fn<F>(mut self, func: F) -> Self\n    where\n        F: Fn(ReqRep<'_>) -> Action + Send + Sync + 'static,\n    {\n        self.classifier = Classifier::Dyn(Arc::new(ClassifyFn(func)));\n        self\n    }\n}\n\nimpl Default for Policy {\n    fn default() -> Self {\n        Self {\n            budget: None,\n            classifier: Classifier::ProtocolNacks,\n            max_retries_per_request: 2,\n            scope: Scoped::Unscoped,\n        }\n    }\n}\n"
  },
  {
    "path": "src/sync.rs",
    "content": "//! Synchronization primitives: [`Mutex`] and [`RwLock`] that never poison.\n//!\n//! These types expose APIs identical to [`std::sync::Mutex`] and [`std::sync::RwLock`],\n//! but **do not return** [`std::sync::PoisonError`] even if a thread panics while holding the lock.\n//!\n//! This is useful in high-availability systems where panic recovery is done externally,\n//! or poisoning is not meaningful in context.\n//!\n//! ## Implementation\n//! - When the `parking_lot` feature is enabled, it uses [`parking_lot::Mutex`] and\n//!   [`parking_lot::RwLock`].\n//! - Otherwise, it wraps [`std::sync::Mutex`] and [`std::sync::RwLock`], using `.unwrap_or_else(|e|\n//!   e.into_inner())` to silently recover from poisoning.\n\n#[cfg(feature = \"parking_lot\")]\npub use parking_lot::*;\n\n#[cfg(not(feature = \"parking_lot\"))]\npub use self::std::*;\n\n#[cfg(not(feature = \"parking_lot\"))]\nmod std {\n    use std::{\n        ops::{Deref, DerefMut},\n        sync,\n    };\n\n    /// A [`Mutex`] that never poisons and has the same interface as [`std::sync::Mutex`].\n    ///\n    /// See [`crate::sync`] for more details.\n    #[derive(Debug)]\n    pub struct Mutex<T: ?Sized>(sync::Mutex<T>);\n\n    impl<T> Mutex<T> {\n        /// Like [`std::sync::Mutex::new`].\n        #[inline]\n        pub fn new(t: T) -> Mutex<T> {\n            Mutex(sync::Mutex::new(t))\n        }\n    }\n\n    impl<T: ?Sized> Mutex<T> {\n        /// Like [`std::sync::Mutex::lock`].\n        #[inline]\n        pub fn lock<'a>(&'a self) -> MutexGuard<'a, T> {\n            MutexGuard(self.0.lock().unwrap_or_else(|e| e.into_inner()))\n        }\n    }\n\n    /// Like [`std::sync::MutexGuard`].\n    #[must_use]\n    pub struct MutexGuard<'a, T: ?Sized + 'a>(sync::MutexGuard<'a, T>);\n\n    impl<'a, T: ?Sized> Deref for MutexGuard<'a, T> {\n        type Target = T;\n\n        #[inline]\n        fn deref(&self) -> &T {\n            self.0.deref()\n        }\n    }\n\n    impl<'a, T: ?Sized> 
DerefMut for MutexGuard<'a, T> {\n        #[inline]\n        fn deref_mut(&mut self) -> &mut T {\n            self.0.deref_mut()\n        }\n    }\n\n    impl<T: Default> Default for Mutex<T> {\n        fn default() -> Self {\n            Mutex(Default::default())\n        }\n    }\n\n    /// A [`RwLock`] that never poisons and has the same interface as [`std::sync::RwLock`].\n    ///\n    /// See [`crate::sync`] for more details.\n    #[derive(Debug, Default)]\n    pub struct RwLock<T: ?Sized>(sync::RwLock<T>);\n\n    impl<T: ?Sized> RwLock<T> {\n        /// Like [`std::sync::RwLock::read`].\n        #[inline]\n        pub fn read<'a>(&'a self) -> RwLockReadGuard<'a, T> {\n            RwLockReadGuard(self.0.read().unwrap_or_else(|e| e.into_inner()))\n        }\n\n        /// Like [`std::sync::RwLock::write`].\n        #[inline]\n        pub fn write<'a>(&'a self) -> RwLockWriteGuard<'a, T> {\n            RwLockWriteGuard(self.0.write().unwrap_or_else(|e| e.into_inner()))\n        }\n    }\n\n    /// Like [`std::sync::RwLockReadGuard`].\n    #[must_use]\n    pub struct RwLockReadGuard<'a, T: ?Sized + 'a>(sync::RwLockReadGuard<'a, T>);\n\n    impl<'a, T: ?Sized> Deref for RwLockReadGuard<'a, T> {\n        type Target = T;\n\n        #[inline]\n        fn deref(&self) -> &T {\n            self.0.deref()\n        }\n    }\n\n    /// Like [`std::sync::RwLockWriteGuard`].\n    #[must_use]\n    pub struct RwLockWriteGuard<'a, T: ?Sized + 'a>(sync::RwLockWriteGuard<'a, T>);\n\n    impl<'a, T: ?Sized> Deref for RwLockWriteGuard<'a, T> {\n        type Target = T;\n\n        #[inline]\n        fn deref(&self) -> &T {\n            self.0.deref()\n        }\n    }\n\n    impl<'a, T: ?Sized> DerefMut for RwLockWriteGuard<'a, T> {\n        #[inline]\n        fn deref_mut(&mut self) -> &mut T {\n            self.0.deref_mut()\n        }\n    }\n}\n"
  },
  {
    "path": "src/tls/compress.rs",
    "content": "//! TLS certificate compression [RFC 8879](https://datatracker.ietf.org/doc/html/rfc8879).\n//!\n//! Reduces handshake latency by compressing certificate chains.\n//! Supports Zlib, Brotli, and Zstd algorithms to minimize bytes-on-wire\n//! and fit within the initial congestion window.\n\nuse std::{fmt::Debug, io};\n\nuse btls::{\n    error::ErrorStack,\n    ssl::{self, SslConnectorBuilder},\n};\nuse btls_sys as ffi;\n// Re-export the `CertificateCompressionAlgorithm` enum for users of this module.\npub use ssl::CertificateCompressionAlgorithm;\n\n/// Certificate compression or decompression.\n///\n/// Wraps a function pointer or closure that processes certificate data.\n#[allow(clippy::type_complexity)]\npub enum Codec {\n    /// Function pointer.\n    Pointer(fn(&[u8], &mut dyn io::Write) -> io::Result<()>),\n    /// Closure or function object.\n    Dynamic(Box<dyn Fn(&[u8], &mut dyn io::Write) -> io::Result<()> + Send + Sync>),\n}\n\n/// Trait for TLS certificate compression implementations.\n///\n/// Provides methods for compressing and decompressing certificate data,\n/// as well as identifying the algorithm in use.\n///\n/// See [RFC 8879, §3](https://www.rfc-editor.org/rfc/rfc8879.html#name-compression-algorithms)\n/// for the list of IANA-assigned compression algorithm identifiers.\npub trait CertificateCompressor: Debug + Sync + Send + 'static {\n    /// Returns the [`Codec`] used to compress certificate chains for this algorithm.\n    fn compress(&self) -> Codec;\n\n    /// Returns the [`Codec`] used to decompress certificate chains for this algorithm.\n    fn decompress(&self) -> Codec;\n\n    /// Returns the IANA-assigned identifier of the compression algorithm.\n    fn algorithm(&self) -> CertificateCompressionAlgorithm;\n}\n\nstruct Compressor<const ALGORITHM: i32> {\n    compress: Codec,\n    decompress: Codec,\n}\n\n// ===== impl Codec =====\n\nimpl Codec {\n    #[inline]\n    fn call(&self, input: &[u8], output: &mut dyn io::Write) 
-> io::Result<()> {\n        match self {\n            Codec::Pointer(func) => func(input, output),\n            Codec::Dynamic(closure) => closure(input, output),\n        }\n    }\n}\n\n// ===== impl Compressor =====\n\nimpl<const ALGORITHM: i32> ssl::CertificateCompressor for Compressor<ALGORITHM> {\n    const ALGORITHM: CertificateCompressionAlgorithm = match ALGORITHM {\n        ffi::TLSEXT_cert_compression_zlib => CertificateCompressionAlgorithm::ZLIB,\n        ffi::TLSEXT_cert_compression_brotli => CertificateCompressionAlgorithm::BROTLI,\n        ffi::TLSEXT_cert_compression_zstd => CertificateCompressionAlgorithm::ZSTD,\n        _ => unreachable!(),\n    };\n    const CAN_COMPRESS: bool = true;\n    const CAN_DECOMPRESS: bool = true;\n\n    #[inline]\n    fn compress<W>(&self, input: &[u8], output: &mut W) -> io::Result<()>\n    where\n        W: io::Write,\n    {\n        self.compress.call(input, output)\n    }\n\n    #[inline]\n    fn decompress<W>(&self, input: &[u8], output: &mut W) -> io::Result<()>\n    where\n        W: io::Write,\n    {\n        self.decompress.call(input, output)\n    }\n}\n\n/// Register a certificate compressor with the given [`SslConnectorBuilder`].\npub(super) fn register(\n    compressor: &dyn CertificateCompressor,\n    builder: &mut SslConnectorBuilder,\n) -> Result<(), ErrorStack> {\n    match compressor.algorithm() {\n        CertificateCompressionAlgorithm::ZLIB => {\n            builder.add_certificate_compression_algorithm(Compressor::<\n                { ffi::TLSEXT_cert_compression_zlib },\n            > {\n                compress: compressor.compress(),\n                decompress: compressor.decompress(),\n            })\n        }\n        CertificateCompressionAlgorithm::BROTLI => {\n            builder.add_certificate_compression_algorithm(Compressor::<\n                { ffi::TLSEXT_cert_compression_brotli },\n            > {\n                compress: compressor.compress(),\n                decompress: 
compressor.decompress(),\n            })\n        }\n        CertificateCompressionAlgorithm::ZSTD => {\n            builder.add_certificate_compression_algorithm(Compressor::<\n                { ffi::TLSEXT_cert_compression_zstd },\n            > {\n                compress: compressor.compress(),\n                decompress: compressor.decompress(),\n            })\n        }\n        _ => unreachable!(),\n    }\n}\n"
  },
  {
    "path": "src/tls/conn/ext.rs",
    "content": "use std::borrow::Cow;\n\nuse btls::ssl::{SslConnectorBuilder, SslVerifyMode};\n\nuse crate::{\n    Error,\n    tls::{\n        compress::{self, CertificateCompressor},\n        trust::CertStore,\n    },\n};\n\n/// SslConnectorBuilderExt trait for `SslConnectorBuilder`.\npub trait SslConnectorBuilderExt {\n    /// Configure the CertStore for the given `SslConnectorBuilder`.\n    fn set_cert_store(self, store: Option<&CertStore>) -> crate::Result<SslConnectorBuilder>;\n\n    /// Configure the certificate verification for the given `SslConnectorBuilder`.\n    fn set_cert_verification(self, enable: bool) -> crate::Result<SslConnectorBuilder>;\n\n    /// Configure the certificate compressors for the given `SslConnectorBuilder`.\n    fn set_cert_compressors(\n        self,\n        compressors: Option<&Cow<'static, [&'static dyn CertificateCompressor]>>,\n    ) -> crate::Result<SslConnectorBuilder>;\n}\n\nimpl SslConnectorBuilderExt for SslConnectorBuilder {\n    #[inline]\n    fn set_cert_store(mut self, store: Option<&CertStore>) -> crate::Result<SslConnectorBuilder> {\n        if let Some(store) = store {\n            store.add_to_tls(&mut self);\n        } else {\n            self.set_default_verify_paths().map_err(Error::tls)?;\n        }\n\n        Ok(self)\n    }\n\n    #[inline]\n    fn set_cert_verification(mut self, enable: bool) -> crate::Result<SslConnectorBuilder> {\n        if enable {\n            self.set_verify(SslVerifyMode::PEER);\n        } else {\n            self.set_verify(SslVerifyMode::NONE);\n        }\n        Ok(self)\n    }\n\n    #[inline]\n    fn set_cert_compressors(\n        mut self,\n        compressors: Option<&Cow<'static, [&'static dyn CertificateCompressor]>>,\n    ) -> crate::Result<SslConnectorBuilder> {\n        if let Some(compressors) = compressors {\n            for compressor in compressors.as_ref() {\n                compress::register(*compressor, &mut self).map_err(Error::tls)?;\n            }\n        
}\n\n        Ok(self)\n    }\n}\n"
  },
  {
    "path": "src/tls/conn/macros.rs",
    "content": "macro_rules! set_bool {\n    ($cfg:expr, $field:ident, $conn:expr, $setter:ident) => {\n        if $cfg.$field {\n            $conn.$setter();\n        }\n    };\n    ($cfg:expr, !$field:ident, $conn:expr, $setter:ident, $arg:expr) => {\n        if !$cfg.$field {\n            $conn.$setter($arg);\n        }\n    };\n}\n\nmacro_rules! set_option {\n    ($cfg:expr, $field:ident, $conn:expr, $setter:ident) => {\n        if let Some(val) = $cfg.$field {\n            $conn.$setter(val);\n        }\n    };\n}\n\nmacro_rules! set_option_ref_try {\n    ($cfg:expr, $field:ident, $conn:expr, $setter:ident) => {\n        if let Some(val) = $cfg.$field.as_ref() {\n            $conn.$setter(val).map_err(Error::tls)?;\n        }\n    };\n}\n\nmacro_rules! set_option_inner_try {\n    ($field:ident, $conn:expr, $setter:ident) => {\n        $conn.$setter($field.map(|v| v.0)).map_err(Error::tls)?;\n    };\n}\n"
  },
  {
    "path": "src/tls/conn/service.rs",
    "content": "use std::{\n    fmt::Debug,\n    future::Future,\n    pin::Pin,\n    task::{Context, Poll},\n};\n\nuse http::{Uri, uri::Scheme};\nuse tokio::io::{AsyncRead, AsyncWrite};\nuse tokio_btls::SslStream;\nuse tower::{BoxError, Service};\n\nuse super::{EstablishedConn, HttpsConnector, MaybeHttpsStream};\nuse crate::{\n    client::{Connection, ConnectionDescriptor},\n    ext::UriExt,\n};\n\ntype BoxFuture<T, E> = Pin<Box<dyn Future<Output = Result<T, E>> + Send>>;\n\nasync fn perform_handshake<T>(ssl: btls::ssl::Ssl, conn: T) -> Result<MaybeHttpsStream<T>, BoxError>\nwhere\n    T: AsyncRead + AsyncWrite + Unpin,\n{\n    let mut stream = SslStream::new(ssl, conn)?;\n    Pin::new(&mut stream).connect().await?;\n    Ok(MaybeHttpsStream::Https(stream))\n}\n\nimpl<T, S> Service<Uri> for HttpsConnector<S>\nwhere\n    S: Service<Uri, Response = T> + Send,\n    S::Error: Into<BoxError>,\n    S::Future: Unpin + Send + 'static,\n    T: AsyncRead + AsyncWrite + Connection + Unpin + Debug + Sync + Send + 'static,\n{\n    type Response = MaybeHttpsStream<T>;\n    type Error = BoxError;\n    type Future = BoxFuture<Self::Response, Self::Error>;\n\n    #[inline]\n    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {\n        self.http.poll_ready(cx).map_err(Into::into)\n    }\n\n    fn call(&mut self, uri: Uri) -> Self::Future {\n        let connect = self.http.call(uri.clone());\n        let tls = self.tls.clone();\n\n        let f = async move {\n            let conn = connect.await.map_err(Into::into)?;\n\n            // Early return if it is not a tls scheme\n            if uri.scheme() != Some(&Scheme::HTTPS) {\n                return Ok(MaybeHttpsStream::Http(conn));\n            }\n\n            let ssl = tls.setup_ssl(uri)?;\n            perform_handshake(ssl, conn).await\n        };\n\n        Box::pin(f)\n    }\n}\n\nimpl<T, S> Service<ConnectionDescriptor> for HttpsConnector<S>\nwhere\n    S: Service<Uri, Response = T> + Send,\n  
  S::Error: Into<BoxError>,\n    S::Future: Unpin + Send + 'static,\n    T: AsyncRead + AsyncWrite + Connection + Unpin + Debug + Sync + Send + 'static,\n{\n    type Response = MaybeHttpsStream<T>;\n    type Error = BoxError;\n    type Future = BoxFuture<Self::Response, Self::Error>;\n\n    #[inline]\n    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {\n        self.http.poll_ready(cx).map_err(Into::into)\n    }\n\n    fn call(&mut self, descriptor: ConnectionDescriptor) -> Self::Future {\n        let uri = descriptor.uri().clone();\n        let connect = self.http.call(uri.clone());\n        let tls = self.tls.clone();\n\n        let f = async move {\n            let conn = connect.await.map_err(Into::into)?;\n\n            // Early return if it is not a tls scheme\n            if uri.is_http() {\n                return Ok(MaybeHttpsStream::Http(conn));\n            }\n\n            let ssl = tls.setup_ssl2(descriptor)?;\n            perform_handshake(ssl, conn).await\n        };\n\n        Box::pin(f)\n    }\n}\n\nimpl<T, S, IO> Service<EstablishedConn<IO>> for HttpsConnector<S>\nwhere\n    S: Service<Uri, Response = T> + Send + Clone + 'static,\n    S::Error: Into<BoxError>,\n    S::Future: Unpin + Send + 'static,\n    T: AsyncRead + AsyncWrite + Connection + Unpin + Debug + Sync + Send + 'static,\n    IO: AsyncRead + AsyncWrite + Unpin + Send + Sync + Debug + 'static,\n{\n    type Response = MaybeHttpsStream<IO>;\n    type Error = BoxError;\n    type Future = BoxFuture<Self::Response, Self::Error>;\n\n    #[inline]\n    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {\n        self.http.poll_ready(cx).map_err(Into::into)\n    }\n\n    fn call(&mut self, conn: EstablishedConn<IO>) -> Self::Future {\n        let tls = self.tls.clone();\n        let fut = async move {\n            // Early return if it is not a tls scheme\n            if conn.descriptor.uri().is_http() {\n                return 
Ok(MaybeHttpsStream::Http(conn.io));\n            }\n\n            let ssl = tls.setup_ssl2(conn.descriptor)?;\n            perform_handshake(ssl, conn.io).await\n        };\n\n        Box::pin(fut)\n    }\n}\n"
  },
  {
    "path": "src/tls/conn.rs",
    "content": "//! SSL support via BoringSSL.\n\n#[macro_use]\nmod macros;\nmod ext;\nmod service;\n\nuse std::{\n    borrow::Cow,\n    fmt::{self, Debug},\n    io,\n    pin::Pin,\n    sync::{Arc, LazyLock},\n    task::{Context, Poll},\n};\n\nuse btls::{\n    error::ErrorStack,\n    ex_data::Index,\n    ssl::{Ssl, SslConnector, SslMethod, SslOptions, SslSessionCacheMode},\n};\nuse http::{Uri, Version};\nuse tokio::io::{AsyncRead, AsyncWrite, ReadBuf};\nuse tokio_btls::SslStream;\nuse tower::{BoxError, Service};\n\nuse crate::{\n    Error,\n    client::{Connected, Connection, ConnectionDescriptor},\n    tls::{\n        AlpnProtocol, AlpsProtocol, KeyShare, TlsOptions, TlsVersion,\n        conn::ext::SslConnectorBuilderExt,\n        keylog::KeyLog,\n        session::{Key, LruTlsSessionCache, TlsSession, TlsSessionCache},\n        trust::{CertStore, Identity},\n    },\n};\n\nfn key_index() -> Result<Index<Ssl, Key>, ErrorStack> {\n    static IDX: LazyLock<Result<Index<Ssl, Key>, ErrorStack>> = LazyLock::new(Ssl::new_ex_index);\n    IDX.clone()\n}\n\n/// Settings for [`TlsConnector`]\n#[derive(Clone)]\npub struct HandshakeSettings {\n    no_ticket: bool,\n    enable_ech_grease: bool,\n    verify_hostname: bool,\n    tls_sni: bool,\n    alpn_protocols: Option<Cow<'static, [AlpnProtocol]>>,\n    alps_protocols: Option<Cow<'static, [AlpsProtocol]>>,\n    alps_use_new_codepoint: bool,\n    key_shares: Option<Cow<'static, [KeyShare]>>,\n    random_aes_hw_override: bool,\n}\n\n/// A Connector using BoringSSL to support `http` and `https` schemes.\n#[derive(Clone)]\npub struct HttpsConnector<T> {\n    http: T,\n    tls: TlsConnector,\n}\n\n/// A builder for creating a `TlsConnector`.\npub struct TlsConnectorBuilder {\n    alpn_protocol: Option<AlpnProtocol>,\n    max_version: Option<TlsVersion>,\n    min_version: Option<TlsVersion>,\n    tls_sni: bool,\n    verify_hostname: bool,\n    identity: Option<Identity>,\n    cert_store: Option<CertStore>,\n    cert_verification: 
bool,\n    keylog: Option<KeyLog>,\n    session_cache: Arc<dyn TlsSessionCache>,\n}\n\n/// A layer which wraps services in an `SslConnector`.\n#[derive(Clone)]\npub struct TlsConnector {\n    ssl: SslConnector,\n    cache: Option<Arc<dyn TlsSessionCache>>,\n    settings: HandshakeSettings,\n}\n\n// ===== impl HttpsConnector =====\n\nimpl<S, T> HttpsConnector<S>\nwhere\n    S: Service<Uri, Response = T> + Send,\n    S::Error: Into<BoxError>,\n    S::Future: Unpin + Send + 'static,\n    T: AsyncRead + AsyncWrite + Connection + Unpin + Debug + Sync + Send + 'static,\n{\n    /// Creates a new [`HttpsConnector`] with a given [`TlsConnector`].\n    #[inline]\n    pub fn new(http: S, tls: TlsConnector) -> HttpsConnector<S> {\n        HttpsConnector { http, tls }\n    }\n\n    /// Disables ALPN negotiation.\n    #[inline]\n    pub fn no_alpn(&mut self) -> &mut Self {\n        self.tls.settings.alpn_protocols = None;\n        self\n    }\n}\n\n// ===== impl TlsConnector =====\n\nimpl TlsConnector {\n    /// Creates a new [`TlsConnectorBuilder`] with the given configuration.\n    pub fn builder() -> TlsConnectorBuilder {\n        TlsConnectorBuilder {\n            alpn_protocol: None,\n            min_version: None,\n            max_version: None,\n            identity: None,\n            tls_sni: true,\n            verify_hostname: true,\n            cert_store: None,\n            cert_verification: true,\n            keylog: None,\n            session_cache: Arc::new(LruTlsSessionCache::new(8)),\n        }\n    }\n\n    fn setup_ssl(&self, uri: Uri) -> Result<Ssl, BoxError> {\n        let cfg = self.ssl.configure()?;\n        let host = uri.host().ok_or(\"URI missing host\")?;\n        let host = Self::normalize_host(host);\n        let ssl = cfg.into_ssl(host)?;\n        Ok(ssl)\n    }\n\n    fn setup_ssl2(&self, descriptor: ConnectionDescriptor) -> Result<Ssl, BoxError> {\n        let mut cfg = self.ssl.configure()?;\n\n        // Use server name indication\n        
cfg.set_use_server_name_indication(self.settings.tls_sni);\n\n        // Verify hostname\n        cfg.set_verify_hostname(self.settings.verify_hostname);\n\n        // Set ECH grease\n        cfg.set_enable_ech_grease(self.settings.enable_ech_grease);\n\n        // Set random AES hardware override\n        if self.settings.random_aes_hw_override {\n            let random = (crate::util::fast_random() & 1) == 0;\n            cfg.set_aes_hw_override(random);\n        }\n\n        // Set ALPN protocols\n        if let Some(version) = descriptor.version() {\n            match version {\n                Version::HTTP_11 | Version::HTTP_10 | Version::HTTP_09 => {\n                    cfg.set_alpn_protos(&AlpnProtocol::HTTP1.encode())?;\n                }\n                Version::HTTP_2 => {\n                    cfg.set_alpn_protos(&AlpnProtocol::HTTP2.encode())?;\n                }\n                // No ALPN protocol for other versions\n                _ => {}\n            }\n        } else {\n            // Default use the connector configuration.\n            if let Some(ref alpn_values) = self.settings.alpn_protocols {\n                let encoded = AlpnProtocol::encode_sequence(alpn_values.as_ref());\n                cfg.set_alpn_protos(&encoded)?;\n            }\n        }\n\n        // Set ALPS protos\n        if let Some(ref alps_values) = self.settings.alps_protocols {\n            for alps in alps_values.iter() {\n                cfg.add_application_settings(alps.0)?;\n            }\n\n            // By default, the new endpoint is used.\n            if !alps_values.is_empty() {\n                cfg.set_alps_use_new_codepoint(self.settings.alps_use_new_codepoint);\n            }\n        }\n\n        // Set TLS key shares\n        if let Some(ref key_shares) = self.settings.key_shares {\n            cfg.set_client_key_shares(key_shares.as_ref())?;\n        }\n\n        let uri = descriptor.uri().clone();\n        let host = uri.host().ok_or(\"URI missing 
host\")?;\n        let host = Self::normalize_host(host);\n\n        if let Some(ref cache) = self.cache {\n            let key = Key(descriptor.id());\n\n            // If the session cache is enabled, we try to retrieve the session\n            // associated with the key. If it exists, we set it in the SSL configuration.\n            if let Some(session) = cache.pop(&key) {\n                #[allow(unsafe_code)]\n                unsafe { cfg.set_session(&session.0) }?;\n\n                if self.settings.no_ticket {\n                    cfg.set_options(SslOptions::NO_TICKET);\n                }\n            }\n\n            let idx = key_index()?;\n            cfg.set_ex_data(idx, key);\n        }\n\n        Ok(cfg.into_ssl(host)?)\n    }\n\n    /// If `host` is an IPv6 address, we must strip away the square brackets that surround\n    /// it (otherwise, boring will fail to parse the host as an IP address, eventually\n    /// causing the handshake to fail due to a hostname verification error).\n    fn normalize_host(host: &str) -> &str {\n        if host.is_empty() {\n            return host;\n        }\n\n        let last = host.len() - 1;\n        let mut chars = host.chars();\n\n        if let (Some('['), Some(']')) = (chars.next(), chars.last()) {\n            if host[1..last].parse::<std::net::Ipv6Addr>().is_ok() {\n                return &host[1..last];\n            }\n        }\n\n        host\n    }\n}\n\n// ====== impl TlsConnectorBuilder =====\n\nimpl TlsConnectorBuilder {\n    /// Sets the alpn protocol to be used.\n    #[inline]\n    pub fn alpn_protocol(mut self, protocol: Option<AlpnProtocol>) -> Self {\n        self.alpn_protocol = protocol;\n        self\n    }\n\n    /// Sets the TLS keylog policy.\n    #[inline]\n    pub fn keylog(mut self, keylog: Option<KeyLog>) -> Self {\n        self.keylog = keylog;\n        self\n    }\n\n    /// Sets the identity to be used for client certificate authentication.\n    #[inline]\n    pub fn identity(mut self, 
identity: Option<Identity>) -> Self {\n        self.identity = identity;\n        self\n    }\n\n    /// Sets the certificate store used for TLS verification.\n    #[inline]\n    pub fn cert_store<T>(mut self, cert_store: T) -> Self\n    where\n        T: Into<Option<CertStore>>,\n    {\n        self.cert_store = cert_store.into();\n        self\n    }\n\n    /// Sets the certificate verification flag.\n    #[inline]\n    pub fn cert_verification(mut self, enabled: bool) -> Self {\n        self.cert_verification = enabled;\n        self\n    }\n\n    /// Sets the minimum TLS version to use.\n    #[inline]\n    pub fn min_version<T>(mut self, version: T) -> Self\n    where\n        T: Into<Option<TlsVersion>>,\n    {\n        self.min_version = version.into();\n        self\n    }\n\n    /// Sets the maximum TLS version to use.\n    #[inline]\n    pub fn max_version<T>(mut self, version: T) -> Self\n    where\n        T: Into<Option<TlsVersion>>,\n    {\n        self.max_version = version.into();\n        self\n    }\n\n    /// Sets the Server Name Indication (SNI) flag.\n    #[inline]\n    pub fn tls_sni(mut self, enabled: bool) -> Self {\n        self.tls_sni = enabled;\n        self\n    }\n\n    /// Sets the hostname verification flag.\n    #[inline]\n    pub fn verify_hostname(mut self, enabled: bool) -> Self {\n        self.verify_hostname = enabled;\n        self\n    }\n\n    /// Sets a custom TLS session store.\n    ///\n    /// By default, a [`LruTlsSessionCache`] is used. 
Use this method to provide\n    /// a custom [`TlsSessionCache`] implementation (e.g., file-based or distributed).\n    #[inline]\n    pub fn session_store(mut self, store: Option<Arc<dyn TlsSessionCache>>) -> Self {\n        if let Some(store) = store {\n            self.session_cache = store;\n        }\n        self\n    }\n\n    /// Build the `TlsConnector` with the provided configuration.\n    pub fn build<'a, T>(&self, opts: T) -> crate::Result<TlsConnector>\n    where\n        T: Into<Cow<'a, TlsOptions>>,\n    {\n        let opts = opts.into();\n\n        // Replace the default configuration with the provided one\n        let max_tls_version = opts.max_tls_version.or(self.max_version);\n        let min_tls_version = opts.min_tls_version.or(self.min_version);\n        let alpn_protocols = self\n            .alpn_protocol\n            .map(|proto| Cow::Owned(vec![proto]))\n            .or_else(|| opts.alpn_protocols.clone());\n\n        // Create the SslConnector with the provided options\n        let mut connector = SslConnector::bare_builder(SslMethod::tls())\n            .map_err(Error::tls)?\n            .set_cert_store(self.cert_store.as_ref())?\n            .set_cert_verification(self.cert_verification)?\n            .set_cert_compressors(opts.certificate_compressors.as_ref())?;\n\n        // Set Identity\n        if let Some(ref identity) = self.identity {\n            identity.add_to_tls(&mut connector)?;\n        }\n\n        // Set minimum TLS version\n        set_option_inner_try!(min_tls_version, connector, set_min_proto_version);\n\n        // Set maximum TLS version\n        set_option_inner_try!(max_tls_version, connector, set_max_proto_version);\n\n        // Set OCSP stapling\n        set_bool!(opts, enable_ocsp_stapling, connector, enable_ocsp_stapling);\n\n        // Set Signed Certificate Timestamps (SCT)\n        set_bool!(\n            opts,\n            enable_signed_cert_timestamps,\n            connector,\n            
 enable_signed_cert_timestamps\n        );\n\n        // Set TLS Session ticket options\n        set_bool!(\n            opts,\n            !session_ticket,\n            connector,\n            set_options,\n            SslOptions::NO_TICKET\n        );\n\n        // Set TLS PSK DHE key exchange options\n        set_bool!(\n            opts,\n            !psk_dhe_ke,\n            connector,\n            set_options,\n            SslOptions::NO_PSK_DHE_KE\n        );\n\n        // Set TLS No Renegotiation options\n        set_bool!(\n            opts,\n            !renegotiation,\n            connector,\n            set_options,\n            SslOptions::NO_RENEGOTIATION\n        );\n\n        // Set TLS grease options\n        set_option!(opts, grease_enabled, connector, set_grease_enabled);\n\n        // Set TLS permute extensions options\n        set_option!(opts, permute_extensions, connector, set_permute_extensions);\n\n        // Set TLS curves list\n        set_option_ref_try!(opts, curves_list, connector, set_curves_list);\n\n        // Set TLS signature algorithms list\n        set_option_ref_try!(opts, sigalgs_list, connector, set_sigalgs_list);\n\n        // Set TLS preserve TLS 1.3 cipher list order\n        set_option!(\n            opts,\n            preserve_tls13_cipher_list,\n            connector,\n            set_preserve_tls13_cipher_list\n        );\n\n        // Set TLS cipher list\n        set_option_ref_try!(opts, cipher_list, connector, set_cipher_list);\n\n        // Set TLS delegated credentials\n        set_option_ref_try!(\n            opts,\n            delegated_credentials,\n            connector,\n            set_delegated_credentials\n        );\n\n        // Set TLS record size limit\n        set_option!(opts, record_size_limit, connector, set_record_size_limit);\n\n        // Set TLS aes hardware override\n        set_option!(opts, aes_hw_override, connector, set_aes_hw_override);\n\n        // Set TLS extension permutation\n       
 if let Some(ref extension_permutation) = opts.extension_permutation {\n            connector\n                .set_extension_permutation(extension_permutation)\n                .map_err(Error::tls)?;\n        }\n\n        // Set TLS keylog handler.\n        if let Some(ref policy) = self.keylog {\n            let handle = policy.clone().handle().map_err(Error::tls)?;\n            connector.set_keylog_callback(move |_, line| {\n                handle.write(line);\n            });\n        }\n\n        // Create the handshake settings with the default session cache capacity.\n        let settings = HandshakeSettings {\n            tls_sni: self.tls_sni,\n            verify_hostname: self.verify_hostname,\n            no_ticket: opts.psk_skip_session_ticket,\n            alpn_protocols,\n            alps_protocols: opts.alps_protocols.clone(),\n            alps_use_new_codepoint: opts.alps_use_new_codepoint,\n            enable_ech_grease: opts.enable_ech_grease,\n            key_shares: opts.key_shares.clone(),\n            random_aes_hw_override: opts.random_aes_hw_override,\n        };\n\n        // If the session cache is disabled, we don't need to set up any callbacks.\n        let cache = opts.pre_shared_key.then(|| {\n            let session_cache = self.session_cache.clone();\n\n            connector.set_session_cache_mode(SslSessionCacheMode::CLIENT);\n            connector.set_new_session_callback({\n                let cache = session_cache.clone();\n                move |ssl, session| {\n                    if let Ok(Some(key)) = key_index().map(|idx| ssl.ex_data(idx)) {\n                        cache.put(key.clone(), TlsSession(session));\n                    }\n                }\n            });\n\n            session_cache\n        });\n\n        Ok(TlsConnector {\n            ssl: connector.build(),\n            cache,\n            settings,\n        })\n    }\n}\n\n/// A stream which may be wrapped with TLS.\npub enum MaybeHttpsStream<T> {\n    /// A 
raw HTTP stream.\n    Http(T),\n    /// An SSL-wrapped HTTP stream.\n    Https(SslStream<T>),\n}\n\n/// A connection that has been established with a TLS handshake.\npub struct EstablishedConn<IO> {\n    io: IO,\n    descriptor: ConnectionDescriptor,\n}\n\n// ===== impl MaybeHttpsStream =====\n\nimpl<T> AsRef<T> for MaybeHttpsStream<T> {\n    #[inline]\n    fn as_ref(&self) -> &T {\n        match self {\n            MaybeHttpsStream::Http(s) => s,\n            MaybeHttpsStream::Https(s) => s.get_ref(),\n        }\n    }\n}\n\nimpl<T> fmt::Debug for MaybeHttpsStream<T> {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        match *self {\n            MaybeHttpsStream::Http(..) => f.pad(\"Http(..)\"),\n            MaybeHttpsStream::Https(..) => f.pad(\"Https(..)\"),\n        }\n    }\n}\n\nimpl<T> Connection for MaybeHttpsStream<T>\nwhere\n    T: Connection,\n{\n    fn connected(&self) -> Connected {\n        match self {\n            MaybeHttpsStream::Http(s) => s.connected(),\n            MaybeHttpsStream::Https(s) => {\n                let mut connected = s.get_ref().connected();\n\n                if s.ssl().selected_alpn_protocol() == Some(b\"h2\") {\n                    connected = connected.negotiated_h2();\n                }\n\n                connected\n            }\n        }\n    }\n}\n\nimpl<T> AsyncRead for MaybeHttpsStream<T>\nwhere\n    T: AsyncRead + AsyncWrite + Unpin,\n{\n    #[inline]\n    fn poll_read(\n        mut self: Pin<&mut Self>,\n        cx: &mut Context<'_>,\n        buf: &mut ReadBuf<'_>,\n    ) -> Poll<io::Result<()>> {\n        match self.as_mut().get_mut() {\n            MaybeHttpsStream::Http(inner) => Pin::new(inner).poll_read(cx, buf),\n            MaybeHttpsStream::Https(inner) => Pin::new(inner).poll_read(cx, buf),\n        }\n    }\n}\n\nimpl<T> AsyncWrite for MaybeHttpsStream<T>\nwhere\n    T: AsyncRead + AsyncWrite + Unpin,\n{\n    #[inline]\n    fn poll_write(\n        mut self: Pin<&mut Self>,\n        ctx: 
&mut Context<'_>,\n        buf: &[u8],\n    ) -> Poll<io::Result<usize>> {\n        match self.as_mut().get_mut() {\n            MaybeHttpsStream::Http(inner) => Pin::new(inner).poll_write(ctx, buf),\n            MaybeHttpsStream::Https(inner) => Pin::new(inner).poll_write(ctx, buf),\n        }\n    }\n\n    #[inline]\n    fn poll_flush(mut self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<io::Result<()>> {\n        match self.as_mut().get_mut() {\n            MaybeHttpsStream::Http(inner) => Pin::new(inner).poll_flush(ctx),\n            MaybeHttpsStream::Https(inner) => Pin::new(inner).poll_flush(ctx),\n        }\n    }\n\n    #[inline]\n    fn poll_shutdown(mut self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<io::Result<()>> {\n        match self.as_mut().get_mut() {\n            MaybeHttpsStream::Http(inner) => Pin::new(inner).poll_shutdown(ctx),\n            MaybeHttpsStream::Https(inner) => Pin::new(inner).poll_shutdown(ctx),\n        }\n    }\n\n    #[inline]\n    fn is_write_vectored(&self) -> bool {\n        match self {\n            MaybeHttpsStream::Http(inner) => inner.is_write_vectored(),\n            MaybeHttpsStream::Https(inner) => inner.is_write_vectored(),\n        }\n    }\n\n    #[inline]\n    fn poll_write_vectored(\n        self: Pin<&mut Self>,\n        cx: &mut Context<'_>,\n        bufs: &[io::IoSlice<'_>],\n    ) -> Poll<io::Result<usize>> {\n        match self.get_mut() {\n            MaybeHttpsStream::Http(inner) => Pin::new(inner).poll_write_vectored(cx, bufs),\n            MaybeHttpsStream::Https(inner) => Pin::new(inner).poll_write_vectored(cx, bufs),\n        }\n    }\n}\n\n// ===== impl EstablishedConn =====\n\nimpl<IO> EstablishedConn<IO> {\n    /// Creates a new [`EstablishedConn`].\n    #[inline]\n    pub fn new(io: IO, descriptor: ConnectionDescriptor) -> EstablishedConn<IO> {\n        EstablishedConn { io, descriptor }\n    }\n}\n"
  },
  {
    "path": "src/tls/keylog/handle.rs",
    "content": "use std::{\n    fs::OpenOptions,\n    io::{Result, Write},\n    path::Path,\n    sync::{\n        Arc,\n        mpsc::{self, Sender},\n    },\n};\n\n/// Handle for writing to a key log file.\n#[derive(Debug, Clone)]\npub struct Handle {\n    #[allow(unused)]\n    filepath: Arc<Path>,\n    sender: Sender<String>,\n}\n\nimpl Handle {\n    /// Create a new [`Handle`] with the specified path and sender.\n    pub fn new(filepath: Arc<Path>) -> Result<Self> {\n        if let Some(parent) = filepath.parent() {\n            std::fs::create_dir_all(parent)?;\n        }\n\n        let mut file = OpenOptions::new()\n            .create(true)\n            .append(true)\n            .open(&filepath)?;\n\n        let (sender, receiver) = mpsc::channel::<String>();\n\n        let _path_name = filepath.clone();\n        std::thread::spawn(move || {\n            trace!(\n                file = ?_path_name,\n                \"Handle: receiver task up and running\",\n            );\n            while let Ok(line) = receiver.recv() {\n                if let Err(_err) = file.write_all(line.as_bytes()) {\n                    error!(\n                        file = ?_path_name,\n                        error = %_err,\n                        \"Handle: failed to write file\",\n                    );\n                }\n            }\n        });\n\n        Ok(Handle { filepath, sender })\n    }\n\n    /// Write a line to the keylogger.\n    pub fn write(&self, line: &str) {\n        let line = format!(\"{line}\\n\");\n        if let Err(_err) = self.sender.send(line) {\n            error!(\n                file = ?self.filepath,\n                error = %_err,\n                \"Handle: failed to send log line for writing\",\n            );\n        }\n    }\n}\n"
  },
  {
    "path": "src/tls/keylog.rs",
    "content": "//! TLS Key Log Management\n//!\n//! This module provides utilities for managing TLS key logging, allowing session keys to be\n//! written to a file for debugging or analysis (e.g., with Wireshark).\n//!\n//! The [`KeyLog`] enum lets you control key log behavior, either by respecting the\n//! `SSLKEYLOGFILE` environment variable or by specifying a custom file path. Handles are cached\n//! globally to avoid duplicate file access.\n\nmod handle;\n\nuse std::{\n    borrow::Cow,\n    collections::{HashMap, hash_map::Entry},\n    io::{Error, ErrorKind, Result},\n    path::{Component, Path, PathBuf},\n    sync::{Arc, OnceLock},\n};\n\nuse handle::Handle;\n\nuse crate::sync::RwLock;\n\n/// Specifies the intent for a (TLS) keylogger.\n#[derive(Debug, Clone)]\npub struct KeyLog(Option<Arc<Path>>);\n\nimpl KeyLog {\n    /// Creates a [`KeyLog`] based on the `SSLKEYLOGFILE` environment variable.\n    pub fn from_env() -> KeyLog {\n        match std::env::var(\"SSLKEYLOGFILE\") {\n            Ok(ref s) if !s.trim().is_empty() => {\n                KeyLog(Some(Arc::from(normalize_path(Path::new(s)))))\n            }\n            _ => KeyLog(None),\n        }\n    }\n\n    /// Creates a [`KeyLog`] that writes to the specified file path.\n    pub fn from_file<P: AsRef<Path>>(path: P) -> KeyLog {\n        KeyLog(Some(Arc::from(normalize_path(path.as_ref()))))\n    }\n\n    /// Creates a new key log file [`Handle`] based on the policy.\n    pub(crate) fn handle(self) -> Result<Handle> {\n        static GLOBAL_KEYLOG_CACHE: OnceLock<RwLock<HashMap<Arc<Path>, Handle>>> = OnceLock::new();\n\n        let path = self\n            .0\n            .ok_or_else(|| Error::new(ErrorKind::NotFound, \"KeyLog: file path is not specified\"))?;\n\n        let cache = GLOBAL_KEYLOG_CACHE.get_or_init(RwLock::default);\n        if let Some(handle) = cache.read().get(path.as_ref()).cloned() {\n            return Ok(handle);\n        }\n\n        match cache.write().entry(path.clone()) 
{\n            Entry::Occupied(entry) => Ok(entry.get().clone()),\n            Entry::Vacant(entry) => {\n                let handle = Handle::new(path)?;\n                entry.insert(handle.clone());\n                Ok(handle)\n            }\n        }\n    }\n}\n\nfn normalize_path<'a, P>(path: P) -> PathBuf\nwhere\n    P: Into<Cow<'a, Path>>,\n{\n    let path = path.into();\n    let mut components = path.components().peekable();\n    let mut ret = if let Some(c @ Component::Prefix(..)) = components.peek().cloned() {\n        components.next();\n        PathBuf::from(c.as_os_str())\n    } else {\n        PathBuf::new()\n    };\n\n    for component in components {\n        match component {\n            Component::Prefix(..) => unreachable!(),\n            Component::RootDir => {\n                ret.push(component.as_os_str());\n            }\n            Component::CurDir => {}\n            Component::ParentDir => {\n                ret.pop();\n            }\n            Component::Normal(c) => {\n                ret.push(c);\n            }\n        }\n    }\n    ret\n}\n"
  },
  {
    "path": "src/tls/session.rs",
    "content": "//! TLS session caching and resumption.\n//!\n//! Handshakes are expensive. This module lets you reuse sessions to save\n//! CPU cycles and reduce latency.\n//!\n//! By default, we use an in-memory LRU cache, but you can plug in your own\n//! implementation if you're running at scale or need to share sessions\n//! across multiple instances.\n\nuse std::{\n    borrow::Borrow,\n    collections::{HashMap, hash_map::Entry},\n    hash::{Hash, Hasher},\n    num::NonZeroUsize,\n    sync::Arc,\n};\n\nuse btls::ssl::{SslSession, SslVersion};\nuse lru::LruCache;\n\nuse crate::{client::ConnectionId, sync::Mutex, tls::TlsVersion};\n\n/// An opaque key identifying a TLS session cache entry.\n#[derive(Clone, PartialEq, Eq, Hash)]\npub struct Key(pub(super) ConnectionId);\n\n/// A TLS session that can be stored and retrieved from a session cache.\n#[derive(Clone)]\npub struct TlsSession(pub(super) SslSession);\n\n/// A trait for cache storing and retrieving TLS sessions.\n///\n/// # TLS 1.3 Session Handling\n///\n/// For TLS 1.3 sessions, implementations **should** remove the session after\n/// retrieval to comply with [RFC 8446 Appendix C.4](https://tools.ietf.org/html/rfc8446#appendix-C.4),\n/// which requires that session tickets are used at most once to prevent\n/// concurrent handshakes from reusing the same session.\npub trait TlsSessionCache: Send + Sync {\n    /// Store a TLS session associated with the given key.\n    fn put(&self, key: Key, session: TlsSession);\n\n    /// Retrieve a TLS session for the given key.\n    ///\n    /// For TLS 1.3, the session should be removed from the cache upon retrieval\n    /// to ensure single-use semantics (see [RFC 8446 Appendix C.4]).\n    fn pop(&self, key: &Key) -> Option<TlsSession>;\n}\n\nimpl_into_shared!(\n    /// Trait for converting types into a shared [`TlsSessionCache`].\n    ///\n    /// This allows accepting bare types, `Arc<T>`, or `Arc<dyn TlsSessionCache>`.\n    pub trait IntoTlsSessionCache => 
TlsSessionCache\n);\n\n/// The default two-level LRU session cache.\n///\n/// Maintains both forward (key → sessions) and reverse (session → key) lookups\n/// for efficient session storage, retrieval, and cleanup operations.\n///\n/// This is the built-in implementation of [`TlsSessionCache`] used when no\n/// custom session store is configured.\npub struct LruTlsSessionCache {\n    inner: Mutex<Inner>,\n    per_host_session_capacity: usize,\n}\n\nstruct Inner {\n    reverse: HashMap<TlsSession, Key>,\n    per_host_sessions: HashMap<Key, LruCache<TlsSession, ()>>,\n}\n\n// ===== impl TlsSession =====\n\nimpl TlsSession {\n    /// Returns the TLS session ID.\n    #[inline]\n    pub fn id(&self) -> &[u8] {\n        self.0.id()\n    }\n\n    /// Returns the time at which the session was established, in seconds since the Unix epoch.\n    #[inline]\n    pub fn time(&self) -> u64 {\n        self.0.time()\n    }\n\n    /// Returns the sessions timeout, in seconds.\n    ///\n    /// A session older than this time should not be used for session resumption.\n    #[inline]\n    pub fn timeout(&self) -> u32 {\n        self.0.timeout()\n    }\n\n    /// Returns the TLS protocol version negotiated for this session.\n    #[inline]\n    pub fn protocol_version(&self) -> TlsVersion {\n        let version = self.0.protocol_version();\n        if version == SslVersion::SSL3 {\n            // SSLv3 (SSL 3.0) is obsolete and insecure, and is not supported by btls.\n            // This branch should never be reached in normal operation. 
If it is,\n            // it indicates a bug or an unsupported/legacy OpenSSL configuration.\n            unreachable!(\n                \"Encountered unsupported protocol: SSLv3 (SSL 3.0) is obsolete and not accepted by btls\"\n            );\n        }\n        TlsVersion(version)\n    }\n}\n\nimpl Eq for TlsSession {}\n\nimpl PartialEq for TlsSession {\n    #[inline]\n    fn eq(&self, other: &TlsSession) -> bool {\n        self.0.id() == other.0.id()\n    }\n}\n\nimpl Hash for TlsSession {\n    #[inline]\n    fn hash<H: Hasher>(&self, state: &mut H) {\n        self.0.id().hash(state);\n    }\n}\n\nimpl Borrow<[u8]> for TlsSession {\n    #[inline]\n    fn borrow(&self) -> &[u8] {\n        self.0.id()\n    }\n}\n\n// ===== impl LruTlsSessionCache =====\n\nimpl LruTlsSessionCache {\n    /// Creates a new [`LruTlsSessionCache`] with the given per-host capacity.\n    pub fn new(per_host_session_capacity: usize) -> Self {\n        LruTlsSessionCache {\n            inner: Mutex::new(Inner {\n                reverse: HashMap::new(),\n                per_host_sessions: HashMap::new(),\n            }),\n            per_host_session_capacity,\n        }\n    }\n}\n\nimpl TlsSessionCache for LruTlsSessionCache {\n    fn put(&self, key: Key, session: TlsSession) {\n        let mut inner = self.inner.lock();\n\n        let evicted = {\n            let per_host_sessions =\n                inner\n                    .per_host_sessions\n                    .entry(key.clone())\n                    .or_insert_with(|| {\n                        NonZeroUsize::new(self.per_host_session_capacity)\n                            .map_or_else(LruCache::unbounded, LruCache::new)\n                    });\n\n            // Enforce per-key capacity limit by evicting the least recently used session\n            let evicted = if per_host_sessions.len() >= self.per_host_session_capacity {\n                per_host_sessions.pop_lru().map(|(s, _)| s)\n            } else {\n                None\n    
        };\n\n            per_host_sessions.put(session.clone(), ());\n            evicted\n        };\n\n        if let Some(evicted_session) = evicted {\n            inner.reverse.remove(&evicted_session);\n        }\n        inner.reverse.insert(session, key);\n    }\n\n    fn pop(&self, key: &Key) -> Option<TlsSession> {\n        let mut inner = self.inner.lock();\n        let session = {\n            let per_host_sessions = inner.per_host_sessions.get_mut(key)?;\n            per_host_sessions.peek_lru()?.0.clone()\n        };\n\n        // https://tools.ietf.org/html/rfc8446#appendix-C.4\n        // OpenSSL will remove the session from its cache after the handshake completes anyway, but\n        // this ensures that concurrent handshakes don't end up with the same session.\n        if session.protocol_version() == TlsVersion::TLS_1_3 {\n            if let Some(key) = inner.reverse.remove(&session) {\n                if let Entry::Occupied(mut entry) = inner.per_host_sessions.entry(key) {\n                    entry.get_mut().pop(&session);\n                    if entry.get().is_empty() {\n                        entry.remove();\n                    }\n                }\n            }\n        }\n\n        Some(session)\n    }\n}\n"
  },
  {
    "path": "src/tls/trust/identity.rs",
    "content": "use btls::{\n    pkcs12::Pkcs12,\n    pkey::{PKey, Private},\n    x509::X509,\n};\n\nuse crate::Error;\n\n/// Represents a private key and X509 cert as a client certificate.\n#[derive(Debug, Clone)]\npub struct Identity {\n    pkey: PKey<Private>,\n    cert: X509,\n    chain: Vec<X509>,\n}\n\nimpl Identity {\n    /// Parses a DER-formatted PKCS #12 archive, using the specified password to decrypt the key.\n    ///\n    /// The archive should contain a leaf certificate and its private key, as well any intermediate\n    /// certificates that allow clients to build a chain to a trusted root.\n    /// The chain certificates should be in order from the leaf certificate towards the root.\n    ///\n    /// PKCS #12 archives typically have the file extension `.p12` or `.pfx`, and can be created\n    /// with the OpenSSL `pkcs12` tool:\n    ///\n    /// ```bash\n    /// openssl pkcs12 -export -out identity.pfx -inkey key.pem -in cert.pem -certfile chain_certs.pem\n    /// ```\n    ///\n    /// # Examples\n    ///\n    /// ```\n    /// # use std::fs::File;\n    /// # use std::io::Read;\n    /// # fn pkcs12() -> Result<(), Box<dyn std::error::Error>> {\n    /// let mut buf = Vec::new();\n    /// File::open(\"my-ident.pfx\")?.read_to_end(&mut buf)?;\n    /// let pkcs12 = wreq::Identity::from_pkcs12_der(&buf, \"my-privkey-password\")?;\n    /// # drop(pkcs12);\n    /// # Ok(())\n    /// # }\n    /// ```\n    pub fn from_pkcs12_der(buf: &[u8], pass: &str) -> crate::Result<Identity> {\n        let pkcs12 = Pkcs12::from_der(buf).map_err(Error::tls)?;\n        let parsed = pkcs12.parse(pass).map_err(Error::tls)?;\n        Ok(Identity {\n            pkey: parsed.pkey,\n            cert: parsed.cert,\n            // > The stack is the reverse of what you might expect due to the way\n            // > PKCS12_parse is implemented, so we need to load it backwards.\n            // > 
https://github.com/sfackler/rust-native-tls/commit/05fb5e583be589ab63d9f83d986d095639f8ec44\n            chain: parsed.chain.into_iter().flatten().rev().collect(),\n        })\n    }\n\n    /// Parses a chain of PEM encoded X509 certificates, with the leaf certificate first.\n    /// `key` is a PEM encoded PKCS #8 formatted private key for the leaf certificate.\n    ///\n    /// The certificate chain should contain any intermediate certificates that should be sent to\n    /// clients to allow them to build a chain to a trusted root.\n    ///\n    /// A certificate chain here means a series of PEM encoded certificates concatenated together.\n    ///\n    /// # Examples\n    ///\n    /// ```\n    /// # use std::fs;\n    /// # fn pkcs8() -> Result<(), Box<dyn std::error::Error>> {\n    /// let cert = fs::read(\"client.pem\")?;\n    /// let key = fs::read(\"key.pem\")?;\n    /// let pkcs8 = wreq::Identity::from_pkcs8_pem(&cert, &key)?;\n    /// # drop(pkcs8);\n    /// # Ok(())\n    /// # }\n    /// ```\n    pub fn from_pkcs8_pem(buf: &[u8], key: &[u8]) -> crate::Result<Identity> {\n        if !key.starts_with(b\"-----BEGIN PRIVATE KEY-----\") {\n            return Err(Error::builder(\"expected PKCS#8 PEM\"));\n        }\n\n        let pkey = PKey::private_key_from_pem(key).map_err(Error::tls)?;\n        let mut cert_chain = X509::stack_from_pem(buf).map_err(Error::tls)?.into_iter();\n        let cert = cert_chain.next().ok_or_else(|| {\n            Error::builder(\"at least one certificate must be provided to create an identity\")\n        })?;\n        let chain = cert_chain.collect();\n        Ok(Identity { pkey, cert, chain })\n    }\n\n    pub(crate) fn add_to_tls(\n        &self,\n        connector: &mut btls::ssl::SslConnectorBuilder,\n    ) -> crate::Result<()> {\n        connector.set_certificate(&self.cert).map_err(Error::tls)?;\n        connector.set_private_key(&self.pkey).map_err(Error::tls)?;\n        for cert in self.chain.iter() {\n            // 
https://www.openssl.org/docs/manmaster/man3/SSL_CTX_add_extra_chain_cert.html\n            // specifies that \"When sending a certificate chain, extra chain certificates are\n            // sent in order following the end entity certificate.\"\n            connector\n                .add_extra_chain_cert(cert.clone())\n                .map_err(Error::tls)?;\n        }\n        Ok(())\n    }\n}\n\n#[cfg(test)]\nmod test {\n    use super::Identity;\n\n    #[test]\n    fn identity_from_pkcs12_der_invalid() {\n        Identity::from_pkcs12_der(b\"not der\", \"nope\").unwrap_err();\n    }\n\n    #[test]\n    fn identity_from_pkcs8_pem_invalid() {\n        Identity::from_pkcs8_pem(b\"not pem\", b\"not key\").unwrap_err();\n    }\n}\n"
  },
  {
    "path": "src/tls/trust/parse.rs",
    "content": "use btls::x509::store::{X509Store, X509StoreBuilder};\n\nuse super::{Certificate, CertificateInput};\nuse crate::{Error, Result};\n\npub fn parse_certs<'c, I>(\n    certs: I,\n    parser: fn(&'c [u8]) -> crate::Result<Certificate>,\n) -> Result<X509Store>\nwhere\n    I: IntoIterator,\n    I::Item: Into<CertificateInput<'c>>,\n{\n    let mut store = X509StoreBuilder::new().map_err(Error::tls)?;\n    let certs = filter_map_certs(certs, parser);\n    process_certs(certs.into_iter(), &mut store)?;\n    Ok(store.build())\n}\n\npub fn parse_certs_with_stack<C, F>(certs: C, parse: F) -> Result<X509Store>\nwhere\n    C: AsRef<[u8]>,\n    F: Fn(C) -> Result<Vec<Certificate>>,\n{\n    let mut store = X509StoreBuilder::new().map_err(Error::tls)?;\n    let certs = parse(certs)?;\n    process_certs(certs.into_iter(), &mut store)?;\n    Ok(store.build())\n}\n\npub fn process_certs<I>(iter: I, store: &mut X509StoreBuilder) -> Result<()>\nwhere\n    I: Iterator<Item = Certificate>,\n{\n    let mut valid_count = 0;\n    let mut invalid_count = 0;\n    for cert in iter {\n        if let Err(_err) = store.add_cert(cert.0) {\n            invalid_count += 1;\n            warn!(\"tls failed to parse certificate: {:?}\", _err);\n        } else {\n            valid_count += 1;\n        }\n    }\n\n    if valid_count == 0 && invalid_count > 0 {\n        return Err(Error::builder(\"invalid certificate\"));\n    }\n\n    Ok(())\n}\n\npub fn filter_map_certs<'c, I>(\n    certs: I,\n    parser: fn(&'c [u8]) -> Result<Certificate>,\n) -> impl Iterator<Item = Certificate>\nwhere\n    I: IntoIterator,\n    I::Item: Into<CertificateInput<'c>>,\n{\n    certs\n        .into_iter()\n        .map(Into::into)\n        .filter_map(move |data| match data.with_parser(parser) {\n            Ok(cert) => Some(cert),\n            Err(_err) => {\n                warn!(\"tls failed to parse certificate: {:?}\", _err);\n                None\n            }\n        })\n}\n"
  },
  {
    "path": "src/tls/trust/store.rs",
    "content": "use std::sync::Arc;\n\nuse btls::{\n    ssl::SslConnectorBuilder,\n    x509::store::{X509Store, X509StoreBuilder},\n};\n\nuse super::{\n    Certificate, CertificateInput,\n    parse::{filter_map_certs, parse_certs, parse_certs_with_stack, process_certs},\n};\nuse crate::{Error, Result};\n\n/// A builder for constructing a `CertStore`.\npub struct CertStoreBuilder {\n    builder: Result<X509StoreBuilder>,\n}\n\n// ====== impl CertStoreBuilder ======\n\nimpl CertStoreBuilder {\n    /// Adds a DER-encoded certificate to the certificate store.\n    #[inline]\n    pub fn add_der_cert<'c, C>(self, cert: C) -> Self\n    where\n        C: Into<CertificateInput<'c>>,\n    {\n        self.parse_cert(cert, Certificate::from_der)\n    }\n\n    /// Adds a PEM-encoded certificate to the certificate store.\n    #[inline]\n    pub fn add_pem_cert<'c, C>(self, cert: C) -> Self\n    where\n        C: Into<CertificateInput<'c>>,\n    {\n        self.parse_cert(cert, Certificate::from_pem)\n    }\n\n    /// Adds multiple DER-encoded certificates to the certificate store.\n    #[inline]\n    pub fn add_der_certs<'c, I>(self, certs: I) -> Self\n    where\n        I: IntoIterator,\n        I::Item: Into<CertificateInput<'c>>,\n    {\n        self.parse_certs(certs, Certificate::from_der)\n    }\n\n    /// Adds multiple PEM-encoded certificates to the certificate store.\n    #[inline]\n    pub fn add_pem_certs<'c, I>(self, certs: I) -> Self\n    where\n        I: IntoIterator,\n        I::Item: Into<CertificateInput<'c>>,\n    {\n        self.parse_certs(certs, Certificate::from_pem)\n    }\n\n    /// Adds a PEM-encoded certificate stack to the certificate store.\n    pub fn add_stack_pem_certs<C>(mut self, certs: C) -> Self\n    where\n        C: AsRef<[u8]>,\n    {\n        if let Ok(ref mut builder) = self.builder {\n            let result = Certificate::stack_from_pem(certs.as_ref())\n                .and_then(|certs| process_certs(certs.into_iter(), builder));\n\n     
       if let Err(err) = result {\n                self.builder = Err(err);\n            }\n        }\n        self\n    }\n\n    /// Load certificates from their default locations.\n    ///\n    /// These locations are read from the `SSL_CERT_FILE` and `SSL_CERT_DIR`\n    /// environment variables if present, or defaults specified at OpenSSL\n    /// build time otherwise.\n    pub fn set_default_paths(mut self) -> Self {\n        if let Ok(ref mut builder) = self.builder {\n            if let Err(err) = builder.set_default_paths() {\n                self.builder = Err(Error::tls(err));\n            }\n        }\n        self\n    }\n\n    /// Constructs the `CertStore`.\n    ///\n    /// This method finalizes the builder and constructs the `CertStore`\n    /// containing all the added certificates.\n    #[inline]\n    pub fn build(self) -> Result<CertStore> {\n        self.builder\n            .map(X509StoreBuilder::build)\n            .map(Arc::new)\n            .map(CertStore)\n    }\n}\n\nimpl CertStoreBuilder {\n    fn parse_cert<'c, C, P>(mut self, cert: C, parser: P) -> Self\n    where\n        C: Into<CertificateInput<'c>>,\n        P: Fn(&'c [u8]) -> Result<Certificate>,\n    {\n        if let Ok(ref mut builder) = self.builder {\n            let input = cert.into();\n            let result = input\n                .with_parser(parser)\n                .and_then(|cert| builder.add_cert(cert.0).map_err(Error::tls));\n\n            if let Err(err) = result {\n                self.builder = Err(err);\n            }\n        }\n        self\n    }\n\n    fn parse_certs<'c, I>(mut self, certs: I, parser: fn(&'c [u8]) -> Result<Certificate>) -> Self\n    where\n        I: IntoIterator,\n        I::Item: Into<CertificateInput<'c>>,\n    {\n        if let Ok(ref mut builder) = self.builder {\n            let certs = filter_map_certs(certs, parser);\n            if let Err(err) = process_certs(certs, builder) {\n                self.builder = Err(err);\n            
}\n        }\n        self\n    }\n}\n\n/// A thread-safe certificate store for TLS connections.\n///\n/// [`CertStore`] manages a collection of trusted certificates used for verifying peer identities.\n/// It is designed to be shared and reused across requests and connections, similar to `Client`.\n///\n/// Internally, [`CertStore`] uses an [`Arc`] for reference counting, so you do **not** need to wrap\n/// it in an additional [`Rc`] or [`Arc`] for sharing between threads or tasks.\n///\n/// To configure a [`CertStore`], use [`CertStore::builder()`]. You can also construct it from DER\n/// or PEM certificates, or load system defaults.\n///\n/// [`Rc`]: std::rc::Rc\n/// [`Arc`]: std::sync::Arc\n#[derive(Clone)]\npub struct CertStore(Arc<X509Store>);\n\n// ====== impl CertStore ======\n\nimpl CertStore {\n    /// Creates a new `CertStoreBuilder`.\n    #[inline]\n    pub fn builder() -> CertStoreBuilder {\n        CertStoreBuilder {\n            builder: X509StoreBuilder::new().map_err(Error::builder),\n        }\n    }\n\n    /// Creates a new `CertStore` from a collection of DER-encoded certificates.\n    #[inline]\n    pub fn from_der_certs<'c, C>(certs: C) -> Result<CertStore>\n    where\n        C: IntoIterator,\n        C::Item: Into<CertificateInput<'c>>,\n    {\n        parse_certs(certs, Certificate::from_der)\n            .map(Arc::new)\n            .map(CertStore)\n    }\n\n    /// Creates a new `CertStore` from a collection of PEM-encoded certificates.\n    #[inline]\n    pub fn from_pem_certs<'c, C>(certs: C) -> Result<CertStore>\n    where\n        C: IntoIterator,\n        C::Item: Into<CertificateInput<'c>>,\n    {\n        parse_certs(certs, Certificate::from_pem)\n            .map(Arc::new)\n            .map(CertStore)\n    }\n\n    /// Creates a new `CertStore` from a PEM-encoded certificate stack.\n    #[inline]\n    pub fn from_pem_stack<C>(certs: C) -> Result<CertStore>\n    where\n        C: AsRef<[u8]>,\n    {\n        
parse_certs_with_stack(certs, Certificate::stack_from_pem)\n            .map(Arc::new)\n            .map(CertStore)\n    }\n}\n\nimpl CertStore {\n    #[inline]\n    pub(crate) fn add_to_tls(&self, tls: &mut SslConnectorBuilder) {\n        tls.set_cert_store_ref(&self.0);\n    }\n}\n\nimpl Default for CertStore {\n    fn default() -> Self {\n        #[cfg(feature = \"webpki-roots\")]\n        static LOAD_CERTS: std::sync::LazyLock<CertStore> = std::sync::LazyLock::new(|| {\n            CertStore::builder()\n                .add_der_certs(webpki_root_certs::TLS_SERVER_ROOT_CERTS)\n                .build()\n                .expect(\"failed to load default cert store\")\n        });\n\n        #[cfg(not(feature = \"webpki-roots\"))]\n        {\n            CertStore::builder()\n                .set_default_paths()\n                .build()\n                .expect(\"failed to load default cert store\")\n        }\n\n        #[cfg(feature = \"webpki-roots\")]\n        LOAD_CERTS.clone()\n    }\n}\n"
  },
  {
    "path": "src/tls/trust.rs",
    "content": "//! TLS Trust and Identity management.\n//!\n//! Handles server certificate verification, mTLS identity, and CA\n//! bundle management. Provides DER/PEM parsing for BoringSSL and\n//! supports both system and custom trust stores.\n\nmod identity;\nmod parse;\nmod store;\n\nuse btls::x509::X509;\n\npub use self::{\n    identity::Identity,\n    store::{CertStore, CertStoreBuilder},\n};\nuse crate::Error;\n\n/// A certificate input.\npub enum CertificateInput<'c> {\n    /// Raw DER or PEM data.\n    Raw(&'c [u8]),\n    /// An already parsed certificate.\n    Parsed(Certificate),\n}\n\nimpl<'a> CertificateInput<'a> {\n    pub(crate) fn with_parser<F>(self, parser: F) -> crate::Result<Certificate>\n    where\n        F: Fn(&'a [u8]) -> crate::Result<Certificate>,\n    {\n        match self {\n            CertificateInput::Raw(data) => parser(data),\n            CertificateInput::Parsed(cert) => Ok(cert),\n        }\n    }\n}\n\nimpl From<Certificate> for CertificateInput<'_> {\n    fn from(cert: Certificate) -> Self {\n        CertificateInput::Parsed(cert)\n    }\n}\n\nimpl<'c, T: AsRef<[u8]> + ?Sized + 'c> From<&'c T> for CertificateInput<'c> {\n    fn from(value: &'c T) -> CertificateInput<'c> {\n        CertificateInput::Raw(value.as_ref())\n    }\n}\n\n/// A certificate.\n#[derive(Clone)]\npub struct Certificate(X509);\n\nimpl Certificate {\n    /// Parse a certificate from DER data.\n    #[inline]\n    pub fn from_der<C: AsRef<[u8]>>(cert: C) -> crate::Result<Self> {\n        X509::from_der(cert.as_ref()).map(Self).map_err(Error::tls)\n    }\n\n    /// Parse a certificate from PEM data.\n    #[inline]\n    pub fn from_pem<C: AsRef<[u8]>>(cert: C) -> crate::Result<Self> {\n        X509::from_pem(cert.as_ref()).map(Self).map_err(Error::tls)\n    }\n\n    /// Parse a stack of certificates from PEM data.\n    #[inline]\n    pub fn stack_from_pem<C: AsRef<[u8]>>(cert: C) -> crate::Result<Vec<Self>> {\n        let certs = 
X509::stack_from_pem(cert.as_ref()).map_err(Error::tls)?;\n        Ok(certs.into_iter().map(Self).collect())\n    }\n}\n"
  },
  {
    "path": "src/tls.rs",
    "content": "//!  TLS options configuration\n//!\n//! - Various parts of TLS can also be configured or even disabled on the `ClientBuilder`.\n\npub(crate) mod conn;\n\npub mod compress;\npub mod keylog;\npub mod session;\npub mod trust;\n\nuse std::borrow::Cow;\n\nuse ::bytes::{BufMut, Bytes, BytesMut};\n/// Re-exports of TLS-related types from `btls` for public use.\npub use btls::ssl::{ExtensionType, KeyShare};\n\nuse self::compress::CertificateCompressor;\n\n/// Http extension carrying extra TLS layer information.\n/// Made available to clients on responses when `tls_info` is set.\n#[derive(Debug, Clone)]\npub struct TlsInfo {\n    pub(crate) peer_certificate: Option<Bytes>,\n    pub(crate) peer_certificate_chain: Option<Vec<Bytes>>,\n}\n\nimpl TlsInfo {\n    /// Get the DER encoded leaf certificate of the peer.\n    pub fn peer_certificate(&self) -> Option<&[u8]> {\n        self.peer_certificate.as_deref()\n    }\n\n    /// Get the DER encoded certificate chain of the peer.\n    ///\n    /// This includes the leaf certificate on the client side.\n    pub fn peer_certificate_chain(&self) -> Option<impl Iterator<Item = &[u8]>> {\n        self.peer_certificate_chain\n            .as_ref()\n            .map(|v| v.iter().map(|b| b.as_ref()))\n    }\n}\n\n/// A TLS protocol version.\n#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]\npub struct TlsVersion(btls::ssl::SslVersion);\n\nimpl TlsVersion {\n    /// Version 1.0 of the TLS protocol.\n    pub const TLS_1_0: TlsVersion = TlsVersion(btls::ssl::SslVersion::TLS1);\n\n    /// Version 1.1 of the TLS protocol.\n    pub const TLS_1_1: TlsVersion = TlsVersion(btls::ssl::SslVersion::TLS1_1);\n\n    /// Version 1.2 of the TLS protocol.\n    pub const TLS_1_2: TlsVersion = TlsVersion(btls::ssl::SslVersion::TLS1_2);\n\n    /// Version 1.3 of the TLS protocol.\n    pub const TLS_1_3: TlsVersion = TlsVersion(btls::ssl::SslVersion::TLS1_3);\n}\n\n/// A TLS ALPN protocol.\n#[derive(Debug, Clone, Copy, Hash, PartialEq, 
Eq)]\npub struct AlpnProtocol(&'static [u8]);\n\nimpl AlpnProtocol {\n    /// Prefer HTTP/1.1\n    pub const HTTP1: AlpnProtocol = AlpnProtocol(b\"http/1.1\");\n\n    /// Prefer HTTP/2\n    pub const HTTP2: AlpnProtocol = AlpnProtocol(b\"h2\");\n\n    /// Prefer HTTP/3\n    pub const HTTP3: AlpnProtocol = AlpnProtocol(b\"h3\");\n\n    /// Create a new [`AlpnProtocol`] from a static byte slice.\n    #[inline]\n    pub const fn new(value: &'static [u8]) -> Self {\n        AlpnProtocol(value)\n    }\n\n    #[inline]\n    fn encode(self) -> Bytes {\n        Self::encode_sequence(std::iter::once(&self))\n    }\n\n    fn encode_sequence<'a, I>(items: I) -> Bytes\n    where\n        I: IntoIterator<Item = &'a AlpnProtocol>,\n    {\n        let mut buf = BytesMut::new();\n        for item in items {\n            buf.put_u8(item.0.len() as u8);\n            buf.extend_from_slice(item.0);\n        }\n        buf.freeze()\n    }\n}\n\n/// A TLS ALPS protocol.\n#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]\npub struct AlpsProtocol(&'static [u8]);\n\nimpl AlpsProtocol {\n    /// Prefer HTTP/1.1\n    pub const HTTP1: AlpsProtocol = AlpsProtocol(b\"http/1.1\");\n\n    /// Prefer HTTP/2\n    pub const HTTP2: AlpsProtocol = AlpsProtocol(b\"h2\");\n\n    /// Prefer HTTP/3\n    pub const HTTP3: AlpsProtocol = AlpsProtocol(b\"h3\");\n}\n\n/// Builder for [`TlsOptions`].\n#[must_use]\n#[derive(Debug, Clone)]\npub struct TlsOptionsBuilder {\n    config: TlsOptions,\n}\n\n/// TLS connection configuration options.\n///\n/// This struct provides fine-grained control over the behavior of TLS\n/// connections, including:\n/// - **Protocol negotiation** (ALPN, ALPS, TLS versions)\n/// - **Session management** (tickets, PSK, key shares)\n/// - **Security & privacy** (OCSP, GREASE, ECH, delegated credentials)\n/// - **Performance tuning** (record size, cipher preferences, hardware overrides)\n///\n/// All fields are optional or have defaults. 
See each field for details.\n#[non_exhaustive]\n#[derive(Debug, Clone)]\npub struct TlsOptions {\n    /// Application-Layer Protocol Negotiation ([RFC 7301](https://datatracker.ietf.org/doc/html/rfc7301)).\n    ///\n    /// Specifies which application protocols (e.g., HTTP/2, HTTP/1.1) may be negotiated\n    /// over a single TLS connection.\n    ///\n    /// **Default:** `Some([HTTP/2, HTTP/1.1])`\n    pub alpn_protocols: Option<Cow<'static, [AlpnProtocol]>>,\n\n    /// Application-Layer Protocol Settings (ALPS).\n    ///\n    /// Enables exchanging application-layer settings during the handshake\n    /// for protocols negotiated via ALPN.\n    ///\n    /// **Default:** `None`\n    pub alps_protocols: Option<Cow<'static, [AlpsProtocol]>>,\n\n    /// Whether to use an alternative ALPS codepoint for compatibility.\n    ///\n    /// Useful when larger ALPS payloads are required.\n    ///\n    /// **Default:** `false`\n    pub alps_use_new_codepoint: bool,\n\n    /// Enables TLS Session Tickets ([RFC 5077](https://tools.ietf.org/html/rfc5077)).\n    ///\n    /// Allows session resumption without requiring server-side state.\n    ///\n    /// **Default:** `true`\n    pub session_ticket: bool,\n\n    /// Minimum TLS version allowed for the connection.\n    ///\n    /// **Default:** `None` (library default applied)\n    pub min_tls_version: Option<TlsVersion>,\n\n    /// Maximum TLS version allowed for the connection.\n    ///\n    /// **Default:** `None` (library default applied)\n    pub max_tls_version: Option<TlsVersion>,\n\n    /// Enables Pre-Shared Key (PSK) cipher suites ([RFC 4279](https://datatracker.ietf.org/doc/html/rfc4279)).\n    ///\n    /// Authentication relies on out-of-band pre-shared keys instead of certificates.\n    ///\n    /// **Default:** `false`\n    pub pre_shared_key: bool,\n\n    /// Controls whether to send a GREASE Encrypted ClientHello (ECH) extension\n    /// when no supported ECH configuration is available.\n    ///\n    /// GREASE 
prevents protocol ossification by sending unknown extensions.\n    ///\n    /// **Default:** `false`\n    pub enable_ech_grease: bool,\n\n    /// Controls whether ClientHello extensions should be permuted.\n    ///\n    /// **Default:** `None` (implementation default)\n    pub permute_extensions: Option<bool>,\n\n    /// Controls whether GREASE extensions ([RFC 8701](https://datatracker.ietf.org/doc/html/rfc8701))\n    /// are enabled in general.\n    ///\n    /// **Default:** `None` (implementation default)\n    pub grease_enabled: Option<bool>,\n\n    /// Enables OCSP stapling for the connection.\n    ///\n    /// **Default:** `false`\n    pub enable_ocsp_stapling: bool,\n\n    /// Enables Signed Certificate Timestamps (SCT).\n    ///\n    /// **Default:** `false`\n    pub enable_signed_cert_timestamps: bool,\n\n    /// Sets the maximum TLS record size.\n    ///\n    /// **Default:** `None`\n    pub record_size_limit: Option<u16>,\n\n    /// Whether to skip session tickets when using PSK.\n    ///\n    /// **Default:** `false`\n    pub psk_skip_session_ticket: bool,\n\n    /// Whether to set specific key shares for TLS 1.3 handshakes.\n    ///\n    /// **Default:** `None`\n    pub key_shares: Option<Cow<'static, [KeyShare]>>,\n\n    /// Enables PSK with (EC)DHE key establishment (`psk_dhe_ke`).\n    ///\n    /// **Default:** `true`\n    pub psk_dhe_ke: bool,\n\n    /// Enables TLS renegotiation by sending the `renegotiation_info` extension.\n    ///\n    /// **Default:** `true`\n    pub renegotiation: bool,\n\n    /// Delegated Credentials ([RFC 9345](https://datatracker.ietf.org/doc/html/rfc9345)).\n    ///\n    /// Allows TLS 1.3 endpoints to use temporary delegated credentials\n    /// for authentication with reduced long-term key exposure.\n    ///\n    /// **Default:** `None`\n    pub delegated_credentials: Option<Cow<'static, str>>,\n\n    /// List of supported elliptic curves.\n    ///\n    /// **Default:** `None`\n    pub curves_list: Option<Cow<'static, 
str>>,\n\n    /// List of supported signature algorithms.\n    ///\n    /// **Default:** `None`\n    pub sigalgs_list: Option<Cow<'static, str>>,\n\n    /// Cipher suite configuration string.\n    ///\n    /// Uses BoringSSL's mini-language to select, enable, and prioritize ciphers.\n    ///\n    /// **Default:** `None`\n    pub cipher_list: Option<Cow<'static, str>>,\n\n    /// Sets whether to preserve the TLS 1.3 cipher list as configured by [`Self::cipher_list`].\n    ///\n    /// **Default:** `None`\n    pub preserve_tls13_cipher_list: Option<bool>,\n\n    /// Supported certificate compression algorithms ([RFC 8879](https://datatracker.ietf.org/doc/html/rfc8879)).\n    ///\n    /// **Default:** `None`\n    pub certificate_compressors: Option<Cow<'static, [&'static dyn CertificateCompressor]>>,\n\n    /// Supported TLS extensions, used for extension ordering/permutation.\n    ///\n    /// **Default:** `None`\n    pub extension_permutation: Option<Cow<'static, [ExtensionType]>>,\n\n    /// Overrides AES hardware acceleration.\n    ///\n    /// **Default:** `None`\n    pub aes_hw_override: Option<bool>,\n\n    /// Overrides the random AES hardware acceleration.\n    ///\n    /// **Default:** `false`\n    pub random_aes_hw_override: bool,\n}\n\nimpl TlsOptionsBuilder {\n    /// Sets the ALPN protocols to use.\n    #[inline]\n    pub fn alpn_protocols<I>(mut self, alpn: I) -> Self\n    where\n        I: IntoIterator<Item = AlpnProtocol>,\n    {\n        self.config.alpn_protocols = Some(Cow::Owned(alpn.into_iter().collect()));\n        self\n    }\n\n    /// Sets the ALPS protocols to use.\n    #[inline]\n    pub fn alps_protocols<I>(mut self, alps: I) -> Self\n    where\n        I: IntoIterator<Item = AlpsProtocol>,\n    {\n        self.config.alps_protocols = Some(Cow::Owned(alps.into_iter().collect()));\n        self\n    }\n\n    /// Sets whether to use a new codepoint for ALPS.\n    #[inline]\n    pub fn alps_use_new_codepoint(mut self, enabled: bool) -> Self 
{\n        self.config.alps_use_new_codepoint = enabled;\n        self\n    }\n    /// Sets the session ticket flag.\n    #[inline]\n    pub fn session_ticket(mut self, enabled: bool) -> Self {\n        self.config.session_ticket = enabled;\n        self\n    }\n\n    /// Sets the minimum TLS version to use.\n    #[inline]\n    pub fn min_tls_version<T>(mut self, version: T) -> Self\n    where\n        T: Into<Option<TlsVersion>>,\n    {\n        self.config.min_tls_version = version.into();\n        self\n    }\n\n    /// Sets the maximum TLS version to use.\n    #[inline]\n    pub fn max_tls_version<T>(mut self, version: T) -> Self\n    where\n        T: Into<Option<TlsVersion>>,\n    {\n        self.config.max_tls_version = version.into();\n        self\n    }\n\n    /// Sets the pre-shared key flag.\n    #[inline]\n    pub fn pre_shared_key(mut self, enabled: bool) -> Self {\n        self.config.pre_shared_key = enabled;\n        self\n    }\n\n    /// Sets the GREASE ECH extension flag.\n    #[inline]\n    pub fn enable_ech_grease(mut self, enabled: bool) -> Self {\n        self.config.enable_ech_grease = enabled;\n        self\n    }\n\n    /// Sets whether to permute ClientHello extensions.\n    #[inline]\n    pub fn permute_extensions<T>(mut self, permute: T) -> Self\n    where\n        T: Into<Option<bool>>,\n    {\n        self.config.permute_extensions = permute.into();\n        self\n    }\n\n    /// Sets the GREASE enabled flag.\n    #[inline]\n    pub fn grease_enabled<T>(mut self, enabled: T) -> Self\n    where\n        T: Into<Option<bool>>,\n    {\n        self.config.grease_enabled = enabled.into();\n        self\n    }\n\n    /// Sets the OCSP stapling flag.\n    #[inline]\n    pub fn enable_ocsp_stapling(mut self, enabled: bool) -> Self {\n        self.config.enable_ocsp_stapling = enabled;\n        self\n    }\n\n    /// Sets the signed certificate timestamps flag.\n    #[inline]\n    pub fn enable_signed_cert_timestamps(mut self, enabled: 
bool) -> Self {\n        self.config.enable_signed_cert_timestamps = enabled;\n        self\n    }\n\n    /// Sets the record size limit.\n    #[inline]\n    pub fn record_size_limit<U: Into<Option<u16>>>(mut self, limit: U) -> Self {\n        self.config.record_size_limit = limit.into();\n        self\n    }\n\n    /// Sets the PSK skip session ticket flag.\n    #[inline]\n    pub fn psk_skip_session_ticket(mut self, skip: bool) -> Self {\n        self.config.psk_skip_session_ticket = skip;\n        self\n    }\n\n    /// Sets the PSK DHE key establishment flag.\n    #[inline]\n    pub fn psk_dhe_ke(mut self, enabled: bool) -> Self {\n        self.config.psk_dhe_ke = enabled;\n        self\n    }\n\n    /// Sets the renegotiation flag.\n    #[inline]\n    pub fn renegotiation(mut self, enabled: bool) -> Self {\n        self.config.renegotiation = enabled;\n        self\n    }\n\n    /// Sets the delegated credentials.\n    #[inline]\n    pub fn delegated_credentials<T>(mut self, creds: T) -> Self\n    where\n        T: Into<Cow<'static, str>>,\n    {\n        self.config.delegated_credentials = Some(creds.into());\n        self\n    }\n\n    /// Sets the client key shares to be used in the TLS 1.3 handshake.\n    #[inline]\n    pub fn key_shares<T>(mut self, key_shares: T) -> Self\n    where\n        T: Into<Cow<'static, [KeyShare]>>,\n    {\n        self.config.key_shares = Some(key_shares.into());\n        self\n    }\n\n    /// Sets the supported curves list.\n    #[inline]\n    pub fn curves_list<T>(mut self, curves: T) -> Self\n    where\n        T: Into<Cow<'static, str>>,\n    {\n        self.config.curves_list = Some(curves.into());\n        self\n    }\n\n    /// Sets the cipher list.\n    #[inline]\n    pub fn cipher_list<T>(mut self, ciphers: T) -> Self\n    where\n        T: Into<Cow<'static, str>>,\n    {\n        self.config.cipher_list = Some(ciphers.into());\n        self\n    }\n\n    /// Sets the supported signature algorithms.\n    #[inline]\n   
 pub fn sigalgs_list<T>(mut self, sigalgs: T) -> Self\n    where\n        T: Into<Cow<'static, str>>,\n    {\n        self.config.sigalgs_list = Some(sigalgs.into());\n        self\n    }\n\n    /// Sets the certificate compression algorithms.\n    #[inline]\n    pub fn certificate_compressors<T>(mut self, algs: T) -> Self\n    where\n        T: Into<Cow<'static, [&'static dyn CertificateCompressor]>>,\n    {\n        self.config.certificate_compressors = Some(algs.into());\n        self\n    }\n\n    /// Sets the extension permutation.\n    #[inline]\n    pub fn extension_permutation<T>(mut self, permutation: T) -> Self\n    where\n        T: Into<Cow<'static, [ExtensionType]>>,\n    {\n        self.config.extension_permutation = Some(permutation.into());\n        self\n    }\n\n    /// Sets the AES hardware override flag.\n    #[inline]\n    pub fn aes_hw_override<T>(mut self, enabled: T) -> Self\n    where\n        T: Into<Option<bool>>,\n    {\n        self.config.aes_hw_override = enabled.into();\n        self\n    }\n\n    /// Sets the random AES hardware override flag.\n    #[inline]\n    pub fn random_aes_hw_override(mut self, enabled: bool) -> Self {\n        self.config.random_aes_hw_override = enabled;\n        self\n    }\n\n    /// Sets whether to preserve the TLS 1.3 cipher list as configured by [`Self::cipher_list`].\n    ///\n    /// By default, BoringSSL does not preserve the TLS 1.3 cipher list. When this option is\n    /// disabled (the default), BoringSSL uses its internal default TLS 1.3 cipher suites in its\n    /// default order, regardless of what is set via [`Self::cipher_list`].\n    ///\n    /// When enabled, this option ensures that the TLS 1.3 cipher suites explicitly set via\n    /// [`Self::cipher_list`] are retained in their original order, without being reordered or\n    /// modified by BoringSSL's internal logic. This is useful for maintaining specific cipher suite\n    /// priorities for TLS 1.3. 
Note that if [`Self::cipher_list`] does not include any TLS 1.3\n    /// cipher suites, BoringSSL will still fall back to its default TLS 1.3 cipher suites and\n    /// order.\n    #[inline]\n    pub fn preserve_tls13_cipher_list<T>(mut self, enabled: T) -> Self\n    where\n        T: Into<Option<bool>>,\n    {\n        self.config.preserve_tls13_cipher_list = enabled.into();\n        self\n    }\n\n    /// Builds the `TlsOptions` from the builder.\n    #[inline]\n    pub fn build(self) -> TlsOptions {\n        self.config\n    }\n}\n\nimpl TlsOptions {\n    /// Creates a new `TlsOptionsBuilder` instance.\n    pub fn builder() -> TlsOptionsBuilder {\n        TlsOptionsBuilder {\n            config: TlsOptions::default(),\n        }\n    }\n}\n\nimpl Default for TlsOptions {\n    fn default() -> Self {\n        TlsOptions {\n            alpn_protocols: Some(Cow::Borrowed(&[AlpnProtocol::HTTP2, AlpnProtocol::HTTP1])),\n            alps_protocols: None,\n            alps_use_new_codepoint: false,\n            session_ticket: true,\n            min_tls_version: None,\n            max_tls_version: None,\n            pre_shared_key: false,\n            enable_ech_grease: false,\n            permute_extensions: None,\n            grease_enabled: None,\n            enable_ocsp_stapling: false,\n            enable_signed_cert_timestamps: false,\n            record_size_limit: None,\n            psk_skip_session_ticket: false,\n            key_shares: None,\n            psk_dhe_ke: true,\n            renegotiation: true,\n            delegated_credentials: None,\n            curves_list: None,\n            cipher_list: None,\n            sigalgs_list: None,\n            certificate_compressors: None,\n            extension_permutation: None,\n            aes_hw_override: None,\n            preserve_tls13_cipher_list: None,\n            random_aes_hw_override: false,\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn 
alpn_protocol_encode() {\n        let alpn = AlpnProtocol::encode_sequence(&[AlpnProtocol::HTTP1, AlpnProtocol::HTTP2]);\n        assert_eq!(alpn, Bytes::from_static(b\"\\x08http/1.1\\x02h2\"));\n\n        let alpn = AlpnProtocol::encode_sequence(&[AlpnProtocol::HTTP3]);\n        assert_eq!(alpn, Bytes::from_static(b\"\\x02h3\"));\n\n        let alpn = AlpnProtocol::encode_sequence(&[AlpnProtocol::HTTP1, AlpnProtocol::HTTP3]);\n        assert_eq!(alpn, Bytes::from_static(b\"\\x08http/1.1\\x02h3\"));\n\n        let alpn = AlpnProtocol::encode_sequence(&[AlpnProtocol::HTTP2, AlpnProtocol::HTTP3]);\n        assert_eq!(alpn, Bytes::from_static(b\"\\x02h2\\x02h3\"));\n\n        let alpn = AlpnProtocol::encode_sequence(&[\n            AlpnProtocol::HTTP1,\n            AlpnProtocol::HTTP2,\n            AlpnProtocol::HTTP3,\n        ]);\n        assert_eq!(alpn, Bytes::from_static(b\"\\x08http/1.1\\x02h2\\x02h3\"));\n    }\n\n    #[test]\n    fn alpn_protocol_encode_single() {\n        let alpn = AlpnProtocol::HTTP1.encode();\n        assert_eq!(alpn, b\"\\x08http/1.1\".as_ref());\n\n        let alpn = AlpnProtocol::HTTP2.encode();\n        assert_eq!(alpn, b\"\\x02h2\".as_ref());\n\n        let alpn = AlpnProtocol::HTTP3.encode();\n        assert_eq!(alpn, b\"\\x02h3\".as_ref());\n    }\n}\n"
  },
  {
    "path": "src/trace.rs",
    "content": "macro_rules! debug {\n    ($($arg:tt)+) => {\n        {\n            #[cfg(feature = \"tracing\")]\n            {\n                ::tracing::debug!($($arg)+);\n            }\n        }\n    }\n}\n\nmacro_rules! trace {\n    ($($arg:tt)*) => {\n        {\n            #[cfg(feature = \"tracing\")]\n            {\n                ::tracing::trace!($($arg)+);\n            }\n        }\n    }\n}\n\nmacro_rules! trace_span {\n    ($($arg:tt)*) => {\n        {\n            #[cfg(feature = \"tracing\")]\n            {\n                let _span = ::tracing::trace_span!($($arg)+);\n                let _ = _span.entered();\n            }\n        }\n    }\n}\n\nmacro_rules! warn {\n    ($($arg:tt)*) => {\n        {\n            #[cfg(feature = \"tracing\")]\n            {\n                ::tracing::warn!($($arg)+);\n            }\n        }\n    }\n}\n\nmacro_rules! error {\n    ($($arg:tt)*) => {\n        {\n            #[cfg(feature = \"tracing\")]\n            {\n                ::tracing::error!($($arg)+);\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "src/util.rs",
    "content": "use std::{fmt, fmt::Write};\n\nuse bytes::Bytes;\n\nuse crate::header::{Entry, HeaderMap, HeaderValue, OccupiedEntry};\n\npub(crate) fn basic_auth<U, P>(username: U, password: Option<P>) -> HeaderValue\nwhere\n    U: fmt::Display,\n    P: fmt::Display,\n{\n    let encoded = {\n        let mut buf = b\"Basic \".to_vec();\n        let mut buf_str = String::with_capacity(32);\n        let _ = write!(buf_str, \"{username}:\");\n        if let Some(password) = password {\n            let _ = write!(buf_str, \"{password}\");\n        }\n\n        let encoded = btls::base64::encode_block(buf_str.as_bytes());\n        buf.extend(encoded.into_bytes());\n        buf\n    };\n\n    let mut header = HeaderValue::from_maybe_shared(Bytes::from(encoded))\n        .expect(\"base64 is always valid HeaderValue\");\n    header.set_sensitive(true);\n    header\n}\n\npub(crate) fn fast_random() -> u64 {\n    use std::{\n        cell::Cell,\n        collections::hash_map::RandomState,\n        hash::{BuildHasher, Hasher},\n    };\n\n    thread_local! 
{\n        static KEY: RandomState = RandomState::new();\n        static COUNTER: Cell<u64> = const { Cell::new(0) };\n    }\n\n    KEY.with(|key| {\n        COUNTER.with(|ctr| {\n            let n = ctr.get().wrapping_add(1);\n            ctr.set(n);\n\n            let mut h = key.build_hasher();\n            h.write_u64(n);\n            h.finish()\n        })\n    })\n}\n\npub(crate) fn replace_headers(dst: &mut HeaderMap, src: HeaderMap) {\n    // IntoIter of HeaderMap yields (Option<HeaderName>, HeaderValue).\n    // The first time a name is yielded, it will be Some(name), and if\n    // there are more values with the same name, the next yield will be\n    // None.\n\n    let mut prev_entry: Option<OccupiedEntry<_>> = None;\n    for (key, value) in src {\n        match key {\n            Some(key) => match dst.entry(key) {\n                Entry::Occupied(mut e) => {\n                    e.insert(value);\n                    prev_entry = Some(e);\n                }\n                Entry::Vacant(e) => {\n                    let e = e.insert_entry(value);\n                    prev_entry = Some(e);\n                }\n            },\n            None => match prev_entry {\n                Some(ref mut entry) => {\n                    entry.append(value);\n                }\n                None => unreachable!(\"HeaderMap::into_iter yielded None first\"),\n            },\n        }\n    }\n}\n\npub(crate) struct Escape<'a>(&'a [u8]);\n\nimpl<'a> Escape<'a> {\n    pub(crate) fn new(bytes: &'a [u8]) -> Self {\n        Escape(bytes)\n    }\n}\n\nimpl fmt::Debug for Escape<'_> {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        write!(f, \"b\\\"{self}\\\"\")?;\n        Ok(())\n    }\n}\n\nimpl fmt::Display for Escape<'_> {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        for &c in self.0 {\n            // https://doc.rust-lang.org/reference.html#byte-escapes\n            if c == b'\\n' {\n                write!(f, 
\"\\\\n\")?;\n            } else if c == b'\\r' {\n                write!(f, \"\\\\r\")?;\n            } else if c == b'\\t' {\n                write!(f, \"\\\\t\")?;\n            } else if c == b'\\\\' || c == b'\"' {\n                write!(f, \"\\\\{}\", c as char)?;\n            } else if c == b'\\0' {\n                write!(f, \"\\\\0\")?;\n            // ASCII printable\n            } else if (0x20..0x7f).contains(&c) {\n                write!(f, \"{}\", c as char)?;\n            } else {\n                write!(f, \"\\\\x{c:02x}\")?;\n            }\n        }\n        Ok(())\n    }\n}\n"
  },
  {
    "path": "tests/badssl.rs",
    "content": "use std::time::Duration;\n\nuse wreq::{\n    Client,\n    tls::{AlpsProtocol, TlsInfo, TlsOptions, TlsVersion, trust::CertStore},\n};\n\nmacro_rules! join {\n    ($sep:expr, $first:expr $(, $rest:expr)*) => {\n        concat!($first $(, $sep, $rest)*)\n    };\n}\n\n#[tokio::test]\nasync fn test_badssl_modern() {\n    let text = Client::builder()\n        .no_proxy()\n        .connect_timeout(Duration::from_secs(360))\n        .build()\n        .unwrap()\n        .get(\"https://mozilla-modern.badssl.com/\")\n        .send()\n        .await\n        .unwrap()\n        .text()\n        .await\n        .unwrap();\n\n    assert!(!text.is_empty());\n}\n\n#[tokio::test]\nasync fn test_badssl_self_signed() {\n    let text = Client::builder()\n        .tls_cert_verification(false)\n        .connect_timeout(Duration::from_secs(360))\n        .no_proxy()\n        .build()\n        .unwrap()\n        .get(\"https://self-signed.badssl.com/\")\n        .send()\n        .await\n        .unwrap()\n        .text()\n        .await\n        .unwrap();\n\n    assert!(!text.is_empty());\n}\nconst CURVES_LIST: &str = join!(\n    \":\",\n    \"X25519\",\n    \"P-256\",\n    \"P-384\",\n    \"P-521\",\n    \"ffdhe2048\",\n    \"ffdhe3072\"\n);\n\n#[tokio::test]\nasync fn test_3des_support() -> wreq::Result<()> {\n    let tls_options = TlsOptions::builder()\n        .cipher_list(join!(\n            \":\",\n            \"TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA\",\n            \"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\"\n        ))\n        .curves_list(CURVES_LIST)\n        .build();\n\n    // Create a client with the TLS options\n    let client = Client::builder()\n        .tls_options(tls_options)\n        .tls_cert_verification(false)\n        .connect_timeout(Duration::from_secs(360))\n        .build()?;\n\n    // Check if the client can connect to the 3des.badssl.com\n    let content = client\n        .get(\"https://3des.badssl.com/\")\n        .send()\n        .await?\n      
  .text()\n        .await?;\n\n    println!(\"3des.badssl.com is supported:\\n{content}\");\n\n    Ok(())\n}\n\n#[tokio::test]\nasync fn test_firefox_7x_100_cipher() -> wreq::Result<()> {\n    let tls_options = TlsOptions::builder()\n        .cipher_list(join!(\n            \":\",\n            \"TLS_DHE_RSA_WITH_AES_128_CBC_SHA\",\n            \"TLS_DHE_RSA_WITH_AES_256_CBC_SHA\",\n            \"TLS_DHE_RSA_WITH_AES_128_CBC_SHA256\",\n            \"TLS_DHE_RSA_WITH_AES_256_CBC_SHA256\"\n        ))\n        .curves_list(CURVES_LIST)\n        .build();\n\n    // Create a client with the TLS options\n    let client = Client::builder()\n        .tls_options(tls_options)\n        .tls_cert_verification(false)\n        .connect_timeout(Duration::from_secs(360))\n        .build()?;\n\n    // Check if the client can connect to the dh2048.badssl.com\n    let content = client\n        .get(\"https://dh2048.badssl.com/\")\n        .send()\n        .await?\n        .text()\n        .await?;\n\n    println!(\"dh2048.badssl.com is supported:\\n{content}\");\n\n    Ok(())\n}\n\n#[tokio::test]\nasync fn test_alps_new_endpoint() -> wreq::Result<()> {\n    let tls_options = TlsOptions::builder()\n        .min_tls_version(TlsVersion::TLS_1_2)\n        .max_tls_version(TlsVersion::TLS_1_3)\n        .alps_protocols([AlpsProtocol::HTTP2])\n        .alps_use_new_codepoint(true)\n        .build();\n\n    let client = Client::builder()\n        .tls_options(tls_options)\n        .connect_timeout(Duration::from_secs(360))\n        .build()?;\n\n    let resp = client.get(\"https://www.google.com\").send().await?;\n    assert!(resp.status().is_success());\n    Ok(())\n}\n\n#[tokio::test]\nasync fn test_aes_hw_override() -> wreq::Result<()> {\n    const CIPHER_LIST: &str = join!(\n        \":\",\n        \"TLS_AES_128_GCM_SHA256\",\n        \"TLS_CHACHA20_POLY1305_SHA256\",\n        \"TLS_AES_256_GCM_SHA384\",\n        \"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\",\n        
\"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\",\n        \"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\",\n        \"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\",\n        \"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\",\n        \"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\",\n        \"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\",\n        \"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\",\n        \"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\",\n        \"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\",\n        \"TLS_RSA_WITH_AES_128_GCM_SHA256\",\n        \"TLS_RSA_WITH_AES_256_GCM_SHA384\",\n        \"TLS_RSA_WITH_AES_128_CBC_SHA\",\n        \"TLS_RSA_WITH_AES_256_CBC_SHA\"\n    );\n\n    let tls_options = TlsOptions::builder()\n        .cipher_list(CIPHER_LIST)\n        .min_tls_version(TlsVersion::TLS_1_2)\n        .max_tls_version(TlsVersion::TLS_1_3)\n        .enable_ech_grease(true)\n        .aes_hw_override(false)\n        .preserve_tls13_cipher_list(true)\n        .build();\n\n    // Create a client with the TLS options\n    let client = Client::builder()\n        .tls_options(tls_options)\n        .connect_timeout(Duration::from_secs(360))\n        .build()?;\n\n    let resp = client.get(\"https://tls.browserleaks.com\").send().await?;\n    assert!(resp.status().is_success());\n    let text = resp.text().await?;\n    assert!(text.contains(\"ChaCha20Poly1305\"));\n    Ok(())\n}\n\n#[tokio::test]\nasync fn test_tls_self_signed_cert() {\n    let client = Client::builder()\n        .tls_cert_verification(false)\n        .connect_timeout(Duration::from_secs(360))\n        .tls_info(true)\n        .build()\n        .unwrap();\n\n    let resp = client\n        .get(\"https://self-signed.badssl.com/\")\n        .send()\n        .await\n        .unwrap();\n\n    let peer_cert_der = resp\n        .extensions()\n        .get::<TlsInfo>()\n        .and_then(|info| info.peer_certificate())\n        .unwrap();\n\n    let self_signed_cert_store = CertStore::builder()\n        .add_der_cert(peer_cert_der)\n     
   .build()\n        .unwrap();\n\n    let client = Client::builder()\n        .tls_cert_store(self_signed_cert_store)\n        .build()\n        .unwrap();\n\n    let resp = client\n        .get(\"https://self-signed.badssl.com/\")\n        .send()\n        .await\n        .unwrap();\n    assert!(resp.status().is_success());\n\n    let res = client.get(\"https://www.google.com\").send().await;\n    assert!(res.is_err());\n}\n"
  },
  {
    "path": "tests/brotli.rs",
    "content": "mod support;\nuse std::io::Read;\n\nuse support::server;\nuse tokio::io::AsyncWriteExt;\nuse wreq::Client;\n\n#[tokio::test]\nasync fn brotli_response() {\n    brotli_case(10_000, 4096).await;\n}\n\n#[tokio::test]\nasync fn brotli_single_byte_chunks() {\n    brotli_case(10, 1).await;\n}\n\n#[tokio::test]\nasync fn test_brotli_empty_body() {\n    let server = server::http(move |req| async move {\n        assert_eq!(req.method(), \"HEAD\");\n\n        http::Response::builder()\n            .header(\"content-encoding\", \"br\")\n            .body(Default::default())\n            .unwrap()\n    });\n\n    let res = wreq::head(format!(\"http://{}/brotli\", server.addr()))\n        .send()\n        .await\n        .unwrap();\n\n    let body = res.text().await.unwrap();\n\n    assert_eq!(body, \"\");\n}\n\n#[tokio::test]\nasync fn test_accept_header_is_not_changed_if_set() {\n    let server = server::http(move |req| async move {\n        assert_eq!(req.headers()[\"accept\"], \"application/json\");\n        assert!(\n            req.headers()[\"accept-encoding\"]\n                .to_str()\n                .unwrap()\n                .contains(\"br\")\n        );\n        http::Response::default()\n    });\n\n    let res = wreq::get(format!(\"http://{}/accept\", server.addr()))\n        .header(\n            wreq::header::ACCEPT,\n            wreq::header::HeaderValue::from_static(\"application/json\"),\n        )\n        .send()\n        .await\n        .unwrap();\n\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n}\n\n#[tokio::test]\nasync fn test_accept_encoding_header_is_not_changed_if_set() {\n    let server = server::http(move |req| async move {\n        assert_eq!(req.headers()[\"accept\"], \"*/*\");\n        assert_eq!(req.headers()[\"accept-encoding\"], \"identity\");\n        http::Response::default()\n    });\n\n    let res = wreq::get(format!(\"http://{}/accept-encoding\", server.addr()))\n        .header(wreq::header::ACCEPT, \"*/*\")\n    
    .header(\n            wreq::header::ACCEPT_ENCODING,\n            wreq::header::HeaderValue::from_static(\"identity\"),\n        )\n        .send()\n        .await\n        .unwrap();\n\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n}\n\nasync fn brotli_case(response_size: usize, chunk_size: usize) {\n    use futures_util::stream::StreamExt;\n\n    let content: String = (0..response_size).fold(String::new(), |mut acc, i| {\n        acc.push_str(&format!(\"test {i}\"));\n        acc\n    });\n\n    let mut encoder = brotli::CompressorReader::new(content.as_bytes(), 4096, 5, 20);\n    let mut brotlied_content = Vec::new();\n    encoder.read_to_end(&mut brotlied_content).unwrap();\n\n    let mut response = format!(\n        \"\\\n         HTTP/1.1 200 OK\\r\\n\\\n         Server: test-accept\\r\\n\\\n         Content-Encoding: br\\r\\n\\\n         Content-Length: {}\\r\\n\\\n         \\r\\n\",\n        &brotlied_content.len()\n    )\n    .into_bytes();\n    response.extend(&brotlied_content);\n\n    let server = server::http(move |req| {\n        assert!(\n            req.headers()[\"accept-encoding\"]\n                .to_str()\n                .unwrap()\n                .contains(\"br\")\n        );\n\n        let brotlied = brotlied_content.clone();\n        async move {\n            let len = brotlied.len();\n            let stream =\n                futures_util::stream::unfold((brotlied, 0), move |(brotlied, pos)| async move {\n                    let chunk = brotlied.chunks(chunk_size).nth(pos)?.to_vec();\n\n                    Some((chunk, (brotlied, pos + 1)))\n                });\n\n            let body = wreq::Body::wrap_stream(stream.map(Ok::<_, std::convert::Infallible>));\n\n            http::Response::builder()\n                .header(\"content-encoding\", \"br\")\n                .header(\"content-length\", len)\n                .body(body)\n                .unwrap()\n        }\n    });\n\n    let res = 
wreq::get(format!(\"http://{}/brotli\", server.addr()))\n        .send()\n        .await\n        .expect(\"response\");\n\n    let body = res.text().await.expect(\"text\");\n    assert_eq!(body, content);\n}\n\nconst COMPRESSED_RESPONSE_HEADERS: &[u8] = b\"HTTP/1.1 200 OK\\x0d\\x0a\\\n            Content-Type: text/plain\\x0d\\x0a\\\n            Connection: keep-alive\\x0d\\x0a\\\n            Content-Encoding: br\\x0d\\x0a\";\n\nconst RESPONSE_CONTENT: &str = \"some message here\";\n\nfn brotli_compress(input: &[u8]) -> Vec<u8> {\n    let mut encoder = brotli::CompressorReader::new(input, 4096, 5, 20);\n    let mut brotlied_content = Vec::new();\n    encoder.read_to_end(&mut brotlied_content).unwrap();\n    brotlied_content\n}\n\n#[tokio::test]\nasync fn test_non_chunked_non_fragmented_response() {\n    let server = server::low_level_with_response(|_raw_request, client_socket| {\n        Box::new(async move {\n            let brotlied_content = brotli_compress(RESPONSE_CONTENT.as_bytes());\n            let content_length_header =\n                format!(\"Content-Length: {}\\r\\n\\r\\n\", brotlied_content.len()).into_bytes();\n            let response = [\n                COMPRESSED_RESPONSE_HEADERS,\n                &content_length_header,\n                &brotlied_content,\n            ]\n            .concat();\n\n            client_socket\n                .write_all(response.as_slice())\n                .await\n                .expect(\"response write_all failed\");\n            client_socket.flush().await.expect(\"response flush failed\");\n        })\n    });\n\n    let res = wreq::get(format!(\"http://{}/\", server.addr()))\n        .send()\n        .await\n        .expect(\"response\");\n\n    assert_eq!(res.text().await.expect(\"text\"), RESPONSE_CONTENT);\n}\n\n#[tokio::test]\nasync fn test_chunked_fragmented_response_1() {\n    const DELAY_BETWEEN_RESPONSE_PARTS: tokio::time::Duration =\n        tokio::time::Duration::from_millis(1000);\n    const 
DELAY_MARGIN: tokio::time::Duration = tokio::time::Duration::from_millis(50);\n\n    let server = server::low_level_with_response(|_raw_request, client_socket| {\n        Box::new(async move {\n            let brotlied_content = brotli_compress(RESPONSE_CONTENT.as_bytes());\n            let response_first_part = [\n                COMPRESSED_RESPONSE_HEADERS,\n                format!(\n                    \"Transfer-Encoding: chunked\\r\\n\\r\\n{:x}\\r\\n\",\n                    brotlied_content.len()\n                )\n                .as_bytes(),\n                &brotlied_content,\n            ]\n            .concat();\n            let response_second_part = b\"\\r\\n0\\r\\n\\r\\n\";\n\n            client_socket\n                .write_all(response_first_part.as_slice())\n                .await\n                .expect(\"response_first_part write_all failed\");\n            client_socket\n                .flush()\n                .await\n                .expect(\"response_first_part flush failed\");\n\n            tokio::time::sleep(DELAY_BETWEEN_RESPONSE_PARTS).await;\n\n            client_socket\n                .write_all(response_second_part)\n                .await\n                .expect(\"response_second_part write_all failed\");\n            client_socket\n                .flush()\n                .await\n                .expect(\"response_second_part flush failed\");\n        })\n    });\n\n    let start = tokio::time::Instant::now();\n    let res = wreq::get(format!(\"http://{}/\", server.addr()))\n        .send()\n        .await\n        .expect(\"response\");\n\n    assert_eq!(res.text().await.expect(\"text\"), RESPONSE_CONTENT);\n    assert!(start.elapsed() >= DELAY_BETWEEN_RESPONSE_PARTS - DELAY_MARGIN);\n}\n\n#[tokio::test]\nasync fn test_chunked_fragmented_response_2() {\n    const DELAY_BETWEEN_RESPONSE_PARTS: tokio::time::Duration =\n        tokio::time::Duration::from_millis(1000);\n    const DELAY_MARGIN: tokio::time::Duration = 
tokio::time::Duration::from_millis(50);\n\n    let server = server::low_level_with_response(|_raw_request, client_socket| {\n        Box::new(async move {\n            let brotlied_content = brotli_compress(RESPONSE_CONTENT.as_bytes());\n            let response_first_part = [\n                COMPRESSED_RESPONSE_HEADERS,\n                format!(\n                    \"Transfer-Encoding: chunked\\r\\n\\r\\n{:x}\\r\\n\",\n                    brotlied_content.len()\n                )\n                .as_bytes(),\n                &brotlied_content,\n                b\"\\r\\n\",\n            ]\n            .concat();\n            let response_second_part = b\"0\\r\\n\\r\\n\";\n\n            client_socket\n                .write_all(response_first_part.as_slice())\n                .await\n                .expect(\"response_first_part write_all failed\");\n            client_socket\n                .flush()\n                .await\n                .expect(\"response_first_part flush failed\");\n\n            tokio::time::sleep(DELAY_BETWEEN_RESPONSE_PARTS).await;\n\n            client_socket\n                .write_all(response_second_part)\n                .await\n                .expect(\"response_second_part write_all failed\");\n            client_socket\n                .flush()\n                .await\n                .expect(\"response_second_part flush failed\");\n        })\n    });\n\n    let start = tokio::time::Instant::now();\n    let res = wreq::get(format!(\"http://{}/\", server.addr()))\n        .send()\n        .await\n        .expect(\"response\");\n\n    assert_eq!(res.text().await.expect(\"text\"), RESPONSE_CONTENT);\n    assert!(start.elapsed() >= DELAY_BETWEEN_RESPONSE_PARTS - DELAY_MARGIN);\n}\n\n#[tokio::test]\nasync fn test_chunked_fragmented_response_with_extra_bytes() {\n    const DELAY_BETWEEN_RESPONSE_PARTS: tokio::time::Duration =\n        tokio::time::Duration::from_millis(1000);\n    const DELAY_MARGIN: tokio::time::Duration = 
tokio::time::Duration::from_millis(50);\n\n    let server = server::low_level_with_response(|_raw_request, client_socket| {\n        Box::new(async move {\n            let brotlied_content = brotli_compress(RESPONSE_CONTENT.as_bytes());\n            let response_first_part = [\n                COMPRESSED_RESPONSE_HEADERS,\n                format!(\n                    \"Transfer-Encoding: chunked\\r\\n\\r\\n{:x}\\r\\n\",\n                    brotlied_content.len()\n                )\n                .as_bytes(),\n                &brotlied_content,\n            ]\n            .concat();\n            let response_second_part = b\"\\r\\n2ab\\r\\n0\\r\\n\\r\\n\";\n\n            client_socket\n                .write_all(response_first_part.as_slice())\n                .await\n                .expect(\"response_first_part write_all failed\");\n            client_socket\n                .flush()\n                .await\n                .expect(\"response_first_part flush failed\");\n\n            tokio::time::sleep(DELAY_BETWEEN_RESPONSE_PARTS).await;\n\n            client_socket\n                .write_all(response_second_part)\n                .await\n                .expect(\"response_second_part write_all failed\");\n            client_socket\n                .flush()\n                .await\n                .expect(\"response_second_part flush failed\");\n        })\n    });\n\n    let start = tokio::time::Instant::now();\n    let res = Client::new()\n        .get(format!(\"http://{}/\", server.addr()))\n        .send()\n        .await\n        .expect(\"response\");\n\n    let err = res.text().await.expect_err(\"there must be an error\");\n    assert!(err.is_decode());\n    assert!(start.elapsed() >= DELAY_BETWEEN_RESPONSE_PARTS - DELAY_MARGIN);\n}\n"
  },
  {
    "path": "tests/client.rs",
    "content": "mod support;\n\n#[cfg(feature = \"json\")]\nuse std::collections::HashMap;\n\nuse bytes::Bytes;\nuse http::{\n    HeaderMap, HeaderValue, StatusCode, Version,\n    header::{\n        self, ACCEPT, AUTHORIZATION, CACHE_CONTROL, CONTENT_LENGTH, CONTENT_TYPE, COOKIE, REFERER,\n        TRANSFER_ENCODING, USER_AGENT,\n    },\n};\nuse http_body_util::{BodyExt, Full};\nuse pretty_env_logger::env_logger;\nuse support::server;\nuse tokio::io::AsyncWriteExt;\nuse wreq::{Client, header::OrigHeaderMap, tls::TlsInfo};\n\n#[tokio::test]\nasync fn auto_headers() {\n    let server = server::http(move |req| async move {\n        assert_eq!(req.method(), \"GET\");\n\n        assert_eq!(req.headers()[\"accept\"], \"*/*\");\n        assert_eq!(req.headers().get(\"user-agent\"), None);\n        if cfg!(feature = \"gzip\") {\n            assert!(\n                req.headers()[\"accept-encoding\"]\n                    .to_str()\n                    .unwrap()\n                    .contains(\"gzip\")\n            );\n        }\n        if cfg!(feature = \"brotli\") {\n            assert!(\n                req.headers()[\"accept-encoding\"]\n                    .to_str()\n                    .unwrap()\n                    .contains(\"br\")\n            );\n        }\n        if cfg!(feature = \"zstd\") {\n            assert!(\n                req.headers()[\"accept-encoding\"]\n                    .to_str()\n                    .unwrap()\n                    .contains(\"zstd\")\n            );\n        }\n        if cfg!(feature = \"deflate\") {\n            assert!(\n                req.headers()[\"accept-encoding\"]\n                    .to_str()\n                    .unwrap()\n                    .contains(\"deflate\")\n            );\n        }\n\n        http::Response::default()\n    });\n\n    let url = format!(\"http://{}/1\", server.addr());\n    let res = Client::builder()\n        .no_proxy()\n        .build()\n        .unwrap()\n        .get(&url)\n        
.header(wreq::header::ACCEPT, \"*/*\")\n        .send()\n        .await\n        .unwrap();\n\n    assert_eq!(res.uri(), url.as_str());\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n    assert_eq!(res.remote_addr(), Some(server.addr()));\n}\n\n#[tokio::test]\nasync fn test_headers_order_with_client() {\n    use http::HeaderValue;\n    use wreq::{\n        Client,\n        header::{ACCEPT, CONTENT_TYPE, USER_AGENT},\n    };\n\n    let server = server::http(move |req| async move {\n        assert_eq!(req.method(), \"POST\");\n\n        let expected_headers = [\n            (\"cookie\", \"cookie1=cookie1-value\"),\n            (\"cookie\", \"cookie2=cookie2-value\"),\n            (\"user-agent\", \"my-test-client\"),\n            (\"accept\", \"*/*\"),\n            (\"content-type\", \"application/json\"),\n            (\"authorization\", \"Bearer test-token\"),\n            (\"referer\", \"https://example.com\"),\n            (\"cache-control\", \"no-cache\"),\n        ];\n\n        for (i, (expected_key, expected_value)) in expected_headers.iter().enumerate() {\n            let (key, value) = req.headers().iter().nth(i).unwrap();\n            assert_eq!(key.as_str(), *expected_key);\n            assert_eq!(value.as_bytes(), expected_value.as_bytes());\n        }\n\n        let full: Vec<u8> = req\n            .into_body()\n            .collect()\n            .await\n            .expect(\"must succeed\")\n            .to_bytes()\n            .to_vec();\n\n        assert_eq!(full, br#\"{\"message\":\"hello\"}\"#);\n\n        http::Response::default()\n    });\n\n    let url = format!(\"http://{}/test\", server.addr());\n\n    let client = Client::builder()\n        .no_proxy()\n        .default_headers({\n            let mut headers = HeaderMap::new();\n            headers.insert(ACCEPT, HeaderValue::from_static(\"*/*\"));\n            headers.insert(CONTENT_TYPE, HeaderValue::from_static(\"application/json\"));\n            headers.insert(USER_AGENT, 
HeaderValue::from_static(\"my-test-client\"));\n            headers.insert(AUTHORIZATION, HeaderValue::from_static(\"Bearer test-token\"));\n            headers.insert(REFERER, HeaderValue::from_static(\"https://example.com\"));\n            headers.append(\"cookie\", HeaderValue::from_static(\"cookie1=cookie1-value\"));\n            headers.append(\"cookie\", HeaderValue::from_static(\"cookie2=cookie2-value\"));\n            headers.insert(CACHE_CONTROL, HeaderValue::from_static(\"no-cache\"));\n            headers\n        })\n        .orig_headers({\n            let mut orig_headers = OrigHeaderMap::new();\n            orig_headers.insert(\"cookie\");\n            orig_headers.insert(\"user-agent\");\n            orig_headers.insert(\"accept\");\n            orig_headers.insert(\"content-type\");\n            orig_headers.insert(\"authorization\");\n            orig_headers.insert(\"referer\");\n            orig_headers.insert(\"cache-control\");\n            orig_headers\n        })\n        .build()\n        .unwrap();\n\n    let res = client\n        .post(&url)\n        .body(r#\"{\"message\":\"hello\"}\"#)\n        .send()\n        .await\n        .unwrap();\n\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n}\n\n#[tokio::test]\nasync fn test_headers_order_with_request() {\n    use http::HeaderValue;\n    use wreq::{\n        Client,\n        header::{ACCEPT, CONTENT_TYPE, USER_AGENT},\n    };\n\n    let server = server::http(move |req| async move {\n        assert_eq!(req.method(), \"POST\");\n\n        let expected_headers = [\n            (\"user-agent\", \"my-test-client\"),\n            (\"accept\", \"*/*\"),\n            (\"content-type\", \"application/json\"),\n            (\"authorization\", \"Bearer test-token\"),\n            (\"referer\", \"https://example.com\"),\n            (\"cookie\", \"cookie1=cookie1\"),\n            (\"cookie\", \"cookie2=cookie2\"),\n            (\"cache-control\", \"no-cache\"),\n        ];\n\n        for (i, 
(expected_key, expected_value)) in expected_headers.iter().enumerate() {\n            let (key, value) = req.headers().iter().nth(i).unwrap();\n            assert_eq!(key.as_str(), *expected_key);\n            assert_eq!(value.as_bytes(), expected_value.as_bytes());\n        }\n\n        let full: Vec<u8> = req\n            .into_body()\n            .collect()\n            .await\n            .expect(\"must succeed\")\n            .to_bytes()\n            .to_vec();\n\n        assert_eq!(full, br#\"{\"message\":\"hello\"}\"#);\n\n        http::Response::default()\n    });\n\n    let url = format!(\"http://{}/test\", server.addr());\n\n    let client = Client::builder().no_proxy().build().unwrap();\n\n    let res = client\n        .post(&url)\n        .headers({\n            let mut headers = HeaderMap::new();\n            headers.insert(ACCEPT, HeaderValue::from_static(\"*/*\"));\n            headers.insert(CONTENT_TYPE, HeaderValue::from_static(\"application/json\"));\n            headers.insert(USER_AGENT, HeaderValue::from_static(\"my-test-client\"));\n            headers.insert(AUTHORIZATION, HeaderValue::from_static(\"Bearer test-token\"));\n            headers.insert(REFERER, HeaderValue::from_static(\"https://example.com\"));\n            headers.append(\"cookie\", HeaderValue::from_static(\"cookie1=cookie1\"));\n            headers.append(\"cookie\", HeaderValue::from_static(\"cookie2=cookie2\"));\n            headers.insert(CACHE_CONTROL, HeaderValue::from_static(\"no-cache\"));\n            headers\n        })\n        .orig_headers({\n            let mut orig_headers = OrigHeaderMap::new();\n            orig_headers.insert(\"user-agent\");\n            orig_headers.insert(\"accept\");\n            orig_headers.insert(\"content-type\");\n            orig_headers.insert(\"authorization\");\n            orig_headers.insert(\"referer\");\n            orig_headers.insert(\"cookie\");\n            orig_headers.insert(\"cache-control\");\n            
orig_headers\n        })\n        .body(r#\"{\"message\":\"hello\"}\"#)\n        .send()\n        .await\n        .unwrap();\n\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n}\n\n#[tokio::test]\nasync fn test_overwrite_headers() {\n    let server = server::http(move |req| async move {\n        let path = req.uri().path();\n        match path {\n            \"/1\" => {\n                assert_eq!(req.method(), \"GET\");\n                assert_eq!(req.headers()[USER_AGENT], \"my-custom-agent\");\n                let mut cookies = req.headers().get_all(COOKIE).iter();\n                assert_eq!(cookies.next().unwrap(), \"a=b\");\n                assert_eq!(cookies.next().unwrap(), \"c=d\");\n                assert_eq!(cookies.next(), None);\n            }\n            \"/2\" => {\n                assert_eq!(req.method(), \"GET\");\n                assert_eq!(req.headers()[USER_AGENT], \"my-custom-agent\");\n                let mut cookies = req.headers().get_all(COOKIE).iter();\n                assert_eq!(cookies.next().unwrap(), \"e=f\");\n                assert_eq!(cookies.next().unwrap(), \"g=h\");\n                assert_eq!(cookies.next(), None);\n            }\n            \"/3\" => {\n                assert_eq!(req.method(), \"GET\");\n                assert_eq!(req.headers()[USER_AGENT], \"default-agent\");\n                let mut cookies = req.headers().get_all(COOKIE).iter();\n                assert_eq!(cookies.next().unwrap(), \"a=b\");\n                assert_eq!(cookies.next().unwrap(), \"c=d\");\n                assert_eq!(cookies.next(), None);\n            }\n            \"/4\" => {\n                assert_eq!(req.method(), \"GET\");\n                assert_eq!(req.headers()[USER_AGENT], \"default-agent\");\n                let mut cookies = req.headers().get_all(COOKIE).iter();\n                assert_eq!(cookies.next().unwrap(), \"e=f\");\n                assert_eq!(cookies.next().unwrap(), \"g=h\");\n                
assert_eq!(cookies.next(), None);\n            }\n            _ => {\n                unreachable!(\"Unexpected request path: {}\", path);\n            }\n        }\n\n        http::Response::default()\n    });\n\n    let mut default_headers = header::HeaderMap::new();\n    default_headers.insert(\n        USER_AGENT,\n        header::HeaderValue::from_static(\"default-agent\"),\n    );\n    default_headers.insert(COOKIE, header::HeaderValue::from_static(\"a=b\"));\n    default_headers.append(COOKIE, header::HeaderValue::from_static(\"c=d\"));\n\n    let client = Client::builder()\n        .no_proxy()\n        .default_headers(default_headers)\n        .build()\n        .unwrap();\n\n    let url = format!(\"http://{}/1\", server.addr());\n    let res = client\n        .get(&url)\n        .header(USER_AGENT, \"my-custom-agent\")\n        .send()\n        .await\n        .unwrap();\n    assert_eq!(res.uri(), url.as_str());\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n\n    let url = format!(\"http://{}/2\", server.addr());\n    let res = client\n        .get(&url)\n        .header(USER_AGENT, \"my-custom-agent\")\n        .header(COOKIE, \"e=f\")\n        .header(COOKIE, \"g=h\")\n        .send()\n        .await\n        .unwrap();\n    assert_eq!(res.uri(), url.as_str());\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n\n    let url = format!(\"http://{}/3\", server.addr());\n    let res = client.get(&url).send().await.unwrap();\n    assert_eq!(res.uri(), url.as_str());\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n\n    let url = format!(\"http://{}/4\", server.addr());\n    let res = client\n        .get(&url)\n        .header(COOKIE, \"e=f\")\n        .header(COOKIE, \"g=h\")\n        .send()\n        .await\n        .unwrap();\n    assert_eq!(res.uri(), url.as_str());\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n}\n\n#[tokio::test]\nasync fn donot_set_content_length_0_if_have_no_body() {\n    let server = server::http(move |req| 
async move {\n        let headers = req.headers();\n        assert_eq!(headers.get(CONTENT_LENGTH), None);\n        assert!(headers.get(CONTENT_TYPE).is_none());\n        assert!(headers.get(TRANSFER_ENCODING).is_none());\n        http::Response::default()\n    });\n\n    let url = format!(\"http://{}/content-length\", server.addr());\n    let res = Client::builder()\n        .no_proxy()\n        .build()\n        .expect(\"client builder\")\n        .get(&url)\n        .send()\n        .await\n        .expect(\"request\");\n\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n}\n\n#[tokio::test]\nasync fn user_agent() {\n    let server = server::http(move |req| async move {\n        assert_eq!(req.headers()[\"user-agent\"], \"wreq-test-agent\");\n        http::Response::default()\n    });\n\n    let url = format!(\"http://{}/ua\", server.addr());\n    let res = Client::builder()\n        .user_agent(\"wreq-test-agent\")\n        .build()\n        .expect(\"client builder\")\n        .get(&url)\n        .send()\n        .await\n        .expect(\"request\");\n\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n}\n\n#[tokio::test]\nasync fn response_text() {\n    let _ = env_logger::try_init();\n\n    let server = server::http(move |_req| async { http::Response::new(\"Hello\".into()) });\n\n    let client = Client::new();\n\n    let res = client\n        .get(format!(\"http://{}/text\", server.addr()))\n        .send()\n        .await\n        .expect(\"Failed to get\");\n    assert_eq!(res.content_length(), Some(5));\n    let text = res.text().await.expect(\"Failed to get text\");\n    assert_eq!(\"Hello\", text);\n}\n\n#[tokio::test]\nasync fn response_bytes() {\n    let _ = env_logger::try_init();\n\n    let server = server::http(move |_req| async { http::Response::new(\"Hello\".into()) });\n\n    let client = Client::new();\n\n    let res = client\n        .get(format!(\"http://{}/bytes\", server.addr()))\n        .send()\n        .await\n        
.expect(\"Failed to get\");\n    assert_eq!(res.content_length(), Some(5));\n    let bytes = res.bytes().await.expect(\"res.bytes()\");\n    assert_eq!(\"Hello\", bytes);\n}\n\n#[tokio::test]\n#[cfg(feature = \"json\")]\nasync fn response_json() {\n    let _ = env_logger::try_init();\n\n    let server = server::http(move |_req| async { http::Response::new(\"\\\"Hello\\\"\".into()) });\n\n    let client = Client::new();\n\n    let res = client\n        .get(format!(\"http://{}/json\", server.addr()))\n        .send()\n        .await\n        .expect(\"Failed to get\");\n    let text = res.json::<String>().await.expect(\"Failed to get json\");\n    assert_eq!(\"Hello\", text);\n}\n\n#[tokio::test]\nasync fn body_pipe_response() {\n    let _ = env_logger::try_init();\n\n    let server = server::http(move |req| async move {\n        if req.uri() == \"/get\" {\n            http::Response::new(\"pipe me\".into())\n        } else {\n            assert_eq!(req.uri(), \"/pipe\");\n            assert_eq!(req.headers()[\"content-length\"], \"7\");\n\n            let full: Vec<u8> = req\n                .into_body()\n                .collect()\n                .await\n                .expect(\"must succeed\")\n                .to_bytes()\n                .to_vec();\n\n            assert_eq!(full, b\"pipe me\");\n\n            http::Response::default()\n        }\n    });\n\n    let client = Client::new();\n\n    let res1 = client\n        .get(format!(\"http://{}/get\", server.addr()))\n        .send()\n        .await\n        .expect(\"get1\");\n\n    assert_eq!(res1.status(), wreq::StatusCode::OK);\n    assert_eq!(res1.content_length(), Some(7));\n\n    // and now ensure we can \"pipe\" the response to another request\n    let res2 = client\n        .post(format!(\"http://{}/pipe\", server.addr()))\n        .body(res1)\n        .send()\n        .await\n        .expect(\"res2\");\n\n    assert_eq!(res2.status(), wreq::StatusCode::OK);\n}\n\n#[tokio::test]\nasync fn 
overridden_dns_resolution_with_gai() {\n    let _ = env_logger::builder().is_test(true).try_init();\n    let server = server::http(move |_req| async { http::Response::new(\"Hello\".into()) });\n\n    let overridden_domain = \"rust-lang.org\";\n    let url = format!(\n        \"http://{overridden_domain}:{}/domain_override\",\n        server.addr().port()\n    );\n    let client = Client::builder()\n        .no_proxy()\n        .resolve(overridden_domain, server.addr())\n        .build()\n        .expect(\"client builder\");\n    let req = client.get(&url);\n    let res = req.send().await.expect(\"request\");\n\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n    let text = res.text().await.expect(\"Failed to get text\");\n    assert_eq!(\"Hello\", text);\n}\n\n#[tokio::test]\nasync fn overridden_dns_resolution_with_gai_multiple() {\n    let _ = env_logger::builder().is_test(true).try_init();\n    let server = server::http(move |_req| async { http::Response::new(\"Hello\".into()) });\n\n    let overridden_domain = \"rust-lang.org\";\n    let url = format!(\n        \"http://{overridden_domain}:{}/domain_override\",\n        server.addr().port()\n    );\n    // the server runs on IPv4 localhost, so provide both IPv4 and IPv6 and let the happy eyeballs\n    // algorithm decide which address to use.\n    let client = Client::builder()\n        .no_proxy()\n        .resolve_to_addrs(\n            overridden_domain,\n            [\n                std::net::SocketAddr::new(\n                    std::net::IpAddr::V6(std::net::Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)),\n                    server.addr().port(),\n                ),\n                server.addr(),\n            ],\n        )\n        .build()\n        .expect(\"client builder\");\n    let req = client.get(&url);\n    let res = req.send().await.expect(\"request\");\n\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n    let text = res.text().await.expect(\"Failed to get text\");\n    
assert_eq!(\"Hello\", text);\n}\n\n#[cfg(feature = \"hickory-dns\")]\n#[tokio::test]\nasync fn overridden_dns_resolution_with_hickory_dns() {\n    let _ = env_logger::builder().is_test(true).try_init();\n    let server = server::http(move |_req| async { http::Response::new(\"Hello\".into()) });\n\n    let overridden_domain = \"rust-lang.org\";\n    let url = format!(\n        \"http://{overridden_domain}:{}/domain_override\",\n        server.addr().port()\n    );\n    let client = Client::builder()\n        .no_proxy()\n        .resolve(overridden_domain, server.addr())\n        .build()\n        .expect(\"client builder\");\n    let req = client.get(&url);\n    let res = req.send().await.expect(\"request\");\n\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n    let text = res.text().await.expect(\"Failed to get text\");\n    assert_eq!(\"Hello\", text);\n}\n\n#[cfg(feature = \"hickory-dns\")]\n#[tokio::test]\nasync fn overridden_dns_resolution_with_hickory_dns_multiple() {\n    let _ = env_logger::builder().is_test(true).try_init();\n    let server = server::http(move |_req| async { http::Response::new(\"Hello\".into()) });\n\n    let overridden_domain = \"rust-lang.org\";\n    let url = format!(\n        \"http://{overridden_domain}:{}/domain_override\",\n        server.addr().port()\n    );\n    // the server runs on IPv4 localhost, so provide both IPv4 and IPv6 and let the happy eyeballs\n    // algorithm decide which address to use.\n    let client = Client::builder()\n        .no_proxy()\n        .resolve_to_addrs(\n            overridden_domain,\n            [\n                std::net::SocketAddr::new(\n                    std::net::IpAddr::V6(std::net::Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)),\n                    server.addr().port(),\n                ),\n                server.addr(),\n            ],\n        )\n        .build()\n        .expect(\"client builder\");\n    let req = client.get(&url);\n    let res = 
req.send().await.expect(\"request\");\n\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n    let text = res.text().await.expect(\"Failed to get text\");\n    assert_eq!(\"Hello\", text);\n}\n\n#[test]\n#[cfg(feature = \"json\")]\nfn add_json_default_content_type_if_not_set_manually() {\n    let mut map = HashMap::new();\n    map.insert(\"body\", \"json\");\n    let content_type = http::HeaderValue::from_static(\"application/vnd.api+json\");\n    let req = Client::new()\n        .post(\"https://google.com/\")\n        .header(CONTENT_TYPE, &content_type)\n        .json(&map)\n        .build()\n        .expect(\"request is not valid\");\n\n    assert_eq!(content_type, req.headers().get(CONTENT_TYPE).unwrap());\n}\n\n#[test]\n#[cfg(feature = \"json\")]\nfn update_json_content_type_if_set_manually() {\n    let mut map = HashMap::new();\n    map.insert(\"body\", \"json\");\n    let req = Client::new()\n        .post(\"https://google.com/\")\n        .json(&map)\n        .build()\n        .expect(\"request is not valid\");\n\n    assert_eq!(\"application/json\", req.headers().get(CONTENT_TYPE).unwrap());\n}\n\n#[tokio::test]\nasync fn test_tls_info() {\n    let resp = Client::builder()\n        .tls_info(true)\n        .build()\n        .expect(\"client builder\")\n        .get(\"https://google.com\")\n        .send()\n        .await\n        .expect(\"response\");\n    let tls_info = resp.extensions().get::<TlsInfo>().unwrap();\n    let peer_certificate = tls_info.peer_certificate();\n    assert!(peer_certificate.is_some());\n    let der = peer_certificate.unwrap();\n    assert_eq!(der[0], 0x30); // ASN.1 SEQUENCE\n\n    let resp = Client::builder()\n        .build()\n        .expect(\"client builder\")\n        .get(\"https://google.com\")\n        .send()\n        .await\n        .expect(\"response\");\n    let tls_info = resp.extensions().get::<TlsInfo>();\n    assert!(tls_info.is_none());\n}\n\n#[tokio::test]\nasync fn close_connection_after_idle_timeout() {\n  
  let mut server = server::http(move |_| async move { http::Response::default() });\n\n    let client = Client::builder()\n        .pool_idle_timeout(std::time::Duration::from_secs(1))\n        .build()\n        .unwrap();\n\n    let url = format!(\"http://{}\", server.addr());\n\n    client.get(&url).send().await.unwrap();\n\n    tokio::time::sleep(std::time::Duration::from_secs(2)).await;\n\n    assert!(\n        server\n            .events()\n            .iter()\n            .any(|e| matches!(e, server::Event::ConnectionClosed))\n    );\n}\n\n#[tokio::test]\nasync fn http1_reason_phrase() {\n    let server = server::low_level_with_response(|_raw_request, client_socket| {\n        Box::new(async move {\n            client_socket\n                .write_all(b\"HTTP/1.1 418 I'm not a teapot\\r\\nContent-Length: 0\\r\\n\\r\\n\")\n                .await\n                .expect(\"response write_all failed\");\n        })\n    });\n\n    let client = Client::new();\n\n    let res = client\n        .get(format!(\"http://{}\", server.addr()))\n        .send()\n        .await\n        .expect(\"Failed to get\");\n\n    assert_eq!(\n        res.error_for_status().unwrap_err().to_string(),\n        format!(\n            \"HTTP status client error (418 I'm not a teapot) for uri (http://{}/)\",\n            server.addr()\n        )\n    );\n}\n\n#[tokio::test]\nasync fn error_has_url() {\n    let u = \"http://does.not.exist.local/ever\";\n    let err = wreq::get(u).send().await.unwrap_err();\n    assert_eq!(\n        err.uri().map(ToString::to_string).as_deref(),\n        Some(u),\n        \"{err:?}\"\n    );\n}\n\n#[tokio::test]\nasync fn http1_only() {\n    let server = server::http(move |_| async move { http::Response::default() });\n\n    let resp = Client::builder()\n        .http1_only()\n        .build()\n        .unwrap()\n        .get(format!(\"http://{}\", server.addr()))\n        .send()\n        .await\n        .unwrap();\n\n    assert_eq!(resp.version(), 
wreq::Version::HTTP_11);\n\n    let resp = wreq::get(format!(\"http://{}\", server.addr()))\n        .version(Version::HTTP_11)\n        .send()\n        .await\n        .unwrap();\n\n    assert_eq!(resp.version(), wreq::Version::HTTP_11);\n}\n\n#[tokio::test]\nasync fn http2_only() {\n    let server = server::http(move |_| async move { http::Response::default() });\n\n    let resp = Client::builder()\n        .http2_only()\n        .build()\n        .unwrap()\n        .get(format!(\"http://{}\", server.addr()))\n        .send()\n        .await\n        .unwrap();\n\n    assert_eq!(resp.version(), wreq::Version::HTTP_2);\n\n    let resp = wreq::get(format!(\"http://{}\", server.addr()))\n        .version(Version::HTTP_2)\n        .send()\n        .await\n        .unwrap();\n\n    assert_eq!(resp.version(), wreq::Version::HTTP_2);\n}\n\n#[tokio::test]\nasync fn connection_pool_cache() {\n    let client = Client::default();\n    let url = \"https://hyper.rs\";\n\n    let resp = client\n        .get(url)\n        .version(http::Version::HTTP_2)\n        .send()\n        .await\n        .unwrap();\n\n    assert_eq!(resp.status(), wreq::StatusCode::OK);\n    assert_eq!(resp.version(), http::Version::HTTP_2);\n\n    let resp = client\n        .get(url)\n        .version(http::Version::HTTP_11)\n        .send()\n        .await\n        .unwrap();\n\n    assert_eq!(resp.status(), wreq::StatusCode::OK);\n    assert_eq!(resp.version(), http::Version::HTTP_11);\n\n    let resp = client\n        .get(url)\n        .version(http::Version::HTTP_2)\n        .send()\n        .await\n        .unwrap();\n\n    assert_eq!(resp.status(), wreq::StatusCode::OK);\n    assert_eq!(resp.version(), http::Version::HTTP_2);\n}\n\n#[tokio::test]\nasync fn http1_send_case_sensitive_headers() {\n    // Create a request with a case-sensitive header\n    let mut orig_headers = OrigHeaderMap::new();\n    orig_headers.insert(\"X-custom-header\");\n    orig_headers.insert(\"Host\");\n\n    let resp = 
wreq::get(\"https://tls.browserleaks.com\")\n        .header(\"X-Custom-Header\", \"value\")\n        .orig_headers(orig_headers)\n        .version(Version::HTTP_11)\n        .send()\n        .await\n        .unwrap()\n        .text()\n        .await\n        .unwrap();\n\n    assert!(resp.contains(\"X-custom-header\"));\n    assert!(resp.contains(\"Host\"));\n}\n\n#[tokio::test]\nasync fn tunnel_includes_proxy_auth_with_multiple_proxies() {\n    let url = \"http://hyper.rs.local/prox\";\n    let server1 = server::http(move |req| {\n        assert_eq!(req.method(), \"GET\");\n        assert_eq!(req.uri(), url);\n        assert_eq!(req.headers()[\"host\"], \"hyper.rs.local\");\n        assert_eq!(\n            req.headers()[\"proxy-authorization\"],\n            \"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==\"\n        );\n        assert_eq!(req.headers()[\"proxy-header\"], \"proxy2\");\n        async { http::Response::default() }\n    });\n\n    let proxy_url = format!(\"http://Aladdin:open%20sesame@{}\", server1.addr());\n\n    let mut headers1 = wreq::header::HeaderMap::new();\n    headers1.insert(\"proxy-header\", \"proxy1\".parse().unwrap());\n\n    let mut headers2 = wreq::header::HeaderMap::new();\n    headers2.insert(\"proxy-header\", \"proxy2\".parse().unwrap());\n\n    let client = Client::builder()\n        // When processing proxy headers, the first one is iterated,\n        // and if the current URL does not match, the proxy is skipped\n        .proxy(\n            wreq::Proxy::https(&proxy_url)\n                .unwrap()\n                .custom_http_headers(headers1.clone()),\n        )\n        // When processing proxy headers, the second one is iterated,\n        // and for the current URL matching, the proxy will be used\n        .proxy(\n            wreq::Proxy::http(&proxy_url)\n                .unwrap()\n                .custom_http_headers(headers2.clone()),\n        )\n        .build()\n        .unwrap();\n\n    let res = 
client.get(url).send().await.unwrap();\n\n    assert_eq!(res.uri(), url);\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n\n    let client = Client::builder()\n        // When processing proxy headers, the first one is iterated,\n        // and for the current URL matching, the proxy will be used\n        .proxy(\n            wreq::Proxy::http(&proxy_url)\n                .unwrap()\n                .custom_http_headers(headers2),\n        )\n        // When processing proxy headers, the second one is iterated,\n        // and if the current URL does not match, the proxy is skipped\n        .proxy(\n            wreq::Proxy::https(&proxy_url)\n                .unwrap()\n                .custom_http_headers(headers1),\n        )\n        .build()\n        .unwrap();\n\n    let res = client.get(url).send().await.unwrap();\n\n    assert_eq!(res.uri(), url);\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n}\n\n#[tokio::test]\nasync fn skip_default_headers() {\n    let server = server::http(move |req| async move {\n        let path = req.uri().path();\n        match path {\n            \"/skip\" => {\n                assert_eq!(req.method(), \"GET\");\n                assert_eq!(req.headers().get(USER_AGENT), None);\n                assert_eq!(req.headers().get(ACCEPT), None);\n            }\n            \"/no_skip\" => {\n                assert_eq!(req.method(), \"GET\");\n                assert_eq!(req.headers()[USER_AGENT], \"test-agent\");\n                assert_eq!(req.headers()[ACCEPT], \"*/*\");\n            }\n            _ => unreachable!(\"Unexpected request path: {path}\"),\n        }\n\n        http::Response::default()\n    });\n\n    let client = Client::builder()\n        .default_headers({\n            let mut headers = wreq::header::HeaderMap::new();\n            headers.insert(USER_AGENT, \"test-agent\".parse().unwrap());\n            headers.insert(ACCEPT, \"*/*\".parse().unwrap());\n            headers\n        })\n        .no_proxy()\n    
    .build()\n        .unwrap();\n\n    let url = format!(\"http://{}/skip\", server.addr());\n    let res = client\n        .get(&url)\n        .default_headers(false)\n        .send()\n        .await\n        .unwrap();\n    assert_eq!(res.uri(), url.as_str());\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n\n    let url = format!(\"http://{}/no_skip\", server.addr());\n    let res = client.get(&url).send().await.unwrap();\n    assert_eq!(res.uri(), url.as_str());\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n}\n\n#[tokio::test]\nasync fn test_client_same_header_values_append() {\n    let server = server::http(move |req| async move {\n        let path = req.uri().path();\n        match path {\n            \"/duplicate-cookies\" => {\n                let cookie_values: Vec<_> = req.headers().get_all(header::COOKIE).iter().collect();\n                assert_eq!(cookie_values.len(), 1);\n                assert_eq!(cookie_values[0], \"duplicate=same_value\");\n            }\n            \"/no-duplicate-cookies\" => {\n                let cookie_values: Vec<_> = req.headers().get_all(header::COOKIE).iter().collect();\n                assert_eq!(cookie_values.len(), 3);\n                assert_eq!(cookie_values[0], \"duplicate=same_value\");\n                assert_eq!(cookie_values[1], \"unique1=value1\");\n                assert_eq!(cookie_values[2], \"unique2=value2\");\n            }\n            _ => unreachable!(\"Unexpected request path: {}\", path),\n        }\n\n        http::Response::default()\n    });\n\n    let client = Client::builder()\n        .no_proxy()\n        .default_headers({\n            let mut headers = HeaderMap::new();\n            headers.insert(\n                header::COOKIE,\n                HeaderValue::from_static(\"duplicate=same_value\"),\n            );\n            headers.append(header::COOKIE, HeaderValue::from_static(\"unique1=value1\"));\n            headers.append(header::COOKIE, 
HeaderValue::from_static(\"unique2=value2\"));\n            headers\n        })\n        .build()\n        .unwrap();\n\n    let res = client\n        .get(format!(\"http://{}/duplicate-cookies\", server.addr()))\n        .header(header::COOKIE, \"duplicate=same_value\")\n        .send()\n        .await\n        .unwrap();\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n\n    let res = client\n        .get(format!(\"http://{}/no-duplicate-cookies\", server.addr()))\n        .send()\n        .await\n        .unwrap();\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n}\n\n#[cfg(all(\n    feature = \"gzip\",\n    feature = \"brotli\",\n    feature = \"deflate\",\n    feature = \"zstd\"\n))]\n#[tokio::test]\nasync fn test_client_default_accept_encoding() {\n    let server = server::http(move |req| async move {\n        let accept_encoding = req.headers().get(header::ACCEPT_ENCODING).unwrap();\n        if req.uri() == \"/default\" {\n            assert_eq!(accept_encoding, \"zstd\");\n        }\n\n        if req.uri() == \"/custom\" {\n            assert_eq!(accept_encoding, \"gzip\");\n        }\n\n        http::Response::default()\n    });\n\n    let client = Client::builder()\n        .default_headers({\n            let mut headers = HeaderMap::new();\n            headers.insert(header::ACCEPT_ENCODING, HeaderValue::from_static(\"zstd\"));\n            headers\n        })\n        .no_proxy()\n        .build()\n        .unwrap();\n\n    let _ = client\n        .get(format!(\"http://{}/default\", server.addr()))\n        .send()\n        .await\n        .unwrap();\n\n    let _ = client\n        .get(format!(\"http://{}/custom\", server.addr()))\n        .header(header::ACCEPT_ENCODING, \"gzip\")\n        .send()\n        .await\n        .unwrap();\n}\n\n#[tokio::test]\nasync fn response_trailers() {\n    let server = server::http(move |req| async move {\n        assert_eq!(req.uri().path(), \"/trailers\");\n\n        let body = 
Full::new(Bytes::from(\"HelloWorld!\")).with_trailers(async move {\n            let mut trailers = http::HeaderMap::new();\n            trailers.insert(\"chunky-trailer1\", HeaderValue::from_static(\"value1\"));\n            trailers.insert(\"chunky-trailer2\", HeaderValue::from_static(\"value2\"));\n            Some(Ok(trailers))\n        });\n        let mut resp = http::Response::new(wreq::Body::wrap(body));\n        resp.headers_mut().insert(\n            header::TRAILER,\n            header::HeaderValue::from_static(\"chunky-trailer1, chunky-trailer2\"),\n        );\n        resp.headers_mut().insert(\n            header::TRANSFER_ENCODING,\n            header::HeaderValue::from_static(\"chunked\"),\n        );\n\n        resp\n    });\n\n    let mut res = wreq::get(format!(\"http://{}/trailers\", server.addr()))\n        .header(header::TE, \"trailers\")\n        .send()\n        .await\n        .expect(\"Failed to get response\");\n\n    assert_eq!(res.status(), StatusCode::OK);\n\n    let mut body_content = Vec::new();\n    let mut trailers = HeaderMap::default();\n    while let Some(chunk) = res.frame().await {\n        match chunk\n            .unwrap()\n            .into_data()\n            .map_err(|frame| frame.into_trailers())\n        {\n            Ok(res) => {\n                body_content.extend_from_slice(&res);\n            }\n            Err(Ok(res)) => {\n                trailers.extend(res);\n            }\n            _ => (),\n        }\n    }\n\n    let body = String::from_utf8(body_content).expect(\"Invalid UTF-8\");\n    assert_eq!(body, \"HelloWorld!\");\n    assert_eq!(trailers[\"chunky-trailer1\"], \"value1\");\n    assert_eq!(trailers[\"chunky-trailer2\"], \"value2\");\n}\n"
  },
  {
    "path": "tests/connector_layers.rs",
    "content": "mod support;\n\nuse std::time::Duration;\n\nuse futures_util::future::join_all;\nuse pretty_env_logger::env_logger;\nuse support::{layer::DelayLayer, server};\nuse tower::{layer::util::Identity, limit::ConcurrencyLimitLayer, timeout::TimeoutLayer};\nuse wreq::Client;\n\n#[tokio::test]\nasync fn non_op_layer() {\n    let _ = env_logger::try_init();\n\n    let server = server::http(move |_req| async { http::Response::default() });\n\n    let url = format!(\"http://{}\", server.addr());\n\n    let client = Client::builder()\n        .connector_layer(Identity::new())\n        .no_proxy()\n        .build()\n        .unwrap();\n\n    let res = client.get(url).send().await;\n\n    assert!(res.is_ok());\n}\n\n#[tokio::test]\nasync fn non_op_layer_with_timeout() {\n    let _ = env_logger::try_init();\n\n    let client = Client::builder()\n        .connector_layer(Identity::new())\n        .connect_timeout(Duration::from_millis(200))\n        .no_proxy()\n        .build()\n        .unwrap();\n\n    // never returns\n    let url = \"http://192.0.2.1:81/slow\";\n\n    let res = client.get(url).send().await;\n\n    let err = res.unwrap_err();\n\n    assert!(err.is_connect() && err.is_timeout());\n}\n\n#[tokio::test]\nasync fn with_connect_timeout_layer_never_returning() {\n    let _ = env_logger::try_init();\n\n    let client = Client::builder()\n        .connector_layer(TimeoutLayer::new(Duration::from_millis(100)))\n        .no_proxy()\n        .build()\n        .unwrap();\n\n    // never returns\n    let url = \"http://192.0.2.1:81/slow\";\n\n    let res = client.get(url).send().await;\n\n    let err = res.unwrap_err();\n\n    assert!(err.is_connect() && err.is_timeout());\n}\n\n#[tokio::test]\nasync fn with_connect_timeout_layer_slow() {\n    let _ = env_logger::try_init();\n\n    let server = server::http(move |_req| async { http::Response::default() });\n\n    let url = format!(\"http://{}\", server.addr());\n\n    let client = Client::builder()\n        
.connector_layer(DelayLayer::new(Duration::from_millis(200)))\n        .connector_layer(TimeoutLayer::new(Duration::from_millis(100)))\n        .no_proxy()\n        .build()\n        .unwrap();\n\n    let res = client.get(url).send().await;\n\n    let err = res.unwrap_err();\n\n    assert!(err.is_connect() && err.is_timeout());\n}\n\n#[tokio::test]\nasync fn multiple_timeout_layers_under_threshold() {\n    let _ = env_logger::try_init();\n\n    let server = server::http(move |_req| async { http::Response::default() });\n\n    let url = format!(\"http://{}\", server.addr());\n\n    let client = Client::builder()\n        .connector_layer(DelayLayer::new(Duration::from_millis(100)))\n        .connector_layer(TimeoutLayer::new(Duration::from_millis(200)))\n        .connector_layer(TimeoutLayer::new(Duration::from_millis(300)))\n        .connector_layer(TimeoutLayer::new(Duration::from_millis(500)))\n        .connect_timeout(Duration::from_millis(200))\n        .no_proxy()\n        .build()\n        .unwrap();\n\n    let res = client.get(url).send().await;\n\n    assert!(res.is_ok());\n}\n\n#[tokio::test]\nasync fn multiple_timeout_layers_over_threshold() {\n    let _ = env_logger::try_init();\n\n    let server = server::http(move |_req| async { http::Response::default() });\n\n    let url = format!(\"http://{}\", server.addr());\n\n    let client = Client::builder()\n        .connector_layer(DelayLayer::new(Duration::from_millis(100)))\n        .connector_layer(TimeoutLayer::new(Duration::from_millis(50)))\n        .connector_layer(TimeoutLayer::new(Duration::from_millis(50)))\n        .connector_layer(TimeoutLayer::new(Duration::from_millis(50)))\n        .connect_timeout(Duration::from_millis(50))\n        .no_proxy()\n        .build()\n        .unwrap();\n\n    let res = client.get(url).send().await;\n\n    let err = res.unwrap_err();\n\n    assert!(err.is_connect() && err.is_timeout());\n}\n\n#[tokio::test]\nasync fn with_concurrency_limit_layer_timeout() {\n    
let _ = env_logger::try_init();\n\n    let server = server::http(move |_req| async { http::Response::default() });\n\n    let url = format!(\"http://{}\", server.addr());\n\n    let client = Client::builder()\n        .connector_layer(DelayLayer::new(Duration::from_millis(100)))\n        .connector_layer(ConcurrencyLimitLayer::new(1))\n        .timeout(Duration::from_millis(200))\n        .pool_max_idle_per_host(0) // disable connection reuse to force resource contention on the concurrency limit semaphore\n        .no_proxy()\n        .build()\n        .unwrap();\n\n    // first call succeeds since no resource contention\n    let res = client.get(url.clone()).send().await;\n    assert!(res.is_ok());\n\n    // 3 calls where the second two wait on the first and time out\n    let mut futures = Vec::new();\n    for _ in 0..3 {\n        futures.push(client.clone().get(url.clone()).send());\n    }\n\n    let all_res = join_all(futures).await;\n\n    let timed_out = all_res\n        .into_iter()\n        .any(|res| res.is_err_and(|err| err.is_timeout()));\n\n    assert!(timed_out, \"at least one request should have timed out\");\n}\n\n#[tokio::test]\nasync fn with_concurrency_limit_layer_success() {\n    let _ = env_logger::try_init();\n\n    let server = server::http(move |_req| async { http::Response::default() });\n\n    let url = format!(\"http://{}\", server.addr());\n\n    let client = Client::builder()\n        .connector_layer(DelayLayer::new(Duration::from_millis(100)))\n        .connector_layer(TimeoutLayer::new(Duration::from_millis(200)))\n        .connector_layer(ConcurrencyLimitLayer::new(1))\n        .timeout(Duration::from_millis(1000))\n        .pool_max_idle_per_host(0) // disable connection reuse to force resource contention on the concurrency limit semaphore\n        .no_proxy()\n        .build()\n        .unwrap();\n\n    // first call succeeds since no resource contention\n    let res = client.get(url.clone()).send().await;\n    
assert!(res.is_ok());\n\n    // 3 calls of which all are individually below the inner timeout\n    // and the sum is below outer timeout which affects the final call which waited the whole time\n    let mut futures = Vec::new();\n    for _ in 0..3 {\n        futures.push(client.clone().get(url.clone()).send());\n    }\n\n    let all_res = join_all(futures).await;\n\n    for res in all_res.into_iter() {\n        assert!(\n            res.is_ok(),\n            \"neither outer long timeout or inner short timeout should be exceeded\"\n        );\n    }\n}\n\n#[tokio::test]\nasync fn no_generic_bounds_required_for_client_new() {\n    let _ = env_logger::try_init();\n\n    let server = server::http(move |_req| async { http::Response::default() });\n\n    let url = format!(\"http://{}\", server.addr());\n\n    let res = wreq::get(url).send().await;\n\n    assert!(res.is_ok());\n}\n"
  },
  {
    "path": "tests/cookie.rs",
    "content": "mod support;\n\nuse http::{Version, header::COOKIE};\nuse support::server;\nuse wreq::{Client, cookie::Jar};\n\n#[tokio::test]\nasync fn cookie_response_accessor() {\n    let server = server::http(move |_req| async move {\n        http::Response::builder()\n            .header(\"Set-Cookie\", \"key=val\")\n            .header(\n                \"Set-Cookie\",\n                \"expires=1; Expires=Wed, 21 Oct 2015 07:28:00 GMT\",\n            )\n            .header(\"Set-Cookie\", \"path=1; Path=/the-path\")\n            .header(\"Set-Cookie\", \"maxage=1; Max-Age=100\")\n            .header(\"Set-Cookie\", \"domain=1; Domain=mydomain\")\n            .header(\"Set-Cookie\", \"secure=1; Secure\")\n            .header(\"Set-Cookie\", \"httponly=1; HttpOnly\")\n            .header(\"Set-Cookie\", \"samesitelax=1; SameSite=Lax\")\n            .header(\"Set-Cookie\", \"samesitestrict=1; SameSite=Strict\")\n            .body(Default::default())\n            .unwrap()\n    });\n\n    let url = format!(\"http://{}/\", server.addr());\n    let res = wreq::get(&url).send().await.unwrap();\n\n    let cookies = res.cookies().collect::<Vec<_>>();\n\n    // key=val\n    assert_eq!(cookies[0].name(), \"key\");\n    assert_eq!(cookies[0].value(), \"val\");\n\n    // expires\n    assert_eq!(cookies[1].name(), \"expires\");\n    assert_eq!(\n        cookies[1].expires().unwrap(),\n        std::time::SystemTime::UNIX_EPOCH + std::time::Duration::from_secs(1_445_412_480)\n    );\n\n    // path\n    assert_eq!(cookies[2].name(), \"path\");\n    assert_eq!(cookies[2].path().unwrap(), \"/the-path\");\n\n    // max-age\n    assert_eq!(cookies[3].name(), \"maxage\");\n    assert_eq!(\n        cookies[3].max_age().unwrap(),\n        std::time::Duration::from_secs(100)\n    );\n\n    // domain\n    assert_eq!(cookies[4].name(), \"domain\");\n    assert_eq!(cookies[4].domain().unwrap(), \"mydomain\");\n\n    // secure\n    assert_eq!(cookies[5].name(), \"secure\");\n    
assert!(cookies[5].secure());\n\n    // httponly\n    assert_eq!(cookies[6].name(), \"httponly\");\n    assert!(cookies[6].http_only());\n\n    // samesitelax\n    assert_eq!(cookies[7].name(), \"samesitelax\");\n    assert!(cookies[7].same_site_lax());\n\n    // samesitestrict\n    assert_eq!(cookies[8].name(), \"samesitestrict\");\n    assert!(cookies[8].same_site_strict());\n}\n\n#[tokio::test]\nasync fn cookie_store_simple() {\n    let server = server::http(move |req| async move {\n        if req.uri() == \"/2\" {\n            assert_eq!(req.headers()[\"cookie\"], \"key=val\");\n        }\n        http::Response::builder()\n            .header(\"Set-Cookie\", \"key=val; HttpOnly\")\n            .body(Default::default())\n            .unwrap()\n    });\n\n    let client = Client::builder().cookie_store(true).build().unwrap();\n\n    let url = format!(\"http://{}/\", server.addr());\n    client.get(&url).send().await.unwrap();\n\n    let url = format!(\"http://{}/2\", server.addr());\n    client.get(&url).send().await.unwrap();\n}\n\n#[tokio::test]\nasync fn cookie_store_overwrite_existing() {\n    let server = server::http(move |req| async move {\n        if req.uri() == \"/\" {\n            http::Response::builder()\n                .header(\"Set-Cookie\", \"key=val\")\n                .body(Default::default())\n                .unwrap()\n        } else if req.uri() == \"/2\" {\n            assert_eq!(req.headers()[\"cookie\"], \"key=val\");\n            http::Response::builder()\n                .header(\"Set-Cookie\", \"key=val2\")\n                .body(Default::default())\n                .unwrap()\n        } else {\n            assert_eq!(req.uri(), \"/3\");\n            assert_eq!(req.headers()[\"cookie\"], \"key=val2\");\n            http::Response::default()\n        }\n    });\n\n    let client = Client::builder().cookie_store(true).build().unwrap();\n\n    let url = format!(\"http://{}/\", server.addr());\n    
client.get(&url).send().await.unwrap();\n\n    let url = format!(\"http://{}/2\", server.addr());\n    client.get(&url).send().await.unwrap();\n\n    let url = format!(\"http://{}/3\", server.addr());\n    client.get(&url).send().await.unwrap();\n}\n\n#[tokio::test]\nasync fn cookie_store_max_age() {\n    let server = server::http(move |req| async move {\n        assert_eq!(req.headers().get(\"cookie\"), None);\n        http::Response::builder()\n            .header(\"Set-Cookie\", \"key=val; Max-Age=0\")\n            .body(Default::default())\n            .unwrap()\n    });\n\n    let client = Client::builder().cookie_store(true).build().unwrap();\n    let url = format!(\"http://{}/\", server.addr());\n    client.get(&url).send().await.unwrap();\n    client.get(&url).send().await.unwrap();\n}\n\n#[tokio::test]\nasync fn cookie_store_expires() {\n    let server = server::http(move |req| async move {\n        assert_eq!(req.headers().get(\"cookie\"), None);\n        http::Response::builder()\n            .header(\n                \"Set-Cookie\",\n                \"key=val; Expires=Wed, 21 Oct 2015 07:28:00 GMT\",\n            )\n            .body(Default::default())\n            .unwrap()\n    });\n\n    let client = Client::builder().cookie_store(true).build().unwrap();\n\n    let url = format!(\"http://{}/\", server.addr());\n    client.get(&url).send().await.unwrap();\n    client.get(&url).send().await.unwrap();\n}\n\n#[tokio::test]\nasync fn cookie_store_path() {\n    let server = server::http(move |req| async move {\n        if req.uri() == \"/\" {\n            assert_eq!(req.headers().get(\"cookie\"), None);\n            http::Response::builder()\n                .header(\"Set-Cookie\", \"key=val; Path=/subpath\")\n                .body(Default::default())\n                .unwrap()\n        } else {\n            assert_eq!(req.uri(), \"/subpath\");\n            assert_eq!(req.headers()[\"cookie\"], \"key=val\");\n            http::Response::default()\n        
}\n    });\n\n    let client = Client::builder().cookie_store(true).build().unwrap();\n\n    let url = format!(\"http://{}/\", server.addr());\n    client.get(&url).send().await.unwrap();\n    client.get(&url).send().await.unwrap();\n\n    let url = format!(\"http://{}/subpath\", server.addr());\n    client.get(&url).send().await.unwrap();\n}\n\n#[tokio::test]\nasync fn cookie_store_stores_response_cookie_with_manual_cookie() {\n    let server = server::http(|req| async move {\n        if req.uri() == \"/1\" {\n            assert_eq!(req.headers()[\"cookie\"], \"key=val\");\n        }\n\n        if req.uri() == \"/2\" {\n            assert_eq!(req.headers()[\"cookie\"], \"key=val\");\n        }\n\n        http::Response::builder()\n            .header(\"Set-Cookie\", \"key=val; HttpOnly\")\n            .body(Default::default())\n            .unwrap()\n    });\n\n    let client = Client::builder().cookie_store(true).build().unwrap();\n\n    let set_url = format!(\"http://{}/1\", server.addr());\n    let _ = client\n        .get(&set_url)\n        .header(\"cookie\", \"key=val\")\n        .send()\n        .await\n        .unwrap();\n\n    let check_url = format!(\"http://{}/2\", server.addr());\n    let _ = client.get(&check_url).send().await.unwrap();\n}\n\n#[tokio::test]\nasync fn cookie_request_level_compression() {\n    let server = server::http(|req| async move {\n        match req.uri().path() {\n            \"/set\" => http::Response::builder()\n                .header(\"Set-Cookie\", \"cookie1=value1\")\n                .header(\"Set-Cookie\", \"cookie2=value2\")\n                .header(\"Set-Cookie\", \"cookie3=value3\")\n                .body(Default::default())\n                .unwrap(),\n            \"/default\" | \"/compressed\" => {\n                assert_eq!(req.version(), Version::HTTP_11);\n                let cookies = req\n                    .headers()\n                    .get(COOKIE)\n                    .and_then(|v| v.to_str().ok())\n       
             .unwrap();\n                assert!(\n                    cookies.contains(\"cookie1=value1\")\n                        && cookies.contains(\"cookie2=value2\")\n                        && cookies.contains(\"cookie3=value3\")\n                );\n                assert!(cookies.contains(\"; \"));\n                http::Response::default()\n            }\n            \"/uncompressed\" => {\n                assert_eq!(req.version(), Version::HTTP_2);\n                let cookies: Vec<_> = req\n                    .headers()\n                    .get_all(COOKIE)\n                    .iter()\n                    .map(|v| v.to_str().unwrap())\n                    .collect();\n\n                assert_eq!(cookies.len(), 3);\n\n                assert!(cookies.contains(&\"cookie1=value1\"));\n                assert!(cookies.contains(&\"cookie2=value2\"));\n                assert!(cookies.contains(&\"cookie3=value3\"));\n                http::Response::default()\n            }\n            _ => unreachable!(),\n        }\n    });\n\n    let base_url = format!(\"http://{}\", server.addr());\n\n    // Create a client with this jar\n    let client = Client::builder()\n        .cookie_provider(Jar::default())\n        .build()\n        .unwrap();\n\n    // Set cookies\n    client\n        .get(format!(\"{}/set\", base_url))\n        .send()\n        .await\n        .unwrap();\n\n    // Request with default behavior (compressed)\n    client\n        .get(format!(\"{}/default\", base_url))\n        .send()\n        .await\n        .unwrap();\n\n    // Request with compressed cookies\n    client\n        .get(format!(\"{}/compressed\", base_url))\n        .version(Version::HTTP_11)\n        .send()\n        .await\n        .unwrap();\n\n    // Request with uncompressed cookies\n    client\n        .get(format!(\"{}/uncompressed\", base_url))\n        .version(Version::HTTP_2)\n        .send()\n        .await\n        .unwrap();\n}\n"
  },
  {
    "path": "tests/deflate.rs",
    "content": "mod support;\nuse std::io::Write;\n\nuse flate2::{Compression, write::ZlibEncoder};\nuse support::server;\nuse tokio::io::AsyncWriteExt;\n\n#[tokio::test]\nasync fn deflate_response() {\n    deflate_case(10_000, 4096).await;\n}\n\n#[tokio::test]\nasync fn deflate_single_byte_chunks() {\n    deflate_case(10, 1).await;\n}\n\n#[tokio::test]\nasync fn test_deflate_empty_body() {\n    let server = server::http(move |req| async move {\n        assert_eq!(req.method(), \"HEAD\");\n\n        http::Response::builder()\n            .header(\"content-encoding\", \"deflate\")\n            .body(Default::default())\n            .unwrap()\n    });\n\n    let res = wreq::head(format!(\"http://{}/deflate\", server.addr()))\n        .send()\n        .await\n        .unwrap();\n\n    let body = res.text().await.unwrap();\n\n    assert_eq!(body, \"\");\n}\n\n#[tokio::test]\nasync fn test_accept_header_is_not_changed_if_set() {\n    let server = server::http(move |req| async move {\n        assert_eq!(req.headers()[\"accept\"], \"application/json\");\n        assert!(\n            req.headers()[\"accept-encoding\"]\n                .to_str()\n                .unwrap()\n                .contains(\"deflate\")\n        );\n        http::Response::default()\n    });\n\n    let res = wreq::get(format!(\"http://{}/accept\", server.addr()))\n        .header(\n            wreq::header::ACCEPT,\n            wreq::header::HeaderValue::from_static(\"application/json\"),\n        )\n        .send()\n        .await\n        .unwrap();\n\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n}\n\n#[tokio::test]\nasync fn test_accept_encoding_header_is_not_changed_if_set() {\n    let server = server::http(move |req| async move {\n        assert_eq!(req.headers()[\"accept\"], \"*/*\");\n        assert_eq!(req.headers()[\"accept-encoding\"], \"identity\");\n        http::Response::default()\n    });\n\n    let res = wreq::get(format!(\"http://{}/accept-encoding\", server.addr()))\n      
  .header(wreq::header::ACCEPT, \"*/*\")\n        .header(\n            wreq::header::ACCEPT_ENCODING,\n            wreq::header::HeaderValue::from_static(\"identity\"),\n        )\n        .send()\n        .await\n        .unwrap();\n\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n}\n\nasync fn deflate_case(response_size: usize, chunk_size: usize) {\n    use futures_util::stream::StreamExt;\n\n    let content: String = (0..response_size).fold(String::new(), |mut acc, i| {\n        acc.push_str(&format!(\"test {i}\"));\n        acc\n    });\n    let mut encoder = ZlibEncoder::new(Vec::new(), Compression::default());\n    encoder.write_all(content.as_bytes()).unwrap();\n    let deflated_content = encoder.finish().unwrap();\n\n    let mut response = format!(\n        \"\\\n         HTTP/1.1 200 OK\\r\\n\\\n         Server: test-accept\\r\\n\\\n         Content-Encoding: deflate\\r\\n\\\n         Content-Length: {}\\r\\n\\\n         \\r\\n\",\n        &deflated_content.len()\n    )\n    .into_bytes();\n    response.extend(&deflated_content);\n\n    let server = server::http(move |req| {\n        assert!(\n            req.headers()[\"accept-encoding\"]\n                .to_str()\n                .unwrap()\n                .contains(\"deflate\")\n        );\n\n        let deflated = deflated_content.clone();\n        async move {\n            let len = deflated.len();\n            let stream =\n                futures_util::stream::unfold((deflated, 0), move |(deflated, pos)| async move {\n                    let chunk = deflated.chunks(chunk_size).nth(pos)?.to_vec();\n\n                    Some((chunk, (deflated, pos + 1)))\n                });\n\n            let body = wreq::Body::wrap_stream(stream.map(Ok::<_, std::convert::Infallible>));\n\n            http::Response::builder()\n                .header(\"content-encoding\", \"deflate\")\n                .header(\"content-length\", len)\n                .body(body)\n                .unwrap()\n        }\n    
});\n\n    let res = wreq::get(format!(\"http://{}/deflate\", server.addr()))\n        .send()\n        .await\n        .expect(\"response\");\n\n    let body = res.text().await.expect(\"text\");\n    assert_eq!(body, content);\n}\n\nconst COMPRESSED_RESPONSE_HEADERS: &[u8] = b\"HTTP/1.1 200 OK\\x0d\\x0a\\\n            Content-Type: text/plain\\x0d\\x0a\\\n            Connection: keep-alive\\x0d\\x0a\\\n            Content-Encoding: deflate\\x0d\\x0a\";\n\nconst RESPONSE_CONTENT: &str = \"some message here\";\n\nfn deflate_compress(input: &[u8]) -> Vec<u8> {\n    let mut encoder = ZlibEncoder::new(Vec::new(), Compression::default());\n    encoder.write_all(input).unwrap();\n    encoder.finish().unwrap()\n}\n\n#[tokio::test]\nasync fn test_non_chunked_non_fragmented_response() {\n    let server = server::low_level_with_response(|_raw_request, client_socket| {\n        Box::new(async move {\n            let deflated_content = deflate_compress(RESPONSE_CONTENT.as_bytes());\n            let content_length_header =\n                format!(\"Content-Length: {}\\r\\n\\r\\n\", deflated_content.len()).into_bytes();\n            let response = [\n                COMPRESSED_RESPONSE_HEADERS,\n                &content_length_header,\n                &deflated_content,\n            ]\n            .concat();\n\n            client_socket\n                .write_all(response.as_slice())\n                .await\n                .expect(\"response write_all failed\");\n            client_socket.flush().await.expect(\"response flush failed\");\n        })\n    });\n\n    let res = wreq::get(format!(\"http://{}/\", server.addr()))\n        .send()\n        .await\n        .expect(\"response\");\n\n    assert_eq!(res.text().await.expect(\"text\"), RESPONSE_CONTENT);\n}\n\n#[tokio::test]\nasync fn test_chunked_fragmented_response_1() {\n    const DELAY_BETWEEN_RESPONSE_PARTS: tokio::time::Duration =\n        tokio::time::Duration::from_millis(1000);\n    const DELAY_MARGIN: 
tokio::time::Duration = tokio::time::Duration::from_millis(50);\n\n    let server = server::low_level_with_response(|_raw_request, client_socket| {\n        Box::new(async move {\n            let deflated_content = deflate_compress(RESPONSE_CONTENT.as_bytes());\n            let response_first_part = [\n                COMPRESSED_RESPONSE_HEADERS,\n                format!(\n                    \"Transfer-Encoding: chunked\\r\\n\\r\\n{:x}\\r\\n\",\n                    deflated_content.len()\n                )\n                .as_bytes(),\n                &deflated_content,\n            ]\n            .concat();\n            let response_second_part = b\"\\r\\n0\\r\\n\\r\\n\";\n\n            client_socket\n                .write_all(response_first_part.as_slice())\n                .await\n                .expect(\"response_first_part write_all failed\");\n            client_socket\n                .flush()\n                .await\n                .expect(\"response_first_part flush failed\");\n\n            tokio::time::sleep(DELAY_BETWEEN_RESPONSE_PARTS).await;\n\n            client_socket\n                .write_all(response_second_part)\n                .await\n                .expect(\"response_second_part write_all failed\");\n            client_socket\n                .flush()\n                .await\n                .expect(\"response_second_part flush failed\");\n        })\n    });\n\n    let start = tokio::time::Instant::now();\n    let res = wreq::get(format!(\"http://{}/\", server.addr()))\n        .send()\n        .await\n        .expect(\"response\");\n\n    assert_eq!(res.text().await.expect(\"text\"), RESPONSE_CONTENT);\n    assert!(start.elapsed() >= DELAY_BETWEEN_RESPONSE_PARTS - DELAY_MARGIN);\n}\n\n#[tokio::test]\nasync fn test_chunked_fragmented_response_2() {\n    const DELAY_BETWEEN_RESPONSE_PARTS: tokio::time::Duration =\n        tokio::time::Duration::from_millis(1000);\n    const DELAY_MARGIN: tokio::time::Duration = 
tokio::time::Duration::from_millis(50);\n\n    let server = server::low_level_with_response(|_raw_request, client_socket| {\n        Box::new(async move {\n            let deflated_content = deflate_compress(RESPONSE_CONTENT.as_bytes());\n            let response_first_part = [\n                COMPRESSED_RESPONSE_HEADERS,\n                format!(\n                    \"Transfer-Encoding: chunked\\r\\n\\r\\n{:x}\\r\\n\",\n                    deflated_content.len()\n                )\n                .as_bytes(),\n                &deflated_content,\n                b\"\\r\\n\",\n            ]\n            .concat();\n            let response_second_part = b\"0\\r\\n\\r\\n\";\n\n            client_socket\n                .write_all(response_first_part.as_slice())\n                .await\n                .expect(\"response_first_part write_all failed\");\n            client_socket\n                .flush()\n                .await\n                .expect(\"response_first_part flush failed\");\n\n            tokio::time::sleep(DELAY_BETWEEN_RESPONSE_PARTS).await;\n\n            client_socket\n                .write_all(response_second_part)\n                .await\n                .expect(\"response_second_part write_all failed\");\n            client_socket\n                .flush()\n                .await\n                .expect(\"response_second_part flush failed\");\n        })\n    });\n\n    let start = tokio::time::Instant::now();\n    let res = wreq::get(format!(\"http://{}/\", server.addr()))\n        .send()\n        .await\n        .expect(\"response\");\n\n    assert_eq!(res.text().await.expect(\"text\"), RESPONSE_CONTENT);\n    assert!(start.elapsed() >= DELAY_BETWEEN_RESPONSE_PARTS - DELAY_MARGIN);\n}\n\n#[tokio::test]\nasync fn test_chunked_fragmented_response_with_extra_bytes() {\n    const DELAY_BETWEEN_RESPONSE_PARTS: tokio::time::Duration =\n        tokio::time::Duration::from_millis(1000);\n    const DELAY_MARGIN: tokio::time::Duration = 
tokio::time::Duration::from_millis(50);\n\n    let server = server::low_level_with_response(|_raw_request, client_socket| {\n        Box::new(async move {\n            let deflated_content = deflate_compress(RESPONSE_CONTENT.as_bytes());\n            let response_first_part = [\n                COMPRESSED_RESPONSE_HEADERS,\n                format!(\n                    \"Transfer-Encoding: chunked\\r\\n\\r\\n{:x}\\r\\n\",\n                    deflated_content.len()\n                )\n                .as_bytes(),\n                &deflated_content,\n            ]\n            .concat();\n            let response_second_part = b\"\\r\\n2ab\\r\\n0\\r\\n\\r\\n\";\n\n            client_socket\n                .write_all(response_first_part.as_slice())\n                .await\n                .expect(\"response_first_part write_all failed\");\n            client_socket\n                .flush()\n                .await\n                .expect(\"response_first_part flush failed\");\n\n            tokio::time::sleep(DELAY_BETWEEN_RESPONSE_PARTS).await;\n\n            client_socket\n                .write_all(response_second_part)\n                .await\n                .expect(\"response_second_part write_all failed\");\n            client_socket\n                .flush()\n                .await\n                .expect(\"response_second_part flush failed\");\n        })\n    });\n\n    let start = tokio::time::Instant::now();\n    let res = wreq::get(format!(\"http://{}/\", server.addr()))\n        .send()\n        .await\n        .expect(\"response\");\n\n    let err = res.text().await.expect_err(\"there must be an error\");\n    assert!(err.is_decode());\n    assert!(start.elapsed() >= DELAY_BETWEEN_RESPONSE_PARTS - DELAY_MARGIN);\n}\n"
  },
  {
    "path": "tests/emulate.rs",
    "content": "use std::{\n    io::{self, Write},\n    time::Duration,\n};\n\nuse brotli::{CompressorWriter as BrotliEncoder, Decompressor as BrotliDecoder};\nuse flate2::{Compression, read::ZlibDecoder, write::ZlibEncoder};\nuse wreq::{\n    Client, Emulation,\n    http1::Http1Options,\n    http2::{\n        Http2Options, PseudoId, PseudoOrder, SettingId, SettingsOrder, StreamDependency, StreamId,\n    },\n    tls::{\n        AlpnProtocol, ExtensionType, KeyShare, TlsOptions, TlsVersion,\n        compress::{CertificateCompressionAlgorithm, CertificateCompressor, Codec},\n    },\n};\nuse zstd::stream::{Decoder as ZstdDecoder, Encoder as ZstdEncoder};\n\n#[derive(Debug)]\nstruct BrotliCompressor {\n    q: u32,\n    lgwin: u32,\n}\n\n#[derive(Debug)]\nstruct ZlibCompressor;\n\n#[derive(Debug)]\nstruct ZstdCompressor;\n\nimpl CertificateCompressor for BrotliCompressor {\n    fn compress(&self) -> Codec {\n        let q = self.q;\n        let lgwin = self.lgwin;\n        Codec::Dynamic(Box::new(move |input, output| {\n            let mut writer = BrotliEncoder::new(output, input.len(), q, lgwin);\n            writer.write_all(input)?;\n            writer.flush()?;\n            Ok(())\n        }))\n    }\n\n    fn decompress(&self) -> Codec {\n        Codec::Pointer(|input, output| {\n            let mut reader = BrotliDecoder::new(input, 4096);\n            io::copy(&mut reader, output)?;\n            Ok(())\n        })\n    }\n\n    fn algorithm(&self) -> CertificateCompressionAlgorithm {\n        CertificateCompressionAlgorithm::BROTLI\n    }\n}\n\nimpl CertificateCompressor for ZlibCompressor {\n    fn compress(&self) -> Codec {\n        Codec::Pointer(|input, output| {\n            let mut encoder = ZlibEncoder::new(output, Compression::default());\n            encoder.write_all(input)?;\n            encoder.finish()?;\n            Ok(())\n        })\n    }\n\n    fn decompress(&self) -> Codec {\n        Codec::Pointer(|input, output| {\n            let mut reader 
= ZlibDecoder::new(input);\n            io::copy(&mut reader, output)?;\n            Ok(())\n        })\n    }\n\n    fn algorithm(&self) -> CertificateCompressionAlgorithm {\n        CertificateCompressionAlgorithm::ZLIB\n    }\n}\n\nimpl CertificateCompressor for ZstdCompressor {\n    fn compress(&self) -> Codec {\n        Codec::Pointer(|input, output| {\n            let mut encoder = ZstdEncoder::new(output, 0)?;\n            encoder.write_all(input)?;\n            encoder.finish()?;\n            Ok(())\n        })\n    }\n\n    fn decompress(&self) -> Codec {\n        Codec::Pointer(|input, output| {\n            let mut reader = ZstdDecoder::new(input)?;\n            io::copy(&mut reader, output)?;\n            Ok(())\n        })\n    }\n\n    fn algorithm(&self) -> CertificateCompressionAlgorithm {\n        CertificateCompressionAlgorithm::ZSTD\n    }\n}\n\nmacro_rules! join {\n    ($sep:expr, $first:expr $(, $rest:expr)*) => {\n        concat!($first $(, $sep, $rest)*)\n    };\n}\n\nfn tls_options_template() -> TlsOptions {\n    //  TLS options config\n    TlsOptions::builder()\n        .curves_list(join!(\n            \":\",\n            \"X25519MLKEM768\",\n            \"X25519\",\n            \"P-256\",\n            \"P-384\",\n            \"P-521\",\n            \"ffdhe2048\",\n            \"ffdhe3072\"\n        ))\n        .cipher_list(join!(\n            \":\",\n            \"TLS_AES_128_GCM_SHA256\",\n            \"TLS_CHACHA20_POLY1305_SHA256\",\n            \"TLS_AES_256_GCM_SHA384\",\n            \"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\",\n            \"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\",\n            \"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\",\n            \"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\",\n            \"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\",\n            \"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\",\n            \"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\",\n            \"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\",\n        
    \"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\",\n            \"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\",\n            \"TLS_RSA_WITH_AES_128_GCM_SHA256\",\n            \"TLS_RSA_WITH_AES_256_GCM_SHA384\",\n            \"TLS_RSA_WITH_AES_128_CBC_SHA\",\n            \"TLS_RSA_WITH_AES_256_CBC_SHA\"\n        ))\n        .sigalgs_list(join!(\n            \":\",\n            \"ecdsa_secp256r1_sha256\",\n            \"ecdsa_secp384r1_sha384\",\n            \"ecdsa_secp521r1_sha512\",\n            \"rsa_pss_rsae_sha256\",\n            \"rsa_pss_rsae_sha384\",\n            \"rsa_pss_rsae_sha512\",\n            \"rsa_pkcs1_sha256\",\n            \"rsa_pkcs1_sha384\",\n            \"rsa_pkcs1_sha512\",\n            \"ecdsa_sha1\",\n            \"rsa_pkcs1_sha1\"\n        ))\n        .delegated_credentials(join!(\n            \":\",\n            \"ecdsa_secp256r1_sha256\",\n            \"ecdsa_secp384r1_sha384\",\n            \"ecdsa_secp521r1_sha512\",\n            \"ecdsa_sha1\"\n        ))\n        .certificate_compressors(vec![\n            &ZlibCompressor as _,\n            &BrotliCompressor { q: 11, lgwin: 32 } as _,\n            &ZstdCompressor as _,\n        ])\n        .alpn_protocols([AlpnProtocol::HTTP2, AlpnProtocol::HTTP1])\n        .record_size_limit(0x4001)\n        .pre_shared_key(true)\n        .enable_ech_grease(true)\n        .enable_ocsp_stapling(true)\n        .enable_signed_cert_timestamps(true)\n        .min_tls_version(TlsVersion::TLS_1_2)\n        .max_tls_version(TlsVersion::TLS_1_3)\n        .key_shares(vec![\n            KeyShare::X25519_MLKEM768,\n            KeyShare::X25519,\n            KeyShare::P256,\n        ])\n        .preserve_tls13_cipher_list(true)\n        .aes_hw_override(true)\n        .random_aes_hw_override(true)\n        .extension_permutation(&[\n            ExtensionType::SERVER_NAME,\n            ExtensionType::EXTENDED_MASTER_SECRET,\n            ExtensionType::RENEGOTIATE,\n            ExtensionType::SUPPORTED_GROUPS,\n            
ExtensionType::EC_POINT_FORMATS,\n            ExtensionType::SESSION_TICKET,\n            ExtensionType::APPLICATION_LAYER_PROTOCOL_NEGOTIATION,\n            ExtensionType::STATUS_REQUEST,\n            ExtensionType::DELEGATED_CREDENTIAL,\n            ExtensionType::CERTIFICATE_TIMESTAMP,\n            ExtensionType::KEY_SHARE,\n            ExtensionType::SUPPORTED_VERSIONS,\n            ExtensionType::SIGNATURE_ALGORITHMS,\n            ExtensionType::PSK_KEY_EXCHANGE_MODES,\n            ExtensionType::RECORD_SIZE_LIMIT,\n            ExtensionType::CERT_COMPRESSION,\n            ExtensionType::ENCRYPTED_CLIENT_HELLO,\n            ExtensionType::PADDING,\n        ])\n        .build()\n}\n\nfn http1_options_template() -> Http1Options {\n    // HTTP/1 options config\n    Http1Options::builder()\n        .allow_obsolete_multiline_headers_in_responses(true)\n        .max_headers(100)\n        .build()\n}\n\nfn http2_options_template() -> Http2Options {\n    // HTTP/2 headers frame pseudo-header order\n    let headers_pseudo_order = PseudoOrder::builder()\n        .extend([\n            PseudoId::Method,\n            PseudoId::Path,\n            PseudoId::Authority,\n            PseudoId::Scheme,\n        ])\n        .build();\n\n    // HTTP/2 settings frame order\n    let settings_order = SettingsOrder::builder()\n        .extend([\n            SettingId::HeaderTableSize,\n            SettingId::EnablePush,\n            SettingId::MaxConcurrentStreams,\n            SettingId::InitialWindowSize,\n            SettingId::MaxFrameSize,\n            SettingId::MaxHeaderListSize,\n            SettingId::EnableConnectProtocol,\n            SettingId::NoRfc7540Priorities,\n        ])\n        .build();\n\n    Http2Options::builder()\n        .header_table_size(65536)\n        .enable_push(false)\n        .initial_window_size(131072)\n        .max_frame_size(16384)\n        .initial_connection_window_size(12517377 + 65535)\n        
.headers_stream_dependency(StreamDependency::new(StreamId::ZERO, 41, false))\n        .headers_pseudo_order(headers_pseudo_order)\n        .settings_order(settings_order)\n        .build()\n}\n\nfn emulation_template() -> Emulation {\n    // This provider encapsulates TLS, HTTP/1, HTTP/2\n    Emulation::builder()\n        .tls_options(tls_options_template())\n        .http1_options(http1_options_template())\n        .http2_options(http2_options_template())\n        .build(Default::default())\n}\n\n#[tokio::test]\nasync fn test_emulation() -> wreq::Result<()> {\n    let client = Client::builder()\n        .emulation(emulation_template())\n        .connect_timeout(Duration::from_secs(10))\n        .tls_cert_verification(false)\n        .build()?;\n\n    let text = client\n        .get(\"https://tls.browserleaks.com/\")\n        .send()\n        .await?\n        .text()\n        .await?;\n\n    assert!(\n        text.contains(\"t13d1717h2_5b57614c22b0_3cbfd9057e0d\"),\n        \"Response ja4_hash fingerprint not found: {text}\"\n    );\n    assert!(\n        text.contains(\"6ea73faa8fc5aac76bded7bd238f6433\"),\n        \"Response akamai_hash fingerprint not found: {text}\"\n    );\n\n    Ok(())\n}\n\n#[tokio::test]\nasync fn test_request_with_emulation() -> wreq::Result<()> {\n    let client = Client::builder()\n        .connect_timeout(Duration::from_secs(10))\n        .tls_cert_verification(false)\n        .build()?;\n\n    let text = client\n        .get(\"https://tls.browserleaks.com/\")\n        .emulation(emulation_template())\n        .send()\n        .await?\n        .text()\n        .await?;\n\n    assert!(\n        text.contains(\"t13d1717h2_5b57614c22b0_3cbfd9057e0d\"),\n        \"Response ja4_hash fingerprint not found: {text}\"\n    );\n    assert!(\n        text.contains(\"6ea73faa8fc5aac76bded7bd238f6433\"),\n        \"Response akamai_hash fingerprint not found: {text}\"\n    );\n\n    Ok(())\n}\n\n#[tokio::test]\nasync fn 
test_request_with_emulation_tls() -> wreq::Result<()> {\n    let client = Client::builder()\n        .connect_timeout(Duration::from_secs(10))\n        .tls_cert_verification(false)\n        .build()?;\n\n    let emulation = Emulation::builder()\n        .tls_options(tls_options_template())\n        .build(Default::default());\n\n    let text = client\n        .get(\"https://tls.browserleaks.com/\")\n        .emulation(emulation)\n        .send()\n        .await?\n        .text()\n        .await?;\n\n    assert!(\n        text.contains(\"t13d1717h2_5b57614c22b0_3cbfd9057e0d\"),\n        \"Response ja4_hash fingerprint not found: {text}\"\n    );\n\n    Ok(())\n}\n\n#[tokio::test]\nasync fn test_request_with_emulation_http2() -> wreq::Result<()> {\n    let client = Client::builder()\n        .connect_timeout(Duration::from_secs(10))\n        .tls_cert_verification(false)\n        .build()?;\n\n    let emulation = Emulation::builder()\n        .http2_options(http2_options_template())\n        .build(Default::default());\n\n    let text = client\n        .get(\"https://tls.browserleaks.com/\")\n        .emulation(emulation)\n        .send()\n        .await?\n        .text()\n        .await?;\n\n    assert!(\n        text.contains(\"6ea73faa8fc5aac76bded7bd238f6433\"),\n        \"Response akamai_hash fingerprint not found: {text}\"\n    );\n\n    Ok(())\n}\n"
  },
  {
    "path": "tests/gzip.rs",
    "content": "mod support;\nuse std::io::Write;\n\nuse flate2::{Compression, write::GzEncoder};\nuse support::server;\nuse tokio::io::AsyncWriteExt;\n\n#[tokio::test]\nasync fn gzip_response() {\n    gzip_case(10_000, 4096).await;\n}\n\n#[tokio::test]\nasync fn gzip_single_byte_chunks() {\n    gzip_case(10, 1).await;\n}\n\n#[tokio::test]\nasync fn test_gzip_empty_body() {\n    let server = server::http(move |req| async move {\n        assert_eq!(req.method(), \"HEAD\");\n\n        http::Response::builder()\n            .header(\"content-encoding\", \"gzip\")\n            .body(Default::default())\n            .unwrap()\n    });\n\n    let res = wreq::head(format!(\"http://{}/gzip\", server.addr()))\n        .send()\n        .await\n        .unwrap();\n\n    let body = res.text().await.unwrap();\n\n    assert_eq!(body, \"\");\n}\n\n#[tokio::test]\nasync fn test_accept_header_is_not_changed_if_set() {\n    let server = server::http(move |req| async move {\n        assert_eq!(req.headers()[\"accept\"], \"application/json\");\n        assert!(\n            req.headers()[\"accept-encoding\"]\n                .to_str()\n                .unwrap()\n                .contains(\"gzip\")\n        );\n        http::Response::default()\n    });\n\n    let res = wreq::get(format!(\"http://{}/accept\", server.addr()))\n        .header(\n            wreq::header::ACCEPT,\n            wreq::header::HeaderValue::from_static(\"application/json\"),\n        )\n        .send()\n        .await\n        .unwrap();\n\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n}\n\n#[tokio::test]\nasync fn test_accept_encoding_header_is_not_changed_if_set() {\n    let server = server::http(move |req| async move {\n        assert_eq!(req.headers()[\"accept\"], \"*/*\");\n        assert_eq!(req.headers()[\"accept-encoding\"], \"identity\");\n        http::Response::default()\n    });\n\n    let res = wreq::get(format!(\"http://{}/accept-encoding\", server.addr()))\n        
.header(wreq::header::ACCEPT, \"*/*\")\n        .header(\n            wreq::header::ACCEPT_ENCODING,\n            wreq::header::HeaderValue::from_static(\"identity\"),\n        )\n        .send()\n        .await\n        .unwrap();\n\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n}\n\nasync fn gzip_case(response_size: usize, chunk_size: usize) {\n    use futures_util::stream::StreamExt;\n\n    let content: String = (0..response_size).fold(String::new(), |mut acc, i| {\n        acc.push_str(&format!(\"test {i}\"));\n        acc\n    });\n    let mut encoder = GzEncoder::new(Vec::new(), Compression::default());\n    encoder.write_all(content.as_bytes()).unwrap();\n    let gzipped_content = encoder.finish().unwrap();\n\n    let mut response = format!(\n        \"\\\n         HTTP/1.1 200 OK\\r\\n\\\n         Server: test-accept\\r\\n\\\n         Content-Encoding: gzip\\r\\n\\\n         Content-Length: {}\\r\\n\\\n         \\r\\n\",\n        &gzipped_content.len()\n    )\n    .into_bytes();\n    response.extend(&gzipped_content);\n\n    let server = server::http(move |req| {\n        assert!(\n            req.headers()[\"accept-encoding\"]\n                .to_str()\n                .unwrap()\n                .contains(\"gzip\")\n        );\n\n        let gzipped = gzipped_content.clone();\n        async move {\n            let len = gzipped.len();\n            let stream =\n                futures_util::stream::unfold((gzipped, 0), move |(gzipped, pos)| async move {\n                    let chunk = gzipped.chunks(chunk_size).nth(pos)?.to_vec();\n\n                    Some((chunk, (gzipped, pos + 1)))\n                });\n\n            let body = wreq::Body::wrap_stream(stream.map(Ok::<_, std::convert::Infallible>));\n\n            http::Response::builder()\n                .header(\"content-encoding\", \"gzip\")\n                .header(\"content-length\", len)\n                .body(body)\n                .unwrap()\n        }\n    });\n\n    let res = 
wreq::get(format!(\"http://{}/gzip\", server.addr()))\n        .send()\n        .await\n        .expect(\"response\");\n\n    let body = res.text().await.expect(\"text\");\n    assert_eq!(body, content);\n}\n\nconst COMPRESSED_RESPONSE_HEADERS: &[u8] = b\"HTTP/1.1 200 OK\\x0d\\x0a\\\n            Content-Type: text/plain\\x0d\\x0a\\\n            Connection: keep-alive\\x0d\\x0a\\\n            Content-Encoding: gzip\\x0d\\x0a\";\n\nconst RESPONSE_CONTENT: &str = \"some message here\";\n\nfn gzip_compress(input: &[u8]) -> Vec<u8> {\n    let mut encoder = GzEncoder::new(Vec::new(), Compression::default());\n    encoder.write_all(input).unwrap();\n    encoder.finish().unwrap()\n}\n\n#[tokio::test]\nasync fn test_non_chunked_non_fragmented_response() {\n    let server = server::low_level_with_response(|_raw_request, client_socket| {\n        Box::new(async move {\n            let gzipped_content = gzip_compress(RESPONSE_CONTENT.as_bytes());\n            let content_length_header =\n                format!(\"Content-Length: {}\\r\\n\\r\\n\", gzipped_content.len()).into_bytes();\n            let response = [\n                COMPRESSED_RESPONSE_HEADERS,\n                &content_length_header,\n                &gzipped_content,\n            ]\n            .concat();\n\n            client_socket\n                .write_all(response.as_slice())\n                .await\n                .expect(\"response write_all failed\");\n            client_socket.flush().await.expect(\"response flush failed\");\n        })\n    });\n\n    let res = wreq::get(format!(\"http://{}/\", server.addr()))\n        .send()\n        .await\n        .expect(\"response\");\n\n    assert_eq!(res.text().await.expect(\"text\"), RESPONSE_CONTENT);\n}\n\n#[tokio::test]\nasync fn test_chunked_fragmented_response_1() {\n    const DELAY_BETWEEN_RESPONSE_PARTS: tokio::time::Duration =\n        tokio::time::Duration::from_millis(1000);\n    const DELAY_MARGIN: tokio::time::Duration = 
tokio::time::Duration::from_millis(50);\n\n    let server = server::low_level_with_response(|_raw_request, client_socket| {\n        Box::new(async move {\n            let gzipped_content = gzip_compress(RESPONSE_CONTENT.as_bytes());\n            let response_first_part = [\n                COMPRESSED_RESPONSE_HEADERS,\n                format!(\n                    \"Transfer-Encoding: chunked\\r\\n\\r\\n{:x}\\r\\n\",\n                    gzipped_content.len()\n                )\n                .as_bytes(),\n                &gzipped_content,\n            ]\n            .concat();\n            let response_second_part = b\"\\r\\n0\\r\\n\\r\\n\";\n\n            client_socket\n                .write_all(response_first_part.as_slice())\n                .await\n                .expect(\"response_first_part write_all failed\");\n            client_socket\n                .flush()\n                .await\n                .expect(\"response_first_part flush failed\");\n\n            tokio::time::sleep(DELAY_BETWEEN_RESPONSE_PARTS).await;\n\n            client_socket\n                .write_all(response_second_part)\n                .await\n                .expect(\"response_second_part write_all failed\");\n            client_socket\n                .flush()\n                .await\n                .expect(\"response_second_part flush failed\");\n        })\n    });\n\n    let start = tokio::time::Instant::now();\n    let res = wreq::get(format!(\"http://{}/\", server.addr()))\n        .send()\n        .await\n        .expect(\"response\");\n\n    assert_eq!(res.text().await.expect(\"text\"), RESPONSE_CONTENT);\n    assert!(start.elapsed() >= DELAY_BETWEEN_RESPONSE_PARTS - DELAY_MARGIN);\n}\n\n#[tokio::test]\nasync fn test_chunked_fragmented_response_2() {\n    const DELAY_BETWEEN_RESPONSE_PARTS: tokio::time::Duration =\n        tokio::time::Duration::from_millis(1000);\n    const DELAY_MARGIN: tokio::time::Duration = tokio::time::Duration::from_millis(50);\n\n    let 
server = server::low_level_with_response(|_raw_request, client_socket| {\n        Box::new(async move {\n            let gzipped_content = gzip_compress(RESPONSE_CONTENT.as_bytes());\n            let response_first_part = [\n                COMPRESSED_RESPONSE_HEADERS,\n                format!(\n                    \"Transfer-Encoding: chunked\\r\\n\\r\\n{:x}\\r\\n\",\n                    gzipped_content.len()\n                )\n                .as_bytes(),\n                &gzipped_content,\n                b\"\\r\\n\",\n            ]\n            .concat();\n            let response_second_part = b\"0\\r\\n\\r\\n\";\n\n            client_socket\n                .write_all(response_first_part.as_slice())\n                .await\n                .expect(\"response_first_part write_all failed\");\n            client_socket\n                .flush()\n                .await\n                .expect(\"response_first_part flush failed\");\n\n            tokio::time::sleep(DELAY_BETWEEN_RESPONSE_PARTS).await;\n\n            client_socket\n                .write_all(response_second_part)\n                .await\n                .expect(\"response_second_part write_all failed\");\n            client_socket\n                .flush()\n                .await\n                .expect(\"response_second_part flush failed\");\n        })\n    });\n\n    let start = tokio::time::Instant::now();\n    let res = wreq::get(format!(\"http://{}/\", server.addr()))\n        .send()\n        .await\n        .expect(\"response\");\n\n    assert_eq!(res.text().await.expect(\"text\"), RESPONSE_CONTENT);\n    assert!(start.elapsed() >= DELAY_BETWEEN_RESPONSE_PARTS - DELAY_MARGIN);\n}\n\n#[tokio::test]\nasync fn test_chunked_fragmented_response_with_extra_bytes() {\n    const DELAY_BETWEEN_RESPONSE_PARTS: tokio::time::Duration =\n        tokio::time::Duration::from_millis(1000);\n    const DELAY_MARGIN: tokio::time::Duration = tokio::time::Duration::from_millis(50);\n\n    let server = 
server::low_level_with_response(|_raw_request, client_socket| {\n        Box::new(async move {\n            let gzipped_content = gzip_compress(RESPONSE_CONTENT.as_bytes());\n            let response_first_part = [\n                COMPRESSED_RESPONSE_HEADERS,\n                format!(\n                    \"Transfer-Encoding: chunked\\r\\n\\r\\n{:x}\\r\\n\",\n                    gzipped_content.len()\n                )\n                .as_bytes(),\n                &gzipped_content,\n            ]\n            .concat();\n            let response_second_part = b\"\\r\\n2ab\\r\\n0\\r\\n\\r\\n\";\n\n            client_socket\n                .write_all(response_first_part.as_slice())\n                .await\n                .expect(\"response_first_part write_all failed\");\n            client_socket\n                .flush()\n                .await\n                .expect(\"response_first_part flush failed\");\n\n            tokio::time::sleep(DELAY_BETWEEN_RESPONSE_PARTS).await;\n\n            client_socket\n                .write_all(response_second_part)\n                .await\n                .expect(\"response_second_part write_all failed\");\n            client_socket\n                .flush()\n                .await\n                .expect(\"response_second_part flush failed\");\n        })\n    });\n\n    let start = tokio::time::Instant::now();\n    let res = wreq::get(format!(\"http://{}/\", server.addr()))\n        .send()\n        .await\n        .expect(\"response\");\n\n    let err = res.text().await.expect_err(\"there must be an error\");\n    assert!(err.is_decode());\n    assert!(start.elapsed() >= DELAY_BETWEEN_RESPONSE_PARTS - DELAY_MARGIN);\n}\n"
  },
  {
    "path": "tests/layers.rs",
    "content": "mod support;\n\nuse std::time::Duration;\n\nuse futures_util::future::join_all;\nuse pretty_env_logger::env_logger;\nuse support::{\n    layer::{DelayLayer, SharedConcurrencyLimitLayer},\n    server,\n};\nuse tower::{layer::util::Identity, limit::ConcurrencyLimitLayer, timeout::TimeoutLayer};\nuse wreq::Client;\n\n#[tokio::test]\nasync fn non_op_layer() {\n    let _ = env_logger::try_init();\n\n    let server = server::http(move |_req| async { http::Response::default() });\n\n    let url = format!(\"http://{}\", server.addr());\n\n    let client = Client::builder()\n        .layer(Identity::new())\n        .no_proxy()\n        .build()\n        .unwrap();\n\n    let res = client.get(url).send().await;\n\n    assert!(res.is_ok());\n}\n\n#[tokio::test]\nasync fn non_op_layer_with_timeout() {\n    let _ = env_logger::try_init();\n\n    let client = Client::builder()\n        .layer(Identity::new())\n        .connect_timeout(Duration::from_millis(200))\n        .no_proxy()\n        .build()\n        .unwrap();\n\n    // never returns\n    let url = \"http://192.0.2.1:81/slow\";\n\n    let res = client.get(url).send().await;\n\n    let err = res.unwrap_err();\n\n    assert!(err.is_connect() && err.is_timeout());\n}\n\n#[tokio::test]\nasync fn with_connect_timeout_layer_never_returning() {\n    let _ = env_logger::try_init();\n\n    let client = Client::builder()\n        .layer(TimeoutLayer::new(Duration::from_millis(100)))\n        .no_proxy()\n        .build()\n        .unwrap();\n\n    // never returns\n    let url = \"http://192.0.2.1:81/slow\";\n\n    let res = client.get(url).send().await;\n\n    let err = res.unwrap_err();\n\n    assert!(err.is_timeout());\n}\n\n#[tokio::test]\nasync fn with_connect_timeout_layer_slow() {\n    let _ = env_logger::try_init();\n\n    let server = server::http(move |_req| async { http::Response::default() });\n\n    let url = format!(\"http://{}\", server.addr());\n\n    let client = Client::builder()\n        
.layer(DelayLayer::new(Duration::from_millis(200)))\n        .layer(TimeoutLayer::new(Duration::from_millis(100)))\n        .no_proxy()\n        .build()\n        .unwrap();\n\n    let res = client.get(url).send().await;\n\n    let err = res.unwrap_err();\n\n    assert!(err.is_timeout());\n}\n\n#[tokio::test]\nasync fn multiple_timeout_layers_under_threshold() {\n    let _ = env_logger::try_init();\n\n    let server = server::http(move |_req| async { http::Response::default() });\n\n    let url = format!(\"http://{}\", server.addr());\n\n    let client = Client::builder()\n        .layer(DelayLayer::new(Duration::from_millis(100)))\n        .layer(TimeoutLayer::new(Duration::from_millis(200)))\n        .layer(TimeoutLayer::new(Duration::from_millis(300)))\n        .layer(TimeoutLayer::new(Duration::from_millis(500)))\n        .timeout(Duration::from_millis(200))\n        .no_proxy()\n        .build()\n        .unwrap();\n\n    let res = client.get(url).send().await;\n\n    assert!(res.is_ok());\n}\n\n#[tokio::test]\nasync fn multiple_timeout_layers_over_threshold() {\n    let _ = env_logger::try_init();\n\n    let server = server::http(move |_req| async { http::Response::default() });\n\n    let url = format!(\"http://{}\", server.addr());\n\n    let client = Client::builder()\n        .layer(DelayLayer::new(Duration::from_millis(100)))\n        .layer(TimeoutLayer::new(Duration::from_millis(50)))\n        .layer(TimeoutLayer::new(Duration::from_millis(50)))\n        .layer(TimeoutLayer::new(Duration::from_millis(50)))\n        .connect_timeout(Duration::from_millis(50))\n        .no_proxy()\n        .build()\n        .unwrap();\n\n    let res = client.get(url).send().await;\n\n    let err = res.unwrap_err();\n\n    assert!(err.is_timeout());\n}\n\n#[tokio::test]\nasync fn layer_insert_headers() {\n    let _ = env_logger::try_init();\n\n    let server = server::http(move |req| async move {\n        let headers = req.headers().clone();\n        
assert!(headers.contains_key(\"x-test-header\"));\n        http::Response::default()\n    });\n\n    let url = format!(\"http://{}\", server.addr());\n\n    let client = Client::builder()\n        .layer(tower::util::MapRequestLayer::new(\n            move |mut req: http::Request<wreq::Body>| {\n                req.headers_mut().insert(\n                    \"x-test-header\",\n                    http::HeaderValue::from_static(\"test-value\"),\n                );\n                req\n            },\n        ))\n        .no_proxy()\n        .build()\n        .unwrap();\n\n    let res = client.get(url).send().await;\n\n    assert!(res.is_ok());\n}\n\n#[tokio::test]\nasync fn with_concurrency_limit_layer_timeout() {\n    let _ = env_logger::try_init();\n\n    let server = server::http(move |_req| async { http::Response::default() });\n\n    let url = format!(\"http://{}\", server.addr());\n\n    let client = Client::builder()\n        .layer(DelayLayer::new(Duration::from_millis(100)))\n        .layer(SharedConcurrencyLimitLayer::new(2))\n        .timeout(Duration::from_millis(200))\n        .pool_max_idle_per_host(0) // disable connection reuse to force resource contention on the concurrency limit semaphore\n        .no_proxy()\n        .build()\n        .unwrap();\n\n    // first call succeeds since no resource contention\n    let res = client.get(url.clone()).send().await;\n    assert!(res.is_ok());\n\n    // 3 calls where the second two wait on the first and time out\n    let mut futures = Vec::new();\n    for _ in 0..3 {\n        futures.push(client.clone().get(url.clone()).send());\n    }\n\n    let all_res = join_all(futures).await;\n\n    let timed_out = all_res\n        .into_iter()\n        .any(|res| res.is_err_and(|err| err.is_timeout()));\n\n    assert!(timed_out, \"at least one request should have timed out\");\n}\n\n#[tokio::test]\nasync fn with_concurrency_limit_layer_success() {\n    let _ = env_logger::try_init();\n\n    let server = 
server::http(move |_req| async { http::Response::default() });\n\n    let url = format!(\"http://{}\", server.addr());\n\n    let client = Client::builder()\n        .layer(DelayLayer::new(Duration::from_millis(100)))\n        .layer(TimeoutLayer::new(Duration::from_millis(200)))\n        .layer(ConcurrencyLimitLayer::new(1)) //2\n        .timeout(Duration::from_millis(1000))\n        .pool_max_idle_per_host(0) // disable connection reuse to force resource contention on the concurrency limit semaphore\n        .no_proxy()\n        .build()\n        .unwrap();\n\n    // first call succeeds since no resource contention\n    let res = client.get(url.clone()).send().await;\n    assert!(res.is_ok());\n\n    // 3 calls of which all are individually below the inner timeout\n    // and the sum is below outer timeout which affects the final call which waited the whole time\n    let mut futures = Vec::new();\n    for _ in 0..3 {\n        futures.push(client.clone().get(url.clone()).send());\n    }\n\n    let all_res = join_all(futures).await;\n\n    for res in all_res.into_iter() {\n        assert!(\n            res.is_ok(),\n            \"neither outer long timeout or inner short timeout should be exceeded\"\n        );\n    }\n}\n\n#[tokio::test]\nasync fn no_generic_bounds_required_for_client_new() {\n    let _ = env_logger::try_init();\n\n    let server = server::http(move |_req| async { http::Response::default() });\n\n    let url = format!(\"http://{}\", server.addr());\n\n    let res = wreq::get(url).send().await;\n\n    assert!(res.is_ok());\n}\n"
  },
  {
    "path": "tests/multipart.rs",
    "content": "mod support;\nuse http_body_util::BodyExt;\nuse pretty_env_logger::env_logger;\nuse support::server;\n\n#[tokio::test]\nasync fn text_part() {\n    let _ = env_logger::try_init();\n\n    let form = wreq::multipart::Form::new().text(\"foo\", \"bar\");\n\n    let expected_body = format!(\n        \"\\\n         --{0}\\r\\n\\\n         Content-Disposition: form-data; name=\\\"foo\\\"\\r\\n\\r\\n\\\n         bar\\r\\n\\\n         --{0}--\\r\\n\\\n         \",\n        form.boundary()\n    );\n\n    let ct = format!(\"multipart/form-data; boundary={}\", form.boundary());\n\n    let server = server::http(move |mut req| {\n        let ct = ct.clone();\n        let expected_body = expected_body.clone();\n        async move {\n            assert_eq!(req.method(), \"POST\");\n            assert_eq!(req.headers()[\"content-type\"], ct);\n            assert_eq!(\n                req.headers()[\"content-length\"],\n                expected_body.len().to_string()\n            );\n\n            let mut full: Vec<u8> = Vec::new();\n            while let Some(item) = req.body_mut().frame().await {\n                full.extend(&*item.unwrap().into_data().unwrap());\n            }\n\n            assert_eq!(full, expected_body.as_bytes());\n\n            http::Response::default()\n        }\n    });\n\n    let url = format!(\"http://{}/multipart/1\", server.addr());\n\n    let res = wreq::post(&url).multipart(form).send().await.unwrap();\n\n    assert_eq!(res.uri(), url.as_str());\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n}\n\n#[tokio::test]\nasync fn text_part_with_custom_boundary() {\n    let _ = env_logger::try_init();\n\n    let form =\n        wreq::multipart::Form::with_boundary(\"----WebKitFormBoundary0123456789\").text(\"foo\", \"bar\");\n\n    let expected_body = \"\\\n         ------WebKitFormBoundary0123456789\\r\\n\\\n         Content-Disposition: form-data; name=\\\"foo\\\"\\r\\n\\r\\n\\\n         bar\\r\\n\\\n         
------WebKitFormBoundary0123456789--\\r\\n\\\n         \";\n\n    let ct = \"multipart/form-data; boundary=----WebKitFormBoundary0123456789\";\n\n    let server = server::http(move |mut req| async move {\n        assert_eq!(req.method(), \"POST\");\n        assert_eq!(req.headers()[\"content-type\"], ct);\n        assert_eq!(\n            req.headers()[\"content-length\"],\n            expected_body.len().to_string()\n        );\n\n        let mut full: Vec<u8> = Vec::new();\n        while let Some(item) = req.body_mut().frame().await {\n            full.extend(&*item.unwrap().into_data().unwrap());\n        }\n\n        assert_eq!(full, expected_body.as_bytes());\n\n        http::Response::default()\n    });\n\n    let url = format!(\"http://{}/multipart/1\", server.addr());\n\n    let res = wreq::post(&url).multipart(form).send().await.unwrap();\n\n    assert_eq!(res.uri(), url.as_str());\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n}\n\n#[cfg(feature = \"stream\")]\n#[tokio::test]\nasync fn stream_part() {\n    use futures_util::{future, stream};\n\n    let _ = env_logger::try_init();\n\n    let stream = wreq::Body::wrap_stream(stream::once(future::ready(Ok::<_, wreq::Error>(\n        \"part1 part2\".to_owned(),\n    ))));\n    let part = wreq::multipart::Part::stream(stream);\n\n    let form = wreq::multipart::Form::new()\n        .text(\"foo\", \"bar\")\n        .part(\"part_stream\", part);\n\n    let expected_body = format!(\n        \"\\\n         --{0}\\r\\n\\\n         Content-Disposition: form-data; name=\\\"foo\\\"\\r\\n\\\n         \\r\\n\\\n         bar\\r\\n\\\n         --{0}\\r\\n\\\n         Content-Disposition: form-data; name=\\\"part_stream\\\"\\r\\n\\\n         \\r\\n\\\n         part1 part2\\r\\n\\\n         --{0}--\\r\\n\\\n         \",\n        form.boundary()\n    );\n\n    let ct = format!(\"multipart/form-data; boundary={}\", form.boundary());\n\n    let server = server::http(move |req| {\n        let ct = ct.clone();\n        
let expected_body = expected_body.clone();\n        async move {\n            assert_eq!(req.method(), \"POST\");\n            assert_eq!(req.headers()[\"content-type\"], ct);\n            assert_eq!(req.headers()[\"transfer-encoding\"], \"chunked\");\n\n            let full = req.collect().await.unwrap().to_bytes();\n\n            assert_eq!(full, expected_body.as_bytes());\n\n            http::Response::default()\n        }\n    });\n\n    let url = format!(\"http://{}/multipart/1\", server.addr());\n\n    let res = wreq::post(&url)\n        .multipart(form)\n        .send()\n        .await\n        .expect(\"Failed to post multipart\");\n    assert_eq!(res.uri(), url.as_str());\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n}\n\n#[cfg(feature = \"stream\")]\n#[tokio::test]\nasync fn async_impl_file_part() {\n    let _ = env_logger::try_init();\n\n    let form = wreq::multipart::Form::new()\n        .file(\"foo\", \"Cargo.lock\")\n        .await\n        .unwrap();\n\n    let fcontents = std::fs::read_to_string(\"Cargo.lock\").unwrap();\n\n    let expected_body = format!(\n        \"\\\n         --{0}\\r\\n\\\n         Content-Disposition: form-data; name=\\\"foo\\\"; filename=\\\"Cargo.lock\\\"\\r\\n\\\n         Content-Type: application/octet-stream\\r\\n\\r\\n\\\n         {1}\\r\\n\\\n         --{0}--\\r\\n\\\n         \",\n        form.boundary(),\n        fcontents\n    );\n\n    let ct = format!(\"multipart/form-data; boundary={}\", form.boundary());\n\n    let server = server::http(move |req| {\n        let ct = ct.clone();\n        let expected_body = expected_body.clone();\n        async move {\n            assert_eq!(req.method(), \"POST\");\n            assert_eq!(req.headers()[\"content-type\"], ct);\n            // files know their exact size\n            assert_eq!(\n                req.headers()[\"content-length\"],\n                expected_body.len().to_string()\n            );\n            let full = 
req.collect().await.unwrap().to_bytes();\n\n            assert_eq!(full, expected_body.as_bytes());\n\n            http::Response::default()\n        }\n    });\n\n    let url = format!(\"http://{}/multipart/3\", server.addr());\n\n    let res = wreq::post(&url).multipart(form).send().await.unwrap();\n\n    assert_eq!(res.uri(), url.as_str());\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n}\n"
  },
  {
    "path": "tests/proxy.rs",
    "content": "mod support;\nuse std::{env, sync::LazyLock};\n\nuse support::server;\nuse tokio::sync::Mutex;\nuse wreq::Client;\n\n// serialize tests that read from / write to environment variables\nstatic HTTP_PROXY_ENV_MUTEX: LazyLock<Mutex<()>> = LazyLock::new(|| Mutex::new(()));\n\n#[tokio::test]\nasync fn http_proxy() {\n    let url = \"http://hyper.rs.local/prox\";\n    let server = server::http(move |req| {\n        assert_eq!(req.method(), \"GET\");\n        assert_eq!(req.uri(), url);\n        assert_eq!(req.headers()[\"host\"], \"hyper.rs.local\");\n\n        async { http::Response::default() }\n    });\n\n    let proxy = format!(\"http://{}\", server.addr());\n\n    let res = Client::builder()\n        .proxy(wreq::Proxy::http(&proxy).unwrap())\n        .build()\n        .unwrap()\n        .get(url)\n        .send()\n        .await\n        .unwrap();\n\n    assert_eq!(res.uri(), url);\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n}\n\n#[tokio::test]\nasync fn http_proxy_basic_auth() {\n    let url = \"http://hyper.rs.local/prox\";\n    let server = server::http(move |req| {\n        assert_eq!(req.method(), \"GET\");\n        assert_eq!(req.uri(), url);\n        assert_eq!(req.headers()[\"host\"], \"hyper.rs.local\");\n        assert_eq!(\n            req.headers()[\"proxy-authorization\"],\n            \"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==\"\n        );\n\n        async { http::Response::default() }\n    });\n\n    let proxy = format!(\"http://{}\", server.addr());\n\n    let res = Client::builder()\n        .proxy(\n            wreq::Proxy::http(&proxy)\n                .unwrap()\n                .basic_auth(\"Aladdin\", \"open sesame\"),\n        )\n        .build()\n        .unwrap()\n        .get(url)\n        .send()\n        .await\n        .unwrap();\n\n    assert_eq!(res.uri(), url);\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n}\n\n#[tokio::test]\nasync fn http_proxy_basic_auth_parsed() {\n    let url = 
\"http://hyper.rs.local/prox\";\n    let server = server::http(move |req| {\n        assert_eq!(req.method(), \"GET\");\n        assert_eq!(req.uri(), url);\n        assert_eq!(req.headers()[\"host\"], \"hyper.rs.local\");\n        assert_eq!(\n            req.headers()[\"proxy-authorization\"],\n            \"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==\"\n        );\n\n        async { http::Response::default() }\n    });\n\n    let proxy = format!(\"http://Aladdin:open%20sesame@{}\", server.addr());\n\n    let res = Client::builder()\n        .proxy(wreq::Proxy::http(&proxy).unwrap())\n        .build()\n        .unwrap()\n        .get(url)\n        .send()\n        .await\n        .unwrap();\n\n    assert_eq!(res.uri(), url);\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n\n    let res = wreq::get(url)\n        .proxy(wreq::Proxy::http(&proxy).unwrap())\n        .send()\n        .await\n        .unwrap();\n\n    assert_eq!(res.uri(), url);\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n}\n\n#[tokio::test]\nasync fn system_http_proxy_basic_auth_parsed() {\n    let url = \"http://hyper.rs.local/prox\";\n    let server = server::http(move |req| {\n        assert_eq!(req.method(), \"GET\");\n        assert_eq!(req.uri(), url);\n        assert_eq!(req.headers()[\"host\"], \"hyper.rs.local\");\n        assert_eq!(\n            req.headers()[\"proxy-authorization\"],\n            \"Basic QWxhZGRpbjpvcGVuc2VzYW1l\"\n        );\n\n        async { http::Response::default() }\n    });\n\n    // avoid races with other tests that change \"http_proxy\"\n    let _env_lock = HTTP_PROXY_ENV_MUTEX.lock().await;\n\n    // save system setting first.\n    let system_proxy = env::var(\"http_proxy\");\n\n    // set-up http proxy.\n    unsafe {\n        env::set_var(\n            \"http_proxy\",\n            format!(\"http://Aladdin:opensesame@{}\", server.addr()),\n        )\n    }\n\n    let res = Client::builder()\n        .build()\n        .unwrap()\n        .get(url)\n        
.send()\n        .await\n        .unwrap();\n\n    assert_eq!(res.uri(), url);\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n\n    // reset user setting.\n    unsafe {\n        match system_proxy {\n            Err(_) => env::remove_var(\"http_proxy\"),\n            Ok(proxy) => env::set_var(\"http_proxy\", proxy),\n        }\n    }\n}\n\n#[tokio::test]\nasync fn test_no_proxy() {\n    let server = server::http(move |req| {\n        assert_eq!(req.method(), \"GET\");\n        assert_eq!(req.uri(), \"/4\");\n\n        async { http::Response::default() }\n    });\n    let proxy = format!(\"http://{}\", server.addr());\n    let url = format!(\"http://{}/4\", server.addr());\n\n    // set up proxy and use no_proxy to clear up client builder proxies.\n    let res = Client::builder()\n        .proxy(wreq::Proxy::http(&proxy).unwrap())\n        .no_proxy()\n        .build()\n        .unwrap()\n        .get(&url)\n        .send()\n        .await\n        .unwrap();\n\n    assert_eq!(res.uri(), url.as_str());\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n}\n\n#[tokio::test]\nasync fn test_using_system_proxy() {\n    let url = \"http://not.a.real.sub.hyper.rs.local/prox\";\n    let server = server::http(move |req| {\n        assert_eq!(req.method(), \"GET\");\n        assert_eq!(req.uri(), url);\n        assert_eq!(req.headers()[\"host\"], \"not.a.real.sub.hyper.rs.local\");\n\n        async { http::Response::default() }\n    });\n\n    // avoid races with other tests that change \"http_proxy\"\n    let _env_lock = HTTP_PROXY_ENV_MUTEX.lock().await;\n\n    // save system setting first.\n    let system_proxy = env::var(\"http_proxy\");\n    // set-up http proxy.\n    unsafe {\n        env::set_var(\"http_proxy\", format!(\"http://{}\", server.addr()));\n    }\n    // system proxy is used by default\n    let res = wreq::get(url).send().await.unwrap();\n\n    assert_eq!(res.uri(), url);\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n\n    // reset user 
setting.\n    unsafe {\n        match system_proxy {\n            Err(_) => env::remove_var(\"http_proxy\"),\n            Ok(proxy) => env::set_var(\"http_proxy\", proxy),\n        }\n    }\n}\n\n#[tokio::test]\nasync fn http_over_http() {\n    let url = \"http://hyper.rs.local/prox\";\n\n    let server = server::http(move |req| {\n        assert_eq!(req.method(), \"GET\");\n        assert_eq!(req.uri(), url);\n        assert_eq!(req.headers()[\"host\"], \"hyper.rs.local\");\n\n        async { http::Response::default() }\n    });\n\n    let proxy = format!(\"http://{}\", server.addr());\n\n    let res = Client::builder()\n        .proxy(wreq::Proxy::http(&proxy).unwrap())\n        .build()\n        .unwrap()\n        .get(url)\n        .send()\n        .await\n        .unwrap();\n\n    assert_eq!(res.uri(), url);\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n}\n\n#[tokio::test]\nasync fn http_proxy_custom_headers() {\n    let url = \"http://hyper.rs.local/prox\";\n    let server = server::http(move |req| {\n        assert_eq!(req.method(), \"GET\");\n        assert_eq!(req.uri(), url);\n        assert_eq!(req.headers()[\"host\"], \"hyper.rs.local\");\n        assert_eq!(\n            req.headers()[\"proxy-authorization\"],\n            \"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==\"\n        );\n        assert_eq!(req.headers()[\"x-custom-header\"], \"value\");\n\n        async { http::Response::default() }\n    });\n\n    let proxy = format!(\"http://Aladdin:open%20sesame@{}\", server.addr());\n\n    let proxy = wreq::Proxy::http(&proxy).unwrap().custom_http_headers({\n        let mut headers = http::HeaderMap::new();\n        headers.insert(\"x-custom-header\", \"value\".parse().unwrap());\n        headers\n    });\n\n    let res = Client::builder()\n        .proxy(proxy.clone())\n        .build()\n        .unwrap()\n        .get(url)\n        .send()\n        .await\n        .unwrap();\n\n    assert_eq!(res.uri(), url);\n    assert_eq!(res.status(), 
wreq::StatusCode::OK);\n\n    let res = wreq::get(url).proxy(proxy).send().await.unwrap();\n\n    assert_eq!(res.uri(), url);\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n}\n\n#[tokio::test]\nasync fn tunnel_detects_auth_required() {\n    let url = \"https://hyper.rs.local/prox\";\n\n    let server = server::http(move |req| {\n        assert_eq!(req.method(), \"CONNECT\");\n        assert_eq!(req.uri(), \"hyper.rs.local:443\");\n        assert!(\n            !req.headers()\n                .contains_key(http::header::PROXY_AUTHORIZATION)\n        );\n\n        async {\n            let mut res = http::Response::default();\n            *res.status_mut() = http::StatusCode::PROXY_AUTHENTICATION_REQUIRED;\n            res\n        }\n    });\n\n    let proxy = format!(\"http://{}\", server.addr());\n\n    let err = Client::builder()\n        .proxy(wreq::Proxy::https(&proxy).unwrap())\n        .build()\n        .unwrap()\n        .get(url)\n        .send()\n        .await\n        .unwrap_err();\n\n    let err = support::error::inspect(err).pop().unwrap();\n    assert!(\n        err.contains(\"auth\"),\n        \"proxy auth err expected, got: {err:?}\"\n    );\n}\n\n#[tokio::test]\nasync fn tunnel_includes_proxy_auth() {\n    let url = \"https://hyper.rs.local/prox\";\n\n    let server = server::http(move |req| {\n        assert_eq!(req.method(), \"CONNECT\");\n        assert_eq!(req.uri(), \"hyper.rs.local:443\");\n        assert_eq!(\n            req.headers()[\"proxy-authorization\"],\n            \"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==\"\n        );\n\n        async {\n            // return 400 to not actually deal with TLS tunneling\n            let mut res = http::Response::default();\n            *res.status_mut() = http::StatusCode::BAD_REQUEST;\n            res\n        }\n    });\n\n    let proxy = format!(\"http://Aladdin:open%20sesame@{}\", server.addr());\n\n    let err = Client::builder()\n        .proxy(wreq::Proxy::https(&proxy).unwrap())\n        
.build()\n        .unwrap()\n        .get(url)\n        .send()\n        .await\n        .unwrap_err();\n\n    let err = support::error::inspect(err).pop().unwrap();\n    assert!(\n        err.contains(\"unsuccessful\"),\n        \"tunnel unsuccessful expected, got: {err:?}\"\n    );\n}\n\n#[tokio::test]\nasync fn tunnel_includes_user_agent() {\n    let url = \"https://hyper.rs.local/prox\";\n\n    let server = server::http(move |req| {\n        assert_eq!(req.method(), \"CONNECT\");\n        assert_eq!(req.uri(), \"hyper.rs.local:443\");\n        assert_eq!(req.headers()[\"user-agent\"], \"wreq-test\");\n\n        async {\n            // return 400 to not actually deal with TLS tunneling\n            let mut res = http::Response::default();\n            *res.status_mut() = http::StatusCode::BAD_REQUEST;\n            res\n        }\n    });\n\n    let proxy = format!(\"http://{}\", server.addr());\n\n    let err = Client::builder()\n        .proxy(wreq::Proxy::https(&proxy).unwrap().custom_http_headers({\n            let mut headers = http::HeaderMap::new();\n            headers.insert(\"user-agent\", \"wreq-test\".parse().unwrap());\n            headers\n        }))\n        .user_agent(\"wreq-test\")\n        .build()\n        .unwrap()\n        .get(url)\n        .send()\n        .await\n        .unwrap_err();\n\n    let err = support::error::inspect(err).pop().unwrap();\n    assert!(\n        err.contains(\"unsuccessful\"),\n        \"tunnel unsuccessful expected, got: {err:?}\"\n    );\n}\n\n#[tokio::test]\nasync fn proxy_tunnel_connect_error() {\n    let client = Client::builder()\n        .tls_cert_verification(false)\n        .no_proxy()\n        .build()\n        .unwrap();\n\n    let invalid_proxies = vec![\n        \"http://invalid.proxy:8080\",\n        \"https://invalid.proxy:8080\",\n        \"socks4://invalid.proxy:8080\",\n        \"socks4a://invalid.proxy:8080\",\n        \"socks5://invalid.proxy:8080\",\n        \"socks5h://invalid.proxy:8080\",\n 
   ];\n\n    let target_urls = [\"https://example.com\", \"http://example.com\"];\n\n    for proxy in invalid_proxies {\n        for url in target_urls {\n            let err = client\n                .get(url)\n                .proxy(wreq::Proxy::all(proxy).unwrap())\n                .send()\n                .await\n                .unwrap_err();\n\n            assert!(\n                err.is_proxy_connect(),\n                \"proxy connect error expected, got: {err:?}\"\n            );\n        }\n    }\n}\n"
  },
  {
    "path": "tests/redirect.rs",
    "content": "mod support;\nuse http::StatusCode;\nuse http_body_util::BodyExt;\nuse support::server;\nuse wreq::{\n    Body, Client,\n    redirect::{History, Policy},\n};\n\n#[tokio::test]\nasync fn test_redirect_301_and_302_and_303_changes_post_to_get() {\n    let codes = [301u16, 302, 303];\n\n    for &code in &codes {\n        let redirect = server::http(move |req| async move {\n            if req.method() == \"POST\" {\n                assert_eq!(req.uri(), &*format!(\"/{code}\"));\n                http::Response::builder()\n                    .status(code)\n                    .header(\"location\", \"/dst\")\n                    .header(\"server\", \"test-redirect\")\n                    .body(Body::default())\n                    .unwrap()\n            } else {\n                assert_eq!(req.method(), \"GET\");\n\n                http::Response::builder()\n                    .header(\"server\", \"test-dst\")\n                    .body(Body::default())\n                    .unwrap()\n            }\n        });\n\n        let url = format!(\"http://{}/{}\", redirect.addr(), code);\n        let dst = format!(\"http://{}/{}\", redirect.addr(), \"dst\");\n        let res = wreq::post(&url)\n            .redirect(Policy::default())\n            .send()\n            .await\n            .unwrap();\n        assert_eq!(res.uri(), dst.as_str());\n        assert_eq!(res.status(), wreq::StatusCode::OK);\n        assert_eq!(\n            res.headers().get(wreq::header::SERVER).unwrap(),\n            &\"test-dst\"\n        );\n    }\n}\n\n#[tokio::test]\nasync fn test_redirect_307_and_308_tries_to_get_again() {\n    let codes = [307u16, 308];\n    for &code in &codes {\n        let redirect = server::http(move |req| async move {\n            assert_eq!(req.method(), \"GET\");\n            if req.uri() == &*format!(\"/{code}\") {\n                http::Response::builder()\n                    .status(code)\n                    .header(\"location\", \"/dst\")\n          
          .header(\"server\", \"test-redirect\")\n                    .body(Body::default())\n                    .unwrap()\n            } else {\n                assert_eq!(req.uri(), \"/dst\");\n\n                http::Response::builder()\n                    .header(\"server\", \"test-dst\")\n                    .body(Body::default())\n                    .unwrap()\n            }\n        });\n\n        let url = format!(\"http://{}/{}\", redirect.addr(), code);\n        let dst = format!(\"http://{}/{}\", redirect.addr(), \"dst\");\n        let res = wreq::get(&url)\n            .redirect(Policy::default())\n            .send()\n            .await\n            .unwrap();\n        assert_eq!(res.uri(), dst.as_str());\n        assert_eq!(res.status(), wreq::StatusCode::OK);\n        assert_eq!(\n            res.headers().get(wreq::header::SERVER).unwrap(),\n            &\"test-dst\"\n        );\n    }\n}\n\n#[tokio::test]\nasync fn test_redirect_307_and_308_tries_to_post_again() {\n    let _ = pretty_env_logger::env_logger::try_init();\n\n    let codes = [307u16, 308];\n    for &code in &codes {\n        let redirect = server::http(move |mut req| async move {\n            assert_eq!(req.method(), \"POST\");\n            assert_eq!(req.headers()[\"content-length\"], \"5\");\n\n            let data = req\n                .body_mut()\n                .frame()\n                .await\n                .unwrap()\n                .unwrap()\n                .into_data()\n                .unwrap();\n            assert_eq!(&*data, b\"Hello\");\n\n            if req.uri() == &*format!(\"/{code}\") {\n                http::Response::builder()\n                    .status(code)\n                    .header(\"location\", \"/dst\")\n                    .header(\"server\", \"test-redirect\")\n                    .body(Body::default())\n                    .unwrap()\n            } else {\n                assert_eq!(req.uri(), \"/dst\");\n\n                
http::Response::builder()\n                    .header(\"server\", \"test-dst\")\n                    .body(Body::default())\n                    .unwrap()\n            }\n        });\n\n        let url = format!(\"http://{}/{}\", redirect.addr(), code);\n        let dst = format!(\"http://{}/{}\", redirect.addr(), \"dst\");\n        let res = wreq::post(&url)\n            .redirect(Policy::default())\n            .body(\"Hello\")\n            .send()\n            .await\n            .unwrap();\n        assert_eq!(res.uri(), dst.as_str());\n        assert_eq!(res.status(), wreq::StatusCode::OK);\n        assert_eq!(\n            res.headers().get(wreq::header::SERVER).unwrap(),\n            &\"test-dst\"\n        );\n    }\n}\n\n#[tokio::test]\nasync fn test_redirect_removes_sensitive_headers() {\n    use tokio::sync::watch;\n\n    let (tx, rx) = watch::channel::<Option<std::net::SocketAddr>>(None);\n\n    let end_server = server::http(move |req| {\n        let mut rx = rx.clone();\n        async move {\n            assert_eq!(req.headers().get(\"cookie\"), None);\n\n            rx.changed().await.unwrap();\n            let mid_addr = rx.borrow().unwrap();\n            assert_eq!(\n                req.headers()[\"referer\"],\n                format!(\"http://{mid_addr}/sensitive\")\n            );\n            http::Response::default()\n        }\n    });\n\n    let end_addr = end_server.addr();\n\n    let mid_server = server::http(move |req| async move {\n        assert_eq!(req.headers()[\"cookie\"], \"foo=bar\");\n        http::Response::builder()\n            .status(302)\n            .header(\"location\", format!(\"http://{end_addr}/end\"))\n            .body(Body::default())\n            .unwrap()\n    });\n\n    tx.send(Some(mid_server.addr())).unwrap();\n\n    Client::builder()\n        .redirect(Policy::default())\n        .build()\n        .unwrap()\n        .get(format!(\"http://{}/sensitive\", mid_server.addr()))\n        .header(\n            
wreq::header::COOKIE,\n            wreq::header::HeaderValue::from_static(\"foo=bar\"),\n        )\n        .send()\n        .await\n        .unwrap();\n}\n\n#[tokio::test]\nasync fn test_redirect_policy_can_return_errors() {\n    let server = server::http(move |req| async move {\n        assert_eq!(req.uri(), \"/loop\");\n        http::Response::builder()\n            .status(302)\n            .header(\"location\", \"/loop\")\n            .body(Body::default())\n            .unwrap()\n    });\n\n    let url = format!(\"http://{}/loop\", server.addr());\n    let err = wreq::get(&url)\n        .redirect(Policy::default())\n        .send()\n        .await\n        .unwrap_err();\n    assert!(err.is_redirect());\n}\n\n#[tokio::test]\nasync fn test_redirect_policy_can_stop_redirects_without_an_error() {\n    let server = server::http(move |req| async move {\n        assert_eq!(req.uri(), \"/no-redirect\");\n        http::Response::builder()\n            .status(302)\n            .header(\"location\", \"/dont\")\n            .body(Body::default())\n            .unwrap()\n    });\n\n    let url = format!(\"http://{}/no-redirect\", server.addr());\n\n    let res = Client::builder()\n        .redirect(Policy::none())\n        .build()\n        .unwrap()\n        .get(&url)\n        .send()\n        .await\n        .unwrap();\n\n    assert_eq!(res.uri(), url.as_str());\n    assert_eq!(res.status(), wreq::StatusCode::FOUND);\n}\n\n#[tokio::test]\nasync fn test_referer_is_not_set_if_disabled() {\n    let server = server::http(move |req| async move {\n        if req.uri() == \"/no-refer\" {\n            http::Response::builder()\n                .status(302)\n                .header(\"location\", \"/dst\")\n                .body(Body::default())\n                .unwrap()\n        } else {\n            assert_eq!(req.uri(), \"/dst\");\n            assert_eq!(req.headers().get(\"referer\"), None);\n\n            http::Response::default()\n        }\n    });\n\n    
Client::builder()\n        .referer(false)\n        .build()\n        .unwrap()\n        .get(format!(\"http://{}/no-refer\", server.addr()))\n        .send()\n        .await\n        .unwrap();\n}\n\n#[tokio::test]\nasync fn test_invalid_location_stops_redirect_gh484() {\n    let server = server::http(move |_req| async move {\n        http::Response::builder()\n            .status(302)\n            .header(\"location\", \"http://www.yikes{KABOOM}\")\n            .body(Body::default())\n            .unwrap()\n    });\n\n    let url = format!(\"http://{}/yikes\", server.addr());\n\n    let res = wreq::get(&url).send().await.unwrap();\n\n    assert_eq!(res.uri(), url.as_str());\n    assert_eq!(res.status(), wreq::StatusCode::FOUND);\n}\n\n#[tokio::test]\nasync fn test_invalid_scheme_is_rejected() {\n    let server = server::http(move |_req| async move {\n        http::Response::builder()\n            .status(302)\n            .header(\"location\", \"htt://www.yikes.com/\")\n            .body(Body::default())\n            .unwrap()\n    });\n\n    let url = format!(\"http://{}/yikes\", server.addr());\n\n    let err = wreq::get(&url)\n        .redirect(Policy::default())\n        .send()\n        .await\n        .unwrap_err();\n    assert!(err.is_builder());\n}\n\n#[cfg(feature = \"cookies\")]\n#[tokio::test]\nasync fn test_redirect_302_with_set_cookies() {\n    let code = 302;\n    let server = server::http(move |req| async move {\n        if req.uri() == \"/302\" {\n            http::Response::builder()\n                .status(302)\n                .header(\"location\", \"/dst\")\n                .header(\"set-cookie\", \"key=value\")\n                .body(Body::default())\n                .unwrap()\n        } else {\n            assert_eq!(req.uri(), \"/dst\");\n            assert_eq!(req.headers()[\"cookie\"], \"key=value\");\n            http::Response::default()\n        }\n    });\n\n    let url = format!(\"http://{}/{}\", server.addr(), code);\n    let dst = 
format!(\"http://{}/{}\", server.addr(), \"dst\");\n\n    let client = Client::builder()\n        .cookie_store(true)\n        .redirect(Policy::default())\n        .build()\n        .unwrap();\n    let res = client.get(&url).send().await.unwrap();\n\n    assert_eq!(res.uri(), dst.as_str());\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n}\n\n#[tokio::test]\nasync fn test_redirect_limit_to_1() {\n    let server = server::http(move |req| async move {\n        let i: i32 = req\n            .uri()\n            .path()\n            .rsplit('/')\n            .next()\n            .unwrap()\n            .parse::<i32>()\n            .unwrap();\n        assert!(req.uri().path().ends_with(&format!(\"/redirect/{i}\")));\n        http::Response::builder()\n            .status(302)\n            .header(\"location\", format!(\"/redirect/{}\", i + 1))\n            .body(Body::default())\n            .unwrap()\n    });\n    // The number at the end of the uri indicates the total number of redirections\n    let url = format!(\"http://{}/redirect/0\", server.addr());\n\n    let client = Client::builder()\n        .redirect(Policy::limited(1))\n        .build()\n        .unwrap();\n    let res = client.get(&url).send().await.unwrap_err();\n    // If the maximum limit is 1, then the final uri should be /redirect/1\n    assert_eq!(\n        res.uri().unwrap().to_string(),\n        format!(\"http://{}/redirect/1\", server.addr()).as_str()\n    );\n    assert!(res.is_redirect());\n}\n\n#[tokio::test]\nasync fn test_scheme_only_check_after_policy_return_follow() {\n    let server = server::http(move |_| async move {\n        http::Response::builder()\n            .status(302)\n            .header(\"location\", \"htt://www.yikes.com/\")\n            .body(Body::default())\n            .unwrap()\n    });\n\n    let url = format!(\"http://{}/yikes\", server.addr());\n    let res = Client::builder()\n        .redirect(Policy::custom(|attempt| attempt.stop()))\n        .build()\n        
.unwrap()\n        .get(&url)\n        .send()\n        .await;\n\n    assert!(res.is_ok());\n    assert_eq!(res.unwrap().status(), wreq::StatusCode::FOUND);\n\n    let res = Client::builder()\n        .redirect(Policy::custom(|attempt| attempt.follow()))\n        .build()\n        .unwrap()\n        .get(&url)\n        .send()\n        .await;\n\n    assert!(res.is_err());\n    assert!(res.unwrap_err().is_builder());\n}\n\n#[tokio::test]\nasync fn test_redirect_301_302_303_empty_payload_headers() {\n    let codes = [301u16, 302, 303];\n    for &code in &codes {\n        let redirect = server::http(move |mut req| async move {\n            if req.method() == \"POST\" {\n                let data = req\n                    .body_mut()\n                    .frame()\n                    .await\n                    .unwrap()\n                    .unwrap()\n                    .into_data()\n                    .unwrap();\n\n                assert_eq!(&*data, b\"Hello\");\n                if req.headers().get(wreq::header::CONTENT_LENGTH).is_some() {\n                    assert_eq!(req.headers()[wreq::header::CONTENT_LENGTH], \"5\");\n                }\n                assert_eq!(req.uri(), &*format!(\"/{code}\"));\n\n                http::Response::builder()\n                    .header(\"location\", \"/dst\")\n                    .header(\"server\", \"test-dst\")\n                    .status(code)\n                    .body(Body::default())\n                    .unwrap()\n            } else {\n                assert_eq!(req.method(), \"GET\");\n                assert!(req.headers().get(wreq::header::CONTENT_TYPE).is_none());\n                assert!(req.headers().get(wreq::header::CONTENT_LENGTH).is_none());\n                assert!(req.headers().get(wreq::header::CONTENT_ENCODING).is_none());\n                http::Response::builder()\n                    .header(\"server\", \"test-dst\")\n                    .body(Body::default())\n                    .unwrap()\n       
     }\n        });\n\n        let url = format!(\"http://{}/{}\", redirect.addr(), code);\n        let dst = format!(\"http://{}/{}\", redirect.addr(), \"dst\");\n        let res = wreq::post(&url)\n            .redirect(Policy::default())\n            .body(\"Hello\")\n            .header(wreq::header::CONTENT_TYPE, \"text/plain\")\n            .header(wreq::header::CONTENT_LENGTH, \"5\")\n            .header(wreq::header::CONTENT_ENCODING, \"identity\")\n            .send()\n            .await\n            .unwrap();\n        assert_eq!(res.uri(), dst.as_str());\n        assert_eq!(res.status(), 200);\n        assert_eq!(\n            res.headers().get(wreq::header::SERVER).unwrap(),\n            &\"test-dst\"\n        );\n    }\n}\n\n#[tokio::test]\nasync fn test_redirect_history() {\n    let redirect = server::http(move |req| async move {\n        if req.uri() == \"/first\" {\n            http::Response::builder()\n                .status(302)\n                .header(\"location\", \"/second\")\n                .body(Body::default())\n                .unwrap()\n        } else if req.uri() == \"/second\" {\n            http::Response::builder()\n                .status(302)\n                .header(\"location\", \"/dst\")\n                .body(Body::default())\n                .unwrap()\n        } else {\n            assert_eq!(req.uri(), \"/dst\");\n\n            http::Response::builder()\n                .header(\"server\", \"test-dst\")\n                .body(Body::default())\n                .unwrap()\n        }\n    });\n\n    let url = format!(\"http://{}/first\", redirect.addr());\n    let dst = format!(\"http://{}/{}\", redirect.addr(), \"dst\");\n\n    let client = Client::builder()\n        .redirect(Policy::default())\n        .build()\n        .unwrap();\n\n    let res = client.get(&url).send().await.unwrap();\n    assert_eq!(res.uri(), dst.as_str());\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n    assert_eq!(\n        
res.headers().get(wreq::header::SERVER).unwrap(),\n        &\"test-dst\"\n    );\n\n    let mut history = res.extensions().get::<History>().unwrap().into_iter();\n\n    let next1 = history.next().unwrap();\n    assert_eq!(next1.status, 302);\n    assert_eq!(next1.previous.path(), \"/first\");\n    assert_eq!(next1.uri.path(), \"/second\");\n    assert_eq!(next1.headers[\"location\"], \"/second\");\n\n    let next2 = history.next().unwrap();\n    assert_eq!(next2.status, 302);\n    assert_eq!(next2.previous.path(), \"/second\");\n    assert_eq!(next2.uri.path(), \"/dst\");\n    assert_eq!(next2.headers[\"location\"], \"/dst\");\n\n    assert!(history.next().is_none());\n}\n\n#[cfg(feature = \"cookies\")]\n#[tokio::test]\nasync fn test_redirect_applies_set_cookie_from_redirect() {\n    let server = server::http(move |req| async move {\n        match req.uri().path() {\n            \"/start\" => http::Response::builder()\n                .status(302)\n                .header(\"location\", \"/dst\")\n                .header(\"set-cookie\", \"session=abc; Path=/\")\n                .body(Body::default())\n                .unwrap(),\n            \"/dst\" => {\n                assert_eq!(req.headers()[\"cookie\"], \"session=abc\");\n                http::Response::builder()\n                    .status(200)\n                    .body(Body::default())\n                    .unwrap()\n            }\n            _ => http::Response::builder()\n                .status(404)\n                .body(Body::default())\n                .unwrap(),\n        }\n    });\n\n    let start = format!(\"http://{}/start\", server.addr());\n    let dst = format!(\"http://{}/dst\", server.addr());\n\n    let client = Client::builder()\n        .cookie_store(true)\n        .redirect(Policy::default())\n        .build()\n        .unwrap();\n\n    let res = client.get(&start).send().await.unwrap();\n    assert_eq!(res.uri(), dst.as_str());\n    assert_eq!(res.status(), 
wreq::StatusCode::OK);\n}\n\n#[tokio::test]\nasync fn test_redirect_async_pending_follow() {\n    let server = server::http(move |req| async move {\n        if req.uri() == \"/async-redirect\" {\n            http::Response::builder()\n                .status(302)\n                .header(\"location\", \"/dst\")\n                .body(Body::default())\n                .unwrap()\n        } else {\n            assert_eq!(req.uri(), \"/dst\");\n            http::Response::builder()\n                .header(\"server\", \"test-dst\")\n                .body(Body::default())\n                .unwrap()\n        }\n    });\n\n    let url = format!(\"http://{}/async-redirect\", server.addr());\n    let dst = format!(\"http://{}/dst\", server.addr());\n\n    let client = Client::builder()\n        .redirect(Policy::custom(|attempt| {\n            attempt.pending(|attempt| async move {\n                // Simulate async decision-making\n                tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;\n                attempt.follow()\n            })\n        }))\n        .build()\n        .unwrap();\n\n    let res = client.get(&url).send().await.unwrap();\n    assert_eq!(res.uri(), dst.as_str());\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n    assert_eq!(\n        res.headers().get(wreq::header::SERVER).unwrap(),\n        &\"test-dst\"\n    );\n}\n\n#[tokio::test]\nasync fn test_redirect_location_is_encoded() {\n    let server = server::http(move |req| async move {\n        if req.uri() == \"/start\" {\n            http::Response::builder()\n                .status(302)\n                .header(\"location\", \"/dst path\")\n                .body(wreq::Body::default())\n                .unwrap()\n        } else {\n            assert_eq!(req.uri().path(), \"/dst%20path\");\n            http::Response::builder()\n                .status(StatusCode::OK)\n                .body(wreq::Body::default())\n                .unwrap()\n        }\n    });\n\n    let 
url = format!(\"http://{}/start\", server.addr());\n    let dst = format!(\"http://{}/dst%20path\", server.addr());\n\n    let client = Client::builder()\n        .redirect(Policy::default())\n        .build()\n        .unwrap();\n\n    let res = client.get(&url).send().await.unwrap();\n    assert_eq!(res.uri(), dst.as_str());\n    assert_eq!(res.status(), StatusCode::OK);\n}\n"
  },
  {
    "path": "tests/retry.rs",
    "content": "mod support;\n\nuse std::sync::{\n    Arc,\n    atomic::{AtomicUsize, Ordering},\n};\n\nuse support::server;\nuse wreq::Client;\n\n#[tokio::test]\nasync fn retries_apply_in_scope() {\n    let _ = pretty_env_logger::try_init();\n\n    let cnt = Arc::new(AtomicUsize::new(0));\n    let server = server::http(move |_req| {\n        let cnt = cnt.clone();\n        async move {\n            if cnt.fetch_add(1, Ordering::Relaxed) == 0 {\n                // first req is bad\n                http::Response::builder()\n                    .status(http::StatusCode::SERVICE_UNAVAILABLE)\n                    .body(Default::default())\n                    .unwrap()\n            } else {\n                http::Response::default()\n            }\n        }\n    });\n\n    let scope = server.addr().ip().to_string();\n    let policy = wreq::retry::Policy::for_host(scope).classify_fn(|req_rep| {\n        if req_rep.status() == Some(http::StatusCode::SERVICE_UNAVAILABLE) {\n            req_rep.retryable()\n        } else {\n            req_rep.success()\n        }\n    });\n\n    let url = format!(\"http://{}\", server.addr());\n    let resp = Client::builder()\n        .retry(policy)\n        .build()\n        .unwrap()\n        .get(url)\n        .send()\n        .await\n        .unwrap();\n\n    assert_eq!(resp.status(), 200);\n}\n\n#[tokio::test(flavor = \"multi_thread\", worker_threads = 2)]\nasync fn default_retries_have_a_limit() {\n    let _ = pretty_env_logger::try_init();\n\n    let server = server::http_with_config(\n        move |req| async move {\n            assert_eq!(req.version(), http::Version::HTTP_2);\n            // refused forever\n            Err(http2::Error::from(http2::Reason::REFUSED_STREAM))\n        },\n        |_| {},\n    );\n\n    let client = Client::builder().http2_only().build().unwrap();\n\n    let url = format!(\"http://{}\", server.addr());\n\n    let _err = client.get(url).send().await.unwrap_err();\n}\n\n// NOTE: using the default 
\"current_thread\" runtime here would cause the test to\n// fail, because the only thread would block until `panic_rx` receives a\n// notification while the client needs to be driven to get the graceful shutdown\n// done.\n#[tokio::test(flavor = \"multi_thread\", worker_threads = 2)]\nasync fn highly_concurrent_requests_to_http2_server_with_low_max_concurrent_streams() {\n    let client = Client::builder().http2_only().no_proxy().build().unwrap();\n\n    let server = server::http_with_config(\n        move |req| async move {\n            assert_eq!(req.version(), http::Version::HTTP_2);\n            Ok::<_, std::convert::Infallible>(http::Response::default())\n        },\n        |builder| {\n            builder.http2().max_concurrent_streams(1);\n        },\n    );\n\n    let url = format!(\"http://{}\", server.addr());\n\n    let futs = (0..100).map(|_| {\n        let client = client.clone();\n        let url = url.clone();\n        async move {\n            let res = client.get(&url).send().await.unwrap();\n            assert_eq!(res.status(), wreq::StatusCode::OK);\n        }\n    });\n    futures_util::future::join_all(futs).await;\n}\n\n#[tokio::test]\nasync fn highly_concurrent_requests_to_slow_http2_server_with_low_max_concurrent_streams() {\n    use support::delay_server;\n\n    let client = Client::builder().http2_only().no_proxy().build().unwrap();\n\n    let server = delay_server::Server::new(\n        move |req| async move {\n            assert_eq!(req.version(), http::Version::HTTP_2);\n            http::Response::default()\n        },\n        |http| {\n            http.http2().max_concurrent_streams(1);\n        },\n        std::time::Duration::from_secs(2),\n    )\n    .await;\n\n    let url = format!(\"http://{}\", server.addr());\n\n    let futs = (0..100).map(|_| {\n        let client = client.clone();\n        let url = url.clone();\n        async move {\n            let res = client.get(&url).send().await.unwrap();\n            
assert_eq!(res.status(), wreq::StatusCode::OK);\n        }\n    });\n    futures_util::future::join_all(futs).await;\n\n    server.shutdown().await;\n}\n"
  },
  {
    "path": "tests/support/crl.pem",
    "content": "-----BEGIN X509 CRL-----\nMIIBnjCBhwIBATANBgkqhkiG9w0BAQsFADANMQswCQYDVQQDDAJjYRcNMjQwOTI2\nMDA0MjU1WhcNMjQxMDI2MDA0MjU1WjAUMBICAQEXDTI0MDkyNjAwNDI0NlqgMDAu\nMB8GA1UdIwQYMBaAFDxOaZI8zUaGX7mXAZ9Zd8jhyC3sMAsGA1UdFAQEAgIQATAN\nBgkqhkiG9w0BAQsFAAOCAQEAsqBa289UYKAOaH2gp3yC7YBF7uVZ25i3WV/InKjK\nzT/fFzZ9rL87ofl0VuR0GPAfwLXFQ96vYUg/nrlxF/A6FmQKf9JSlVBIVXaS2uyk\nfmdVX8fdU13uD2uKThT5Fojk5nKAeui0xwjTHqe9BjyDscQ5d5pkLIJUj/JbQmRF\nD/OtEpYQZMAdHLDF0a/9v69g/evlPlpTcikAU+T8rXp45rrsuuUgyhJ00UnE41j8\nMmMi3cn23JjFTyOrYx5g/0VFUNcwZpgZSnxNvFbcoh9oHHqS+UDESrwQmkmwrVvH\na7PEJq5ZPtjUPa0i7oFNa9cC+11Doo5bxkpCWhypvgTUzw==\n-----END X509 CRL-----\n"
  },
  {
    "path": "tests/support/delay_server.rs",
    "content": "#![allow(unused)]\nuse std::{convert::Infallible, future::Future, net, time::Duration};\n\nuse futures_util::FutureExt;\nuse http::{Request, Response};\nuse hyper::service::service_fn;\nuse tokio::{net::TcpListener, select, sync::oneshot};\n\n/// This server, unlike [`super::server::Server`], allows for delaying the\n/// specified amount of time after each TCP connection is established. This is\n/// useful for testing the behavior of the client when the server is slow.\n///\n/// For example, in case of HTTP/2, once the TCP/TLS connection is established,\n/// both endpoints are supposed to send a preface and an initial `SETTINGS`\n/// frame (See [RFC9113 3.4] for details). What if these frames are delayed for\n/// whatever reason? This server allows for testing such scenarios.\n///\n/// [RFC9113 3.4]: https://www.rfc-editor.org/rfc/rfc9113.html#name-http-2-connection-preface\npub struct Server {\n    addr: net::SocketAddr,\n    shutdown_tx: Option<oneshot::Sender<()>>,\n    server_terminated_rx: oneshot::Receiver<()>,\n}\n\ntype Builder = hyper_util::server::conn::auto::Builder<hyper_util::rt::TokioExecutor>;\n\nimpl Server {\n    pub async fn new<F1, Fut, F2, Bu>(func: F1, apply_config: F2, delay: Duration) -> Self\n    where\n        F1: Fn(Request<hyper::body::Incoming>) -> Fut + Clone + Send + 'static,\n        Fut: Future<Output = Response<wreq::Body>> + Send + 'static,\n        F2: FnOnce(&mut Builder) -> Bu + Send + 'static,\n    {\n        let (shutdown_tx, shutdown_rx) = oneshot::channel();\n        let (server_terminated_tx, server_terminated_rx) = oneshot::channel();\n\n        let tcp_listener = TcpListener::bind(\"127.0.0.1:0\").await.unwrap();\n        let addr = tcp_listener.local_addr().unwrap();\n\n        tokio::spawn(async move {\n            let mut builder =\n                hyper_util::server::conn::auto::Builder::new(hyper_util::rt::TokioExecutor::new());\n            apply_config(&mut builder);\n\n            
tokio::spawn(async move {\n                let builder = builder;\n                let (connection_shutdown_tx, connection_shutdown_rx) = oneshot::channel();\n                let connection_shutdown_rx = connection_shutdown_rx.shared();\n                let mut shutdown_rx = std::pin::pin!(shutdown_rx);\n\n                let mut handles = Vec::new();\n                loop {\n                    select! {\n                        _ = shutdown_rx.as_mut() => {\n                            connection_shutdown_tx.send(()).unwrap();\n                            break;\n                        }\n                        res = tcp_listener.accept() => {\n                            let (stream, _) = res.unwrap();\n                            let io = hyper_util::rt::TokioIo::new(stream);\n\n\n                            let handle = tokio::spawn({\n                                let connection_shutdown_rx = connection_shutdown_rx.clone();\n                                let func = func.clone();\n                                let svc = service_fn(move |req| {\n                                    let fut = func(req);\n                                    async move {\n                                    Ok::<_, Infallible>(fut.await)\n                                }});\n                                let builder = builder.clone();\n\n                                async move {\n                                    let fut = builder.serve_connection_with_upgrades(io, svc);\n                                    tokio::time::sleep(delay).await;\n\n                                    let mut conn = std::pin::pin!(fut);\n\n                                    select! 
{\n                                        _ = conn.as_mut() => {}\n                                        _ = connection_shutdown_rx => {\n                                            conn.as_mut().graceful_shutdown();\n                                            conn.await.unwrap();\n                                        }\n                                    }\n                                }\n                            });\n\n                            handles.push(handle);\n                        }\n                    }\n                }\n\n                futures_util::future::join_all(handles).await;\n                server_terminated_tx.send(()).unwrap();\n            });\n        });\n\n        Self {\n            addr,\n            shutdown_tx: Some(shutdown_tx),\n            server_terminated_rx,\n        }\n    }\n\n    pub async fn shutdown(mut self) {\n        if let Some(tx) = self.shutdown_tx.take() {\n            let _ = tx.send(());\n        }\n\n        self.server_terminated_rx.await.unwrap();\n    }\n\n    pub fn addr(&self) -> net::SocketAddr {\n        self.addr\n    }\n}\n"
  },
  {
    "path": "tests/support/error.rs",
    "content": "use std::error::Error as StdError;\n\n#[allow(unused)]\npub fn inspect<E>(err: E) -> Vec<String>\nwhere\n    E: Into<Box<dyn StdError + Send + Sync>>,\n{\n    let berr = err.into();\n    let mut err = Some(&*berr as &(dyn StdError + 'static));\n    let mut errs = Vec::new();\n    while let Some(e) = err {\n        errs.push(e.to_string());\n        err = e.source();\n    }\n    errs\n}\n"
  },
  {
    "path": "tests/support/layer.rs",
    "content": "use std::{\n    future::Future,\n    pin::Pin,\n    task::{Context, Poll},\n    time::Duration,\n};\n\nuse futures::future::BoxFuture;\nuse pin_project_lite::pin_project;\nuse tokio::time::Sleep;\nuse tower::{BoxError, Layer, Service};\n\n/// This tower layer injects an arbitrary delay before calling downstream layers.\n#[derive(Clone)]\npub struct DelayLayer {\n    delay: Duration,\n}\n\nimpl DelayLayer {\n    #[allow(unused)]\n    pub const fn new(delay: Duration) -> Self {\n        DelayLayer { delay }\n    }\n}\n\nimpl<S> Layer<S> for DelayLayer {\n    type Service = Delay<S>;\n    fn layer(&self, service: S) -> Self::Service {\n        Delay::new(service, self.delay)\n    }\n}\n\nimpl std::fmt::Debug for DelayLayer {\n    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {\n        f.debug_struct(\"DelayLayer\")\n            .field(\"delay\", &self.delay)\n            .finish()\n    }\n}\n\n/// This tower service injects an arbitrary delay before calling downstream layers.\n#[derive(Debug, Clone)]\npub struct Delay<S> {\n    inner: S,\n    delay: Duration,\n}\n\nimpl<S> Delay<S> {\n    pub fn new(inner: S, delay: Duration) -> Self {\n        Delay { inner, delay }\n    }\n}\n\nimpl<S, Request> Service<Request> for Delay<S>\nwhere\n    S: Service<Request>,\n    S::Error: Into<BoxError>,\n{\n    type Response = S::Response;\n\n    type Error = BoxError;\n\n    type Future = ResponseFuture<S::Future>;\n\n    fn poll_ready(\n        &mut self,\n        cx: &mut std::task::Context<'_>,\n    ) -> std::task::Poll<Result<(), Self::Error>> {\n        println!(\"Delay::poll_ready called\");\n        match self.inner.poll_ready(cx) {\n            Poll::Pending => Poll::Pending,\n            Poll::Ready(r) => Poll::Ready(r.map_err(Into::into)),\n        }\n    }\n\n    fn call(&mut self, req: Request) -> Self::Future {\n        println!(\"Delay::call executed\");\n        let response = self.inner.call(req);\n        let sleep = 
tokio::time::sleep(self.delay);\n\n        ResponseFuture::new(response, sleep)\n    }\n}\n\n// `Delay` response future\npin_project! {\n    #[derive(Debug)]\n    pub struct ResponseFuture<S> {\n        #[pin]\n        response: S,\n        #[pin]\n        sleep: Sleep,\n    }\n}\n\nimpl<S> ResponseFuture<S> {\n    pub(crate) fn new(response: S, sleep: Sleep) -> Self {\n        ResponseFuture { response, sleep }\n    }\n}\n\nimpl<F, S, E> Future for ResponseFuture<F>\nwhere\n    F: Future<Output = Result<S, E>>,\n    E: Into<BoxError>,\n{\n    type Output = Result<S, BoxError>;\n\n    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {\n        let this = self.project();\n\n        // First poll the sleep until complete\n        match this.sleep.poll(cx) {\n            Poll::Pending => return Poll::Pending,\n            Poll::Ready(_) => {}\n        }\n\n        // Then poll the inner future\n        match this.response.poll(cx) {\n            Poll::Ready(v) => Poll::Ready(v.map_err(Into::into)),\n            Poll::Pending => Poll::Pending,\n        }\n    }\n}\n\n#[derive(Clone)]\npub struct SharedConcurrencyLimitLayer {\n    semaphore: std::sync::Arc<tokio::sync::Semaphore>,\n}\n\nimpl SharedConcurrencyLimitLayer {\n    #[allow(unused)]\n    pub fn new(limit: usize) -> Self {\n        Self {\n            semaphore: std::sync::Arc::new(tokio::sync::Semaphore::new(limit)),\n        }\n    }\n}\n\nimpl<S> tower::Layer<S> for SharedConcurrencyLimitLayer {\n    type Service = SharedConcurrencyLimit<S>;\n\n    fn layer(&self, inner: S) -> Self::Service {\n        SharedConcurrencyLimit {\n            inner,\n            semaphore: self.semaphore.clone(),\n        }\n    }\n}\n\n#[derive(Clone)]\npub struct SharedConcurrencyLimit<S> {\n    inner: S,\n    semaphore: std::sync::Arc<tokio::sync::Semaphore>,\n}\n\nimpl<S, Req> tower::Service<Req> for SharedConcurrencyLimit<S>\nwhere\n    S: tower::Service<Req> + Clone + Send + 'static,\n    
S::Future: Send + 'static,\n    Req: Send + 'static,\n{\n    type Response = S::Response;\n    type Error = S::Error;\n    type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;\n\n    fn poll_ready(\n        &mut self,\n        _cx: &mut std::task::Context<'_>,\n    ) -> std::task::Poll<Result<(), Self::Error>> {\n        // always ready, we handle limits in call\n        std::task::Poll::Ready(Ok(()))\n    }\n\n    fn call(&mut self, req: Req) -> Self::Future {\n        let semaphore = self.semaphore.clone();\n        let mut inner = self.inner.clone();\n\n        Box::pin(async move {\n            let _permit = semaphore.acquire_owned().await.unwrap();\n            inner.call(req).await\n        })\n    }\n}\n"
  },
  {
    "path": "tests/support/mod.rs",
    "content": "pub mod delay_server;\npub mod error;\npub mod layer;\npub mod server;\n\n// TODO: remove once done converting to new support server?\n#[allow(unused)]\npub static DEFAULT_USER_AGENT: &str =\n    concat!(env!(\"CARGO_PKG_NAME\"), \"/\", env!(\"CARGO_PKG_VERSION\"));\n"
  },
  {
    "path": "tests/support/server.rs",
    "content": "use std::{\n    convert::Infallible, future::Future, io, net, sync::mpsc as std_mpsc, thread, time::Duration,\n};\n\nuse tokio::{io::AsyncReadExt, net::TcpStream, runtime, sync::oneshot};\nuse wreq::Body;\n\npub struct Server {\n    addr: net::SocketAddr,\n    panic_rx: std_mpsc::Receiver<()>,\n    events_rx: std_mpsc::Receiver<Event>,\n    shutdown_tx: Option<oneshot::Sender<()>>,\n}\n\n#[non_exhaustive]\npub enum Event {\n    ConnectionClosed,\n}\n\nimpl Server {\n    pub fn addr(&self) -> net::SocketAddr {\n        self.addr\n    }\n\n    #[allow(unused)]\n    pub fn events(&mut self) -> Vec<Event> {\n        let mut events = Vec::new();\n        while let Ok(event) = self.events_rx.try_recv() {\n            events.push(event);\n        }\n        events\n    }\n}\n\nimpl Drop for Server {\n    fn drop(&mut self) {\n        if let Some(tx) = self.shutdown_tx.take() {\n            let _ = tx.send(());\n        }\n\n        if !::std::thread::panicking() {\n            self.panic_rx\n                .recv_timeout(Duration::from_secs(3))\n                .expect(\"test server should not panic\");\n        }\n    }\n}\n\n#[allow(unused)]\npub fn http<F, Fut>(func: F) -> Server\nwhere\n    F: Fn(http::Request<hyper::body::Incoming>) -> Fut + Clone + Send + 'static,\n    Fut: Future<Output = http::Response<Body>> + Send + 'static,\n{\n    let infall = move |req| {\n        let fut = func(req);\n        async move { Ok::<_, Infallible>(fut.await) }\n    };\n    http_with_config(infall, |_builder| {})\n}\n\ntype Builder = hyper_util::server::conn::auto::Builder<hyper_util::rt::TokioExecutor>;\n\npub fn http_with_config<F1, Fut, E, F2, Bu>(func: F1, apply_config: F2) -> Server\nwhere\n    F1: Fn(http::Request<hyper::body::Incoming>) -> Fut + Clone + Send + 'static,\n    Fut: Future<Output = Result<http::Response<Body>, E>> + Send + 'static,\n    E: Into<Box<dyn std::error::Error + Send + Sync>>,\n    F2: FnOnce(&mut Builder) -> Bu + Send + 'static,\n{\n   
 // Spawn new runtime in thread to prevent reactor execution context conflict\n    let test_name = thread::current().name().unwrap_or(\"<unknown>\").to_string();\n    thread::spawn(move || {\n        let rt = runtime::Builder::new_current_thread()\n            .enable_all()\n            .build()\n            .expect(\"new rt\");\n        let listener = rt.block_on(async move {\n            tokio::net::TcpListener::bind(&std::net::SocketAddr::from(([127, 0, 0, 1], 0)))\n                .await\n                .unwrap()\n        });\n        let addr = listener.local_addr().unwrap();\n\n        let (shutdown_tx, mut shutdown_rx) = oneshot::channel();\n        let (panic_tx, panic_rx) = std_mpsc::channel();\n        let (events_tx, events_rx) = std_mpsc::channel();\n        let tname = format!(\n            \"test({test_name})-support-server\",\n        );\n        thread::Builder::new()\n            .name(tname)\n            .spawn(move || {\n                rt.block_on(async move {\n                    let mut builder =\n                        hyper_util::server::conn::auto::Builder::new(hyper_util::rt::TokioExecutor::new());\n                    apply_config(&mut builder);\n\n                    loop {\n                        tokio::select! 
{\n                            _ = &mut shutdown_rx => {\n                                break;\n                            }\n                            accepted = listener.accept() => {\n                                let (io, _) = accepted.expect(\"accepted\");\n                                let func = func.clone();\n                                let svc = hyper::service::service_fn(func);\n                                let builder = builder.clone();\n                                let events_tx = events_tx.clone();\n                                tokio::spawn(async move {\n                                    let _ = builder.serve_connection_with_upgrades(hyper_util::rt::TokioIo::new(io), svc).await;\n                                    let _ = events_tx.send(Event::ConnectionClosed);\n                                });\n                            }\n                        }\n                    }\n                    let _ = panic_tx.send(());\n                });\n            })\n            .expect(\"thread spawn\");\n        Server {\n            addr,\n            panic_rx,\n            events_rx,\n            shutdown_tx: Some(shutdown_tx),\n        }\n    })\n    .join()\n    .unwrap()\n}\n\n#[allow(unused)]\npub fn low_level_with_response<F>(do_response: F) -> Server\nwhere\n    for<'c> F: Fn(&'c [u8], &'c mut TcpStream) -> Box<dyn Future<Output = ()> + Send + 'c>\n        + Clone\n        + Send\n        + 'static,\n{\n    // Spawn new runtime in thread to prevent reactor execution context conflict\n    let test_name = thread::current().name().unwrap_or(\"<unknown>\").to_string();\n    thread::spawn(move || {\n        let rt = runtime::Builder::new_current_thread()\n            .enable_all()\n            .build()\n            .expect(\"new rt\");\n        let listener = rt.block_on(async move {\n            tokio::net::TcpListener::bind(&std::net::SocketAddr::from(([127, 0, 0, 1], 0)))\n                .await\n                .unwrap()\n  
      });\n        let addr = listener.local_addr().unwrap();\n\n        let (shutdown_tx, mut shutdown_rx) = oneshot::channel();\n        let (panic_tx, panic_rx) = std_mpsc::channel();\n        let (events_tx, events_rx) = std_mpsc::channel();\n        let tname = format!(\"test({test_name})-support-server\",);\n        thread::Builder::new()\n            .name(tname)\n            .spawn(move || {\n                rt.block_on(async move {\n                    loop {\n                        tokio::select! {\n                            _ = &mut shutdown_rx => {\n                                break;\n                            }\n                            accepted = listener.accept() => {\n                                let (io, _) = accepted.expect(\"accepted\");\n                                let do_response = do_response.clone();\n                                let events_tx = events_tx.clone();\n                                tokio::spawn(async move {\n                                    low_level_server_client(io, do_response).await;\n                                    let _ = events_tx.send(Event::ConnectionClosed);\n                                });\n                            }\n                        }\n                    }\n                    let _ = panic_tx.send(());\n                });\n            })\n            .expect(\"thread spawn\");\n        Server {\n            addr,\n            panic_rx,\n            events_rx,\n            shutdown_tx: Some(shutdown_tx),\n        }\n    })\n    .join()\n    .unwrap()\n}\n\n#[allow(unused)]\nasync fn low_level_server_client<F>(mut client_socket: TcpStream, do_response: F)\nwhere\n    for<'c> F: Fn(&'c [u8], &'c mut TcpStream) -> Box<dyn Future<Output = ()> + Send + 'c>,\n{\n    loop {\n        let request = low_level_read_http_request(&mut client_socket)\n            .await\n            .expect(\"read_http_request failed\");\n        if request.is_empty() {\n            // connection 
closed by client\n            break;\n        }\n\n        Box::into_pin(do_response(&request, &mut client_socket)).await;\n    }\n}\n\n#[allow(unused)]\nasync fn low_level_read_http_request(client_socket: &mut TcpStream) -> io::Result<Vec<u8>> {\n    let mut buf = Vec::new();\n\n    // Read until the delimiter \"\\r\\n\\r\\n\" is found\n    loop {\n        let mut temp_buffer = [0; 1024];\n        let n = client_socket.read(&mut temp_buffer).await?;\n\n        if n == 0 {\n            break;\n        }\n\n        buf.extend_from_slice(&temp_buffer[..n]);\n\n        if let Some(pos) = buf.windows(4).position(|window| window == b\"\\r\\n\\r\\n\") {\n            return Ok(buf.drain(..pos + 4).collect());\n        }\n    }\n\n    Ok(buf)\n}\n"
  },
  {
    "path": "tests/timeouts.rs",
    "content": "mod support;\nuse std::time::Duration;\n\nuse pretty_env_logger::env_logger;\nuse support::server;\nuse wreq::Client;\n\n#[tokio::test]\nasync fn client_timeout() {\n    let _ = env_logger::try_init();\n\n    let server = server::http(move |_req| {\n        async {\n            // delay returning the response\n            tokio::time::sleep(Duration::from_millis(300)).await;\n            http::Response::default()\n        }\n    });\n\n    let client = Client::builder()\n        .timeout(Duration::from_millis(100))\n        .no_proxy()\n        .build()\n        .unwrap();\n\n    let url = format!(\"http://{}/slow\", server.addr());\n    let err = client.get(&url).send().await.unwrap_err();\n\n    assert!(err.is_timeout());\n    assert_eq!(err.uri().map(|u| u.to_string()), Some(url));\n}\n\n#[tokio::test]\nasync fn request_timeout() {\n    let _ = env_logger::try_init();\n\n    let server = server::http(move |_req| {\n        async {\n            // delay returning the response\n            tokio::time::sleep(Duration::from_millis(300)).await;\n            http::Response::default()\n        }\n    });\n\n    let client = Client::builder().no_proxy().build().unwrap();\n\n    let url = format!(\"http://{}/slow\", server.addr());\n\n    let err = client\n        .get(&url)\n        .timeout(Duration::from_millis(100))\n        .send()\n        .await\n        .unwrap_err();\n\n    assert!(err.is_timeout() && !err.is_connect());\n    assert_eq!(err.uri().map(|u| u.to_string()), Some(url));\n}\n\n#[tokio::test]\nasync fn connect_timeout() {\n    let _ = env_logger::try_init();\n\n    let client = Client::builder()\n        .connect_timeout(Duration::from_millis(100))\n        .no_proxy()\n        .build()\n        .unwrap();\n\n    let url = \"http://192.0.2.1:81/slow\";\n\n    let err = client\n        .get(url)\n        .timeout(Duration::from_millis(1000))\n        .send()\n        .await\n        .unwrap_err();\n\n    
assert!(err.is_timeout());\n}\n\n#[tokio::test]\nasync fn connect_many_timeout_succeeds() {\n    let _ = env_logger::try_init();\n\n    let server = server::http(move |_req| async { http::Response::default() });\n    let port = server.addr().port();\n\n    let client = Client::builder()\n        .resolve_to_addrs(\n            \"many_addrs\",\n            [\"127.0.0.1:81\".parse().unwrap(), server.addr()],\n        )\n        .connect_timeout(Duration::from_millis(100))\n        .no_proxy()\n        .build()\n        .unwrap();\n\n    let url = format!(\"http://many_addrs:{port}/eventual\");\n\n    let _ = client\n        .get(url)\n        .timeout(Duration::from_millis(1000))\n        .send()\n        .await\n        .unwrap();\n}\n\n#[tokio::test]\nasync fn connect_many_timeout() {\n    let _ = env_logger::try_init();\n\n    let client = Client::builder()\n        .resolve_to_addrs(\n            \"many_addrs\",\n            [\n                \"192.0.2.1:81\".parse().unwrap(),\n                \"192.0.2.2:81\".parse().unwrap(),\n            ],\n        )\n        .connect_timeout(Duration::from_millis(100))\n        .no_proxy()\n        .build()\n        .unwrap();\n\n    let url = \"http://many_addrs:81/slow\".to_string();\n\n    let err = client\n        .get(url)\n        .timeout(Duration::from_millis(1000))\n        .send()\n        .await\n        .unwrap_err();\n\n    assert!(err.is_connect() && err.is_timeout());\n}\n\n#[cfg(feature = \"stream\")]\n#[tokio::test]\nasync fn response_timeout() {\n    let _ = env_logger::try_init();\n\n    let server = server::http(move |_req| {\n        async {\n            // immediate response, but delayed body\n            let body = wreq::Body::wrap_stream(futures_util::stream::once(async {\n                tokio::time::sleep(Duration::from_secs(1)).await;\n                Ok::<_, std::convert::Infallible>(\"Hello\")\n            }));\n\n            http::Response::new(body)\n        }\n    });\n\n    let client = 
Client::builder()\n        .timeout(Duration::from_millis(500))\n        .no_proxy()\n        .build()\n        .unwrap();\n\n    let url = format!(\"http://{}/slow\", server.addr());\n    let res = client.get(&url).send().await.expect(\"Failed to get\");\n    let err = res.text().await.unwrap_err();\n\n    assert!(err.is_timeout());\n}\n\n#[tokio::test]\nasync fn read_timeout_applies_to_headers() {\n    let _ = env_logger::try_init();\n\n    let server = server::http(move |_req| {\n        async {\n            // delay returning the response\n            tokio::time::sleep(Duration::from_millis(300)).await;\n            http::Response::default()\n        }\n    });\n\n    let client = Client::builder()\n        .read_timeout(Duration::from_millis(100))\n        .no_proxy()\n        .build()\n        .unwrap();\n\n    let url = format!(\"http://{}/slow\", server.addr());\n\n    let err = client.get(&url).send().await.unwrap_err();\n\n    assert!(err.is_timeout());\n    assert_eq!(err.uri().map(|u| u.to_string()), Some(url));\n}\n\n#[cfg(feature = \"stream\")]\n#[tokio::test]\nasync fn read_timeout_applies_to_body() {\n    let _ = env_logger::try_init();\n\n    let server = server::http(move |_req| {\n        async {\n            // immediate response, but delayed body\n            let body = wreq::Body::wrap_stream(futures_util::stream::once(async {\n                tokio::time::sleep(Duration::from_millis(300)).await;\n                Ok::<_, std::convert::Infallible>(\"Hello\")\n            }));\n\n            http::Response::new(body)\n        }\n    });\n\n    let client = Client::builder()\n        .read_timeout(Duration::from_millis(100))\n        .no_proxy()\n        .build()\n        .unwrap();\n\n    let url = format!(\"http://{}/slow\", server.addr());\n    let res = client.get(&url).send().await.expect(\"Failed to get\");\n    let err = res.text().await.unwrap_err();\n\n    assert!(err.is_timeout());\n}\n\n#[cfg(feature = 
\"stream\")]\n#[tokio::test]\nasync fn read_timeout_allows_slow_response_body() {\n    let _ = env_logger::try_init();\n\n    let server = server::http(move |_req| {\n        async {\n            // immediate response, but body that has slow chunks\n            let slow = futures_util::stream::unfold(0, |state| async move {\n                if state < 3 {\n                    tokio::time::sleep(Duration::from_millis(100)).await;\n                    Some((\n                        Ok::<_, std::convert::Infallible>(state.to_string()),\n                        state + 1,\n                    ))\n                } else {\n                    None\n                }\n            });\n            let body = wreq::Body::wrap_stream(slow);\n\n            http::Response::new(body)\n        }\n    });\n\n    let client = Client::builder()\n        .read_timeout(Duration::from_millis(200))\n        //.timeout(Duration::from_millis(200))\n        .no_proxy()\n        .build()\n        .unwrap();\n\n    let url = format!(\"http://{}/slow\", server.addr());\n    let res = client.get(&url).send().await.expect(\"Failed to get\");\n    let body = res.text().await.expect(\"body text\");\n\n    assert_eq!(body, \"012\");\n}\n\n#[tokio::test]\nasync fn response_body_timeout_forwards_size_hint() {\n    let _ = env_logger::try_init();\n\n    let server = server::http(move |_req| async { http::Response::new(b\"hello\".to_vec().into()) });\n\n    let client = Client::builder().no_proxy().build().unwrap();\n\n    let url = format!(\"http://{}/slow\", server.addr());\n\n    let res = client\n        .get(&url)\n        .timeout(Duration::from_secs(1))\n        .send()\n        .await\n        .expect(\"response\");\n\n    assert_eq!(res.content_length(), Some(5));\n}\n"
  },
  {
    "path": "tests/unix_socket.rs",
    "content": "#![cfg(unix)]\n\nuse std::{hash::BuildHasher, time::Duration};\n\nuse http::Method;\nuse http_body_util::Full;\nuse hyper::{Request, Response, body::Incoming, service::service_fn};\nuse hyper_util::{\n    rt::{TokioExecutor, TokioIo},\n    server::conn::auto::Builder,\n};\nuse tokio::{net::UnixListener, task};\nuse wreq::{Client, Proxy};\n\nfn random_sock_path() -> std::path::PathBuf {\n    let mut buf = std::env::temp_dir();\n    // libstd uses system random to create each one\n    let rng = std::collections::hash_map::RandomState::new();\n    let n = rng.hash_one(\"uds-sock\");\n    buf.push(format!(\"test-uds-sock-{}\", n));\n    buf\n}\n\n#[tokio::test]\nasync fn test_unix_socket() {\n    let sock_path = random_sock_path();\n\n    let listener = UnixListener::bind(&sock_path).unwrap();\n    let server = async move {\n        loop {\n            let (stream, _) = listener.accept().await.unwrap();\n            let io = TokioIo::new(stream);\n            let service = service_fn(|_req: Request<Incoming>| async {\n                Ok::<_, hyper::Error>(Response::new(Full::new(&b\"hello unix\"[..])))\n            });\n            task::spawn(async move {\n                if let Err(e) = hyper::server::conn::http1::Builder::new()\n                    .serve_connection(io, service)\n                    .await\n                {\n                    eprintln!(\"server error: {:?}\", e);\n                }\n            });\n        }\n    };\n    tokio::spawn(server);\n\n    let client = Client::builder()\n        .proxy(Proxy::unix(sock_path).unwrap())\n        .timeout(Duration::from_secs(10))\n        .build()\n        .unwrap();\n\n    let resp = client.get(\"http://localhost/\").send().await.unwrap();\n    let body = resp.text().await.unwrap();\n    assert_eq!(body, \"hello unix\");\n}\n\n#[tokio::test]\nasync fn test_proxy_unix_socket() {\n    let sock_path = random_sock_path();\n\n    let listener = UnixListener::bind(&sock_path).unwrap();\n    let 
server = async move {\n        loop {\n            let (stream, _) = listener.accept().await.unwrap();\n            let io = TokioIo::new(stream);\n            let service = service_fn(|req: Request<Incoming>| {\n                async move {\n                    if Method::CONNECT == req.method() {\n                        // Received an HTTP request like:\n                        // ```\n                        // CONNECT www.domain.com:443 HTTP/1.1\n                        // Host: www.domain.com:443\n                        // Proxy-Connection: Keep-Alive\n                        // ```\n                        //\n                        // When HTTP method is CONNECT we should return an empty body,\n                        // then we can eventually upgrade the connection and talk a new protocol.\n                        //\n                        // Note: only after client received an empty body with STATUS_OK can the\n                        // connection be upgraded, so we can't return a response inside\n                        // `on_upgrade` future.\n                        let authority = req.uri().authority().cloned().unwrap();\n                        tokio::task::spawn({\n                            let req = req;\n                            async move {\n                                match hyper::upgrade::on(req).await {\n                                    Ok(upgraded) => {\n                                        tracing::info!(\"upgraded connection to: {}\", authority);\n                                        if let Ok(mut io) =\n                                            tokio::net::TcpStream::connect(authority.to_string())\n                                                .await\n                                        {\n                                            let _ = tokio::io::copy_bidirectional(\n                                                &mut TokioIo::new(upgraded),\n                                                &mut io,\n    
                                        )\n                                            .await;\n                                        }\n                                    }\n                                    Err(e) => tracing::warn!(\"upgrade error: {}\", e),\n                                }\n                            }\n                        });\n\n                        Ok::<_, hyper::Error>(Response::new(Full::new(&b\"\"[..])))\n                    } else {\n                        Ok::<_, hyper::Error>(Response::new(Full::new(\n                            &b\"unsupported request method\"[..],\n                        )))\n                    }\n                }\n            });\n            task::spawn(async move {\n                if let Err(e) = Builder::new(TokioExecutor::new())\n                    .serve_connection_with_upgrades(io, service)\n                    .await\n                {\n                    eprintln!(\"server error: {:?}\", e);\n                }\n            });\n        }\n    };\n    tokio::spawn(server);\n\n    let client = Client::builder()\n        .proxy(Proxy::unix(sock_path).unwrap())\n        .timeout(Duration::from_secs(10))\n        .build()\n        .unwrap();\n\n    let resp = client.get(\"https://www.google.com\").send().await.unwrap();\n    assert!(resp.status().is_success(), \"Expected successful response\");\n}\n"
  },
  {
    "path": "tests/upgrade.rs",
    "content": "mod support;\nuse http::Method;\nuse support::server;\nuse tokio::io::{AsyncReadExt, AsyncWriteExt};\nuse wreq::Client;\n\n#[tokio::test]\nasync fn http_upgrade() {\n    let server = server::http(move |req| {\n        assert_eq!(req.method(), \"GET\");\n        assert_eq!(req.headers()[\"connection\"], \"upgrade\");\n        assert_eq!(req.headers()[\"upgrade\"], \"foobar\");\n\n        tokio::spawn(async move {\n            let mut upgraded = hyper_util::rt::TokioIo::new(hyper::upgrade::on(req).await.unwrap());\n\n            let mut buf = vec![0; 7];\n            upgraded.read_exact(&mut buf).await.unwrap();\n            assert_eq!(buf, b\"foo=bar\");\n\n            upgraded.write_all(b\"bar=foo\").await.unwrap();\n        });\n\n        async {\n            http::Response::builder()\n                .status(http::StatusCode::SWITCHING_PROTOCOLS)\n                .header(http::header::CONNECTION, \"upgrade\")\n                .header(http::header::UPGRADE, \"foobar\")\n                .body(wreq::Body::default())\n                .unwrap()\n        }\n    });\n\n    let res = Client::builder()\n        .build()\n        .unwrap()\n        .get(format!(\"http://{}\", server.addr()))\n        .header(http::header::CONNECTION, \"upgrade\")\n        .header(http::header::UPGRADE, \"foobar\")\n        .send()\n        .await\n        .unwrap();\n\n    assert_eq!(res.status(), http::StatusCode::SWITCHING_PROTOCOLS);\n    let mut upgraded = res.upgrade().await.unwrap();\n\n    upgraded.write_all(b\"foo=bar\").await.unwrap();\n\n    let mut buf = vec![];\n    upgraded.read_to_end(&mut buf).await.unwrap();\n    assert_eq!(buf, b\"bar=foo\");\n}\n\n#[tokio::test]\nasync fn http2_upgrade() {\n    let server = server::http_with_config(\n        move |req| {\n            assert_eq!(req.method(), http::Method::CONNECT);\n            assert_eq!(req.version(), http::Version::HTTP_2);\n\n            tokio::spawn(async move {\n                let mut upgraded =\n   
                 hyper_util::rt::TokioIo::new(hyper::upgrade::on(req).await.unwrap());\n\n                let mut buf = vec![0; 7];\n                upgraded.read_exact(&mut buf).await.unwrap();\n                assert_eq!(buf, b\"foo=bar\");\n\n                upgraded.write_all(b\"bar=foo\").await.unwrap();\n            });\n\n            async { Ok::<_, std::convert::Infallible>(http::Response::default()) }\n        },\n        |builder| {\n            let mut http2 = builder.http2();\n            http2.enable_connect_protocol();\n        },\n    );\n\n    let res = Client::builder()\n        .http2_only()\n        .build()\n        .unwrap()\n        .request(Method::CONNECT, format!(\"http://{}\", server.addr()))\n        .send()\n        .await\n        .unwrap();\n\n    assert_eq!(res.status(), http::StatusCode::OK);\n    assert_eq!(res.version(), http::Version::HTTP_2);\n    let mut upgraded = res.upgrade().await.unwrap();\n\n    upgraded.write_all(b\"foo=bar\").await.unwrap();\n\n    let mut buf = vec![];\n    upgraded.read_to_end(&mut buf).await.unwrap();\n    assert_eq!(buf, b\"bar=foo\");\n}\n"
  },
  {
    "path": "tests/zstd.rs",
    "content": "mod support;\nuse support::server;\nuse tokio::io::AsyncWriteExt;\nuse wreq::Client;\n\n#[tokio::test]\nasync fn zstd_response() {\n    zstd_case(10_000, 4096).await;\n}\n\n#[tokio::test]\nasync fn zstd_single_byte_chunks() {\n    zstd_case(10, 1).await;\n}\n\n#[tokio::test]\nasync fn test_zstd_empty_body() {\n    let server = server::http(move |req| async move {\n        assert_eq!(req.method(), \"HEAD\");\n\n        http::Response::builder()\n            .header(\"content-encoding\", \"zstd\")\n            .body(Default::default())\n            .unwrap()\n    });\n\n    let res = wreq::head(format!(\"http://{}/zstd\", server.addr()))\n        .send()\n        .await\n        .unwrap();\n\n    let body = res.text().await.unwrap();\n\n    assert_eq!(body, \"\");\n}\n\n#[tokio::test]\nasync fn test_accept_header_is_not_changed_if_set() {\n    let server = server::http(move |req| async move {\n        assert_eq!(req.headers()[\"accept\"], \"application/json\");\n        assert!(\n            req.headers()[\"accept-encoding\"]\n                .to_str()\n                .unwrap()\n                .contains(\"zstd\")\n        );\n        http::Response::default()\n    });\n\n    let res = wreq::get(format!(\"http://{}/accept\", server.addr()))\n        .header(\n            wreq::header::ACCEPT,\n            wreq::header::HeaderValue::from_static(\"application/json\"),\n        )\n        .send()\n        .await\n        .unwrap();\n\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n}\n\n#[tokio::test]\nasync fn test_accept_encoding_header_is_not_changed_if_set() {\n    let server = server::http(move |req| async move {\n        assert_eq!(req.headers()[\"accept\"], \"*/*\");\n        assert_eq!(req.headers()[\"accept-encoding\"], \"identity\");\n        http::Response::default()\n    });\n\n    let res = wreq::get(format!(\"http://{}/accept-encoding\", server.addr()))\n        .header(wreq::header::ACCEPT, \"*/*\")\n        .header(\n            
wreq::header::ACCEPT_ENCODING,\n            wreq::header::HeaderValue::from_static(\"identity\"),\n        )\n        .send()\n        .await\n        .unwrap();\n\n    assert_eq!(res.status(), wreq::StatusCode::OK);\n}\n\nasync fn zstd_case(response_size: usize, chunk_size: usize) {\n    use futures_util::stream::StreamExt;\n\n    let content: String = (0..response_size).fold(String::new(), |mut acc, i| {\n        acc.push_str(&format!(\"test {i}\"));\n        acc\n    });\n\n    let zstded_content = zstd::encode_all(content.as_bytes(), 3).unwrap();\n\n    let mut response = format!(\n        \"\\\n         HTTP/1.1 200 OK\\r\\n\\\n         Server: test-accept\\r\\n\\\n         Content-Encoding: zstd\\r\\n\\\n         Content-Length: {}\\r\\n\\\n         \\r\\n\",\n        &zstded_content.len()\n    )\n    .into_bytes();\n    response.extend(&zstded_content);\n\n    let server = server::http(move |req| {\n        assert!(\n            req.headers()[\"accept-encoding\"]\n                .to_str()\n                .unwrap()\n                .contains(\"zstd\")\n        );\n\n        let zstded = zstded_content.clone();\n        async move {\n            let len = zstded.len();\n            let stream =\n                futures_util::stream::unfold((zstded, 0), move |(zstded, pos)| async move {\n                    let chunk = zstded.chunks(chunk_size).nth(pos)?.to_vec();\n\n                    Some((chunk, (zstded, pos + 1)))\n                });\n\n            let body = wreq::Body::wrap_stream(stream.map(Ok::<_, std::convert::Infallible>));\n\n            http::Response::builder()\n                .header(\"content-encoding\", \"zstd\")\n                .header(\"content-length\", len)\n                .body(body)\n                .unwrap()\n        }\n    });\n\n    let res = wreq::get(format!(\"http://{}/zstd\", server.addr()))\n        .send()\n        .await\n        .expect(\"response\");\n\n    let body = res.text().await.expect(\"text\");\n    
assert_eq!(body, content);\n}\n\nconst COMPRESSED_RESPONSE_HEADERS: &[u8] = b\"HTTP/1.1 200 OK\\x0d\\x0a\\\n            Content-Type: text/plain\\x0d\\x0a\\\n            Connection: keep-alive\\x0d\\x0a\\\n            Content-Encoding: zstd\\x0d\\x0a\";\n\nconst RESPONSE_CONTENT: &str = \"some message here\";\n\nfn zstd_compress(input: &[u8]) -> Vec<u8> {\n    zstd::encode_all(input, 3).unwrap()\n}\n\n#[tokio::test]\nasync fn test_non_chunked_non_fragmented_response() {\n    let server = server::low_level_with_response(|_raw_request, client_socket| {\n        Box::new(async move {\n            let zstded_content = zstd_compress(RESPONSE_CONTENT.as_bytes());\n            let content_length_header =\n                format!(\"Content-Length: {}\\r\\n\\r\\n\", zstded_content.len()).into_bytes();\n            let response = [\n                COMPRESSED_RESPONSE_HEADERS,\n                &content_length_header,\n                &zstded_content,\n            ]\n            .concat();\n\n            client_socket\n                .write_all(response.as_slice())\n                .await\n                .expect(\"response write_all failed\");\n            client_socket.flush().await.expect(\"response flush failed\");\n        })\n    });\n\n    let res = wreq::get(format!(\"http://{}/\", server.addr()))\n        .send()\n        .await\n        .expect(\"response\");\n\n    assert_eq!(res.text().await.expect(\"text\"), RESPONSE_CONTENT);\n}\n\n#[tokio::test]\nasync fn test_chunked_fragmented_response_1() {\n    const DELAY_BETWEEN_RESPONSE_PARTS: tokio::time::Duration =\n        tokio::time::Duration::from_millis(1000);\n    const DELAY_MARGIN: tokio::time::Duration = tokio::time::Duration::from_millis(50);\n\n    let server = server::low_level_with_response(|_raw_request, client_socket| {\n        Box::new(async move {\n            let zstded_content = zstd_compress(RESPONSE_CONTENT.as_bytes());\n            let response_first_part = [\n                
COMPRESSED_RESPONSE_HEADERS,\n                format!(\n                    \"Transfer-Encoding: chunked\\r\\n\\r\\n{:x}\\r\\n\",\n                    zstded_content.len()\n                )\n                .as_bytes(),\n                &zstded_content,\n            ]\n            .concat();\n            let response_second_part = b\"\\r\\n0\\r\\n\\r\\n\";\n\n            client_socket\n                .write_all(response_first_part.as_slice())\n                .await\n                .expect(\"response_first_part write_all failed\");\n            client_socket\n                .flush()\n                .await\n                .expect(\"response_first_part flush failed\");\n\n            tokio::time::sleep(DELAY_BETWEEN_RESPONSE_PARTS).await;\n\n            client_socket\n                .write_all(response_second_part)\n                .await\n                .expect(\"response_second_part write_all failed\");\n            client_socket\n                .flush()\n                .await\n                .expect(\"response_second_part flush failed\");\n        })\n    });\n\n    let start = tokio::time::Instant::now();\n    let res = wreq::get(format!(\"http://{}/\", server.addr()))\n        .send()\n        .await\n        .expect(\"response\");\n\n    assert_eq!(res.text().await.expect(\"text\"), RESPONSE_CONTENT);\n    assert!(start.elapsed() >= DELAY_BETWEEN_RESPONSE_PARTS - DELAY_MARGIN);\n}\n\n#[tokio::test]\nasync fn test_chunked_fragmented_response_2() {\n    const DELAY_BETWEEN_RESPONSE_PARTS: tokio::time::Duration =\n        tokio::time::Duration::from_millis(1000);\n    const DELAY_MARGIN: tokio::time::Duration = tokio::time::Duration::from_millis(50);\n\n    let server = server::low_level_with_response(|_raw_request, client_socket| {\n        Box::new(async move {\n            let zstded_content = zstd_compress(RESPONSE_CONTENT.as_bytes());\n            let response_first_part = [\n                COMPRESSED_RESPONSE_HEADERS,\n                format!(\n  
                  \"Transfer-Encoding: chunked\\r\\n\\r\\n{:x}\\r\\n\",\n                    zstded_content.len()\n                )\n                .as_bytes(),\n                &zstded_content,\n                b\"\\r\\n\",\n            ]\n            .concat();\n            let response_second_part = b\"0\\r\\n\\r\\n\";\n\n            client_socket\n                .write_all(response_first_part.as_slice())\n                .await\n                .expect(\"response_first_part write_all failed\");\n            client_socket\n                .flush()\n                .await\n                .expect(\"response_first_part flush failed\");\n\n            tokio::time::sleep(DELAY_BETWEEN_RESPONSE_PARTS).await;\n\n            client_socket\n                .write_all(response_second_part)\n                .await\n                .expect(\"response_second_part write_all failed\");\n            client_socket\n                .flush()\n                .await\n                .expect(\"response_second_part flush failed\");\n        })\n    });\n\n    let start = tokio::time::Instant::now();\n    let res = wreq::get(format!(\"http://{}/\", server.addr()))\n        .send()\n        .await\n        .expect(\"response\");\n\n    assert_eq!(res.text().await.expect(\"text\"), RESPONSE_CONTENT);\n    assert!(start.elapsed() >= DELAY_BETWEEN_RESPONSE_PARTS - DELAY_MARGIN);\n}\n\n#[tokio::test]\nasync fn test_chunked_fragmented_response_with_extra_bytes() {\n    const DELAY_BETWEEN_RESPONSE_PARTS: tokio::time::Duration =\n        tokio::time::Duration::from_millis(1000);\n    const DELAY_MARGIN: tokio::time::Duration = tokio::time::Duration::from_millis(50);\n\n    let server = server::low_level_with_response(|_raw_request, client_socket| {\n        Box::new(async move {\n            let zstded_content = zstd_compress(RESPONSE_CONTENT.as_bytes());\n            let response_first_part = [\n                COMPRESSED_RESPONSE_HEADERS,\n                format!(\n                    
\"Transfer-Encoding: chunked\\r\\n\\r\\n{:x}\\r\\n\",\n                    zstded_content.len()\n                )\n                .as_bytes(),\n                &zstded_content,\n            ]\n            .concat();\n            let response_second_part = b\"\\r\\n2ab\\r\\n0\\r\\n\\r\\n\";\n\n            client_socket\n                .write_all(response_first_part.as_slice())\n                .await\n                .expect(\"response_first_part write_all failed\");\n            client_socket\n                .flush()\n                .await\n                .expect(\"response_first_part flush failed\");\n\n            tokio::time::sleep(DELAY_BETWEEN_RESPONSE_PARTS).await;\n\n            client_socket\n                .write_all(response_second_part)\n                .await\n                .expect(\"response_second_part write_all failed\");\n            client_socket\n                .flush()\n                .await\n                .expect(\"response_second_part flush failed\");\n        })\n    });\n\n    let start = tokio::time::Instant::now();\n    let res = wreq::get(format!(\"http://{}/\", server.addr()))\n        .send()\n        .await\n        .expect(\"response\");\n\n    let err = res.text().await.expect_err(\"there must be an error\");\n    assert!(err.is_decode());\n    assert!(start.elapsed() >= DELAY_BETWEEN_RESPONSE_PARTS - DELAY_MARGIN);\n}\n\n// Big response can have multiple ZSTD frames in it\n#[tokio::test]\nasync fn test_non_chunked_non_fragmented_multiple_frames_response() {\n    let server = server::low_level_with_response(|_raw_request, client_socket| {\n        Box::new(async move {\n            // Split the content into two parts\n            let content_bytes = RESPONSE_CONTENT.as_bytes();\n            let mid = content_bytes.len() / 2;\n            // Compress each part separately to create multiple ZSTD frames\n            let compressed_part1 = zstd::encode_all(&content_bytes[0..mid], 3).unwrap();\n            let compressed_part2 
= zstd::encode_all(&content_bytes[mid..], 3).unwrap();\n            // Concatenate the compressed frames\n            let mut zstded_content = compressed_part1;\n            zstded_content.extend_from_slice(&compressed_part2);\n            // Set Content-Length to the total length of the concatenated frames\n            let content_length_header =\n                format!(\"Content-Length: {}\\r\\n\\r\\n\", zstded_content.len()).into_bytes();\n            let response = [\n                COMPRESSED_RESPONSE_HEADERS,\n                &content_length_header,\n                &zstded_content,\n            ]\n            .concat();\n\n            client_socket\n                .write_all(response.as_slice())\n                .await\n                .expect(\"response write_all failed\");\n            client_socket.flush().await.expect(\"response flush failed\");\n        })\n    });\n\n    let res = wreq::get(format!(\"http://{}/\", server.addr()))\n        .send()\n        .await\n        .expect(\"response\");\n\n    assert_eq!(res.text().await.expect(\"text\"), RESPONSE_CONTENT);\n}\n\n#[tokio::test]\nasync fn test_chunked_fragmented_multiple_frames_in_one_chunk() {\n    // Define constants for delay and timing margin\n    const DELAY_BETWEEN_RESPONSE_PARTS: tokio::time::Duration =\n        tokio::time::Duration::from_millis(1000); // 1-second delay\n    const DELAY_MARGIN: tokio::time::Duration = tokio::time::Duration::from_millis(50); // Margin for timing assertions\n\n    // Set up a low-level server\n    let server = server::low_level_with_response(|_raw_request, client_socket| {\n        Box::new(async move {\n            // Split RESPONSE_CONTENT into two parts\n            let mid = RESPONSE_CONTENT.len() / 2;\n            let part1 = &RESPONSE_CONTENT[0..mid];\n            let part2 = &RESPONSE_CONTENT[mid..];\n\n            // Compress each part separately to create two ZSTD frames\n            let compressed_part1 = zstd_compress(part1.as_bytes());\n      
      let compressed_part2 = zstd_compress(part2.as_bytes());\n\n            // Concatenate the frames into a single chunk's data\n            let chunk_data = [compressed_part1.as_slice(), compressed_part2.as_slice()].concat();\n\n            // Calculate the chunk size in bytes\n            let chunk_size = chunk_data.len();\n\n            // Prepare the initial response part: headers + chunk size\n            let headers = [\n                COMPRESSED_RESPONSE_HEADERS, /* e.g., \"HTTP/1.1 200 OK\\r\\nContent-Encoding:\n                                              * zstd\\r\\n\" */\n                b\"Transfer-Encoding: chunked\\r\\n\\r\\n\", // Indicate chunked encoding\n                format!(\"{chunk_size:x}\\r\\n\").as_bytes(), // Chunk size in hex\n            ]\n            .concat();\n\n            // Send headers + chunk size + chunk data\n            client_socket\n                .write_all([headers.as_slice(), &chunk_data].concat().as_slice())\n                .await\n                .expect(\"write_all failed\");\n            client_socket.flush().await.expect(\"flush failed\");\n\n            // Introduce a delay to simulate fragmentation\n            tokio::time::sleep(DELAY_BETWEEN_RESPONSE_PARTS).await;\n\n            // Send chunk terminator + final chunk\n            client_socket\n                .write_all(b\"\\r\\n0\\r\\n\\r\\n\")\n                .await\n                .expect(\"write_all failed\");\n            client_socket.flush().await.expect(\"flush failed\");\n        })\n    });\n\n    // Record the start time for delay verification\n    let start = tokio::time::Instant::now();\n\n    let res = wreq::get(format!(\"http://{}/\", server.addr()))\n        .send()\n        .await\n        .expect(\"Failed to get response\");\n\n    // Verify the decompressed response matches the original content\n    assert_eq!(\n        res.text().await.expect(\"Failed to read text\"),\n        RESPONSE_CONTENT\n    );\n    assert!(start.elapsed() >= 
DELAY_BETWEEN_RESPONSE_PARTS - DELAY_MARGIN);\n}\n\n#[tokio::test]\nasync fn test_connection_reuse_with_chunked_fragmented_multiple_frames_in_one_chunk() {\n    // Define constants for delay and timing margin\n    const DELAY_BETWEEN_RESPONSE_PARTS: tokio::time::Duration =\n        tokio::time::Duration::from_millis(1000); // 1-second delay\n    const DELAY_MARGIN: tokio::time::Duration = tokio::time::Duration::from_millis(50); // Margin for timing assertions\n\n    // We will record the peer addresses of each client request here\n    let peer_addrs = std::sync::Arc::new(std::sync::Mutex::new(Vec::<std::net::SocketAddr>::new()));\n    let peer_addrs_clone = peer_addrs.clone();\n\n    // Set up a low-level server (it will reuse existing client connection, executing callback for\n    // each client request)\n    let server = server::low_level_with_response(move |_raw_request, client_socket| {\n        let peer_addrs = peer_addrs_clone.clone();\n        Box::new(async move {\n            // Split RESPONSE_CONTENT into two parts\n            let mid = RESPONSE_CONTENT.len() / 2;\n            let part1 = &RESPONSE_CONTENT[0..mid];\n            let part2 = &RESPONSE_CONTENT[mid..];\n\n            // Compress each part separately to create two ZSTD frames\n            let compressed_part1 = zstd_compress(part1.as_bytes());\n            let compressed_part2 = zstd_compress(part2.as_bytes());\n\n            // Concatenate the frames into a single chunk's data\n            let chunk_data = [compressed_part1.as_slice(), compressed_part2.as_slice()].concat();\n\n            // Calculate the chunk size in bytes\n            let chunk_size = chunk_data.len();\n\n            // Prepare the initial response part: headers + chunk size\n            let headers = [\n                COMPRESSED_RESPONSE_HEADERS, /* e.g., \"HTTP/1.1 200 OK\\r\\nContent-Encoding:\n                                              * zstd\\r\\n\" */\n                b\"Transfer-Encoding: chunked\\r\\n\\r\\n\", 
// Indicate chunked encoding\n                format!(\"{chunk_size:x}\\r\\n\").as_bytes(), // Chunk size in hex\n            ]\n            .concat();\n\n            // Send headers + chunk size + chunk data\n            client_socket\n                .write_all([headers.as_slice(), &chunk_data].concat().as_slice())\n                .await\n                .expect(\"write_all failed\");\n            client_socket.flush().await.expect(\"flush failed\");\n\n            // Introduce a delay to simulate fragmentation\n            tokio::time::sleep(DELAY_BETWEEN_RESPONSE_PARTS).await;\n\n            peer_addrs\n                .lock()\n                .unwrap()\n                .push(client_socket.peer_addr().unwrap());\n\n            // Send chunk terminator + final chunk\n            client_socket\n                .write_all(b\"\\r\\n0\\r\\n\\r\\n\")\n                .await\n                .expect(\"write_all failed\");\n            client_socket.flush().await.expect(\"flush failed\");\n        })\n    });\n\n    let client = Client::builder()\n        .pool_idle_timeout(std::time::Duration::from_secs(30))\n        .pool_max_idle_per_host(1)\n        .build()\n        .unwrap();\n\n    const NUMBER_OF_REQUESTS: usize = 5;\n\n    for _ in 0..NUMBER_OF_REQUESTS {\n        // Record the start time for delay verification\n        let start = tokio::time::Instant::now();\n\n        let res = client\n            .get(format!(\"http://{}/\", server.addr()))\n            .send()\n            .await\n            .expect(\"Failed to get response\");\n\n        // Verify the decompressed response matches the original content\n        assert_eq!(\n            res.text().await.expect(\"Failed to read text\"),\n            RESPONSE_CONTENT\n        );\n        assert!(start.elapsed() >= DELAY_BETWEEN_RESPONSE_PARTS - DELAY_MARGIN);\n    }\n\n    drop(client);\n\n    // Check that all peer addresses are the same\n    let peer_addrs = peer_addrs.lock().unwrap();\n    assert_eq!(\n 
       peer_addrs.len(),\n        NUMBER_OF_REQUESTS,\n        \"Expected {} peer addresses, but got {}\",\n        NUMBER_OF_REQUESTS,\n        peer_addrs.len()\n    );\n    let first_addr = peer_addrs[0];\n    assert!(\n        peer_addrs.iter().all(|addr| addr == &first_addr),\n        \"All peer addresses should be the same, but found differences: {peer_addrs:?}\"\n    );\n}\n"
  }
]