Repository: 0x676e67/rquest Branch: main Commit: ccf70e4d3cab Files: 165 Total size: 1.5 MB Directory structure: gitextract_e98nv8b_/ ├── .github/ │ ├── FUNDING.yml │ ├── ISSUE_TEMPLATE/ │ │ ├── bug_report.md │ │ └── feature_request.md │ ├── compilation-guide/ │ │ └── build.yml │ ├── dependabot.yml │ └── workflows/ │ └── ci.yml ├── .gitignore ├── Cargo.toml ├── LICENSE ├── README.md ├── RELEASE.md ├── bench/ │ ├── http1.rs │ ├── http1_over_tls.rs │ ├── http2.rs │ ├── http2_over_tls.rs │ ├── support/ │ │ ├── bench.rs │ │ ├── client.rs │ │ └── server.rs │ └── support.rs ├── cliff.toml ├── examples/ │ ├── cert_store.rs │ ├── connect_via_lower_priority_tokio_runtime.rs │ ├── emulate.rs │ ├── form.rs │ ├── http1_websocket.rs │ ├── http2_websocket.rs │ ├── json_dynamic.rs │ ├── json_typed.rs │ ├── keylog.rs │ ├── request_with_emulate.rs │ ├── request_with_interface.rs │ ├── request_with_local_address.rs │ ├── request_with_proxy.rs │ ├── request_with_redirect.rs │ ├── request_with_version.rs │ ├── tor_socks.rs │ └── unix_socket.rs ├── rustfmt.toml ├── src/ │ ├── client/ │ │ ├── body.rs │ │ ├── conn/ │ │ │ ├── connector.rs │ │ │ ├── descriptor.rs │ │ │ ├── http.rs │ │ │ ├── proxy/ │ │ │ │ ├── socks.rs │ │ │ │ └── tunnel.rs │ │ │ ├── proxy.rs │ │ │ ├── tcp/ │ │ │ │ └── tokio.rs │ │ │ ├── tcp.rs │ │ │ ├── tls_info.rs │ │ │ ├── uds.rs │ │ │ └── verbose.rs │ │ ├── conn.rs │ │ ├── core/ │ │ │ ├── body/ │ │ │ │ ├── incoming.rs │ │ │ │ ├── length.rs │ │ │ │ └── watch.rs │ │ │ ├── body.rs │ │ │ ├── conn/ │ │ │ │ ├── http1.rs │ │ │ │ └── http2.rs │ │ │ ├── conn.rs │ │ │ ├── dispatch.rs │ │ │ ├── error.rs │ │ │ ├── proto/ │ │ │ │ ├── headers.rs │ │ │ │ ├── http1/ │ │ │ │ │ ├── buf.rs │ │ │ │ │ ├── conn.rs │ │ │ │ │ ├── decode.rs │ │ │ │ │ ├── dispatch.rs │ │ │ │ │ ├── encode.rs │ │ │ │ │ ├── ext.rs │ │ │ │ │ ├── io.rs │ │ │ │ │ └── role.rs │ │ │ │ ├── http1.rs │ │ │ │ ├── http2/ │ │ │ │ │ ├── client.rs │ │ │ │ │ └── ping.rs │ │ │ │ └── http2.rs │ │ │ ├── proto.rs │ │ │ ├── rt/ │ │ │ 
│ ├── bounds.rs │ │ │ │ ├── timer.rs │ │ │ │ └── tokio.rs │ │ │ ├── rt.rs │ │ │ └── upgrade.rs │ │ ├── core.rs │ │ ├── emulate.rs │ │ ├── future.rs │ │ ├── group.rs │ │ ├── layer/ │ │ │ ├── client/ │ │ │ │ ├── exec.rs │ │ │ │ ├── lazy.rs │ │ │ │ └── pool.rs │ │ │ ├── client.rs │ │ │ ├── config.rs │ │ │ ├── decoder.rs │ │ │ ├── redirect/ │ │ │ │ ├── future.rs │ │ │ │ └── policy.rs │ │ │ ├── redirect.rs │ │ │ ├── retry/ │ │ │ │ ├── classify.rs │ │ │ │ └── scope.rs │ │ │ ├── retry.rs │ │ │ ├── timeout/ │ │ │ │ ├── body.rs │ │ │ │ └── future.rs │ │ │ └── timeout.rs │ │ ├── layer.rs │ │ ├── multipart.rs │ │ ├── request.rs │ │ ├── response.rs │ │ ├── ws/ │ │ │ ├── json.rs │ │ │ └── message.rs │ │ └── ws.rs │ ├── client.rs │ ├── config.rs │ ├── cookie.rs │ ├── dns/ │ │ ├── gai.rs │ │ ├── hickory.rs │ │ └── resolve.rs │ ├── dns.rs │ ├── error.rs │ ├── ext.rs │ ├── header.rs │ ├── into_uri.rs │ ├── lib.rs │ ├── proxy/ │ │ ├── mac.rs │ │ ├── matcher.rs │ │ ├── uds.rs │ │ └── win.rs │ ├── proxy.rs │ ├── redirect.rs │ ├── retry.rs │ ├── sync.rs │ ├── tls/ │ │ ├── compress.rs │ │ ├── conn/ │ │ │ ├── ext.rs │ │ │ ├── macros.rs │ │ │ └── service.rs │ │ ├── conn.rs │ │ ├── keylog/ │ │ │ └── handle.rs │ │ ├── keylog.rs │ │ ├── session.rs │ │ ├── trust/ │ │ │ ├── identity.rs │ │ │ ├── parse.rs │ │ │ └── store.rs │ │ └── trust.rs │ ├── tls.rs │ ├── trace.rs │ └── util.rs └── tests/ ├── badssl.rs ├── brotli.rs ├── client.rs ├── connector_layers.rs ├── cookie.rs ├── deflate.rs ├── emulate.rs ├── gzip.rs ├── layers.rs ├── multipart.rs ├── proxy.rs ├── redirect.rs ├── retry.rs ├── support/ │ ├── crl.pem │ ├── delay_server.rs │ ├── error.rs │ ├── layer.rs │ ├── mod.rs │ ├── server.cert │ ├── server.key │ └── server.rs ├── timeouts.rs ├── unix_socket.rs ├── upgrade.rs └── zstd.rs ================================================ FILE CONTENTS ================================================ ================================================ FILE: .github/FUNDING.yml 
================================================ # These are supported funding model platforms github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] patreon: # Replace with a single Patreon username open_collective: # Replace with a single Open Collective username ko_fi: tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry liberapay: # Replace with a single Liberapay username issuehunt: # Replace with a single IssueHunt username lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry polar: # Replace with a single Polar username buy_me_a_coffee: # Replace with a single Buy Me a Coffee username thanks_dev: # Replace with a single thanks.dev username custom: ['https://github.com/0x676e67/0x676e67/blob/main/SPONSOR.md'] ================================================ FILE: .github/ISSUE_TEMPLATE/bug_report.md ================================================ --- name: Bug report about: Create a report to help us improve title: '' labels: '' assignees: '' --- **Describe the bug** A clear and concise description of what the bug is. **To Reproduce** Steps to reproduce the behavior: 1. Go to '...' 2. Click on '....' 3. Scroll down to '....' 4. See error **Expected behavior** A clear and concise description of what you expected to happen. **Screenshots** If applicable, add screenshots to help explain your problem. **Desktop (please complete the following information):** - OS: [e.g. iOS] - Browser [e.g. chrome, safari] - Version [e.g. 22] **Smartphone (please complete the following information):** - Device: [e.g. iPhone6] - OS: [e.g. iOS8.1] - Browser [e.g. stock browser, safari] - Version [e.g. 22] **Additional context** Add any other context about the problem here. 
================================================ FILE: .github/ISSUE_TEMPLATE/feature_request.md ================================================ --- name: Feature request about: Suggest an idea for this project title: '' labels: '' assignees: '' --- **Is your feature request related to a problem? Please describe.** A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] **Describe the solution you'd like** A clear and concise description of what you want to happen. **Describe alternatives you've considered** A clear and concise description of any alternative solutions or features you've considered. **Additional context** Add any other context or screenshots about the feature request here. ================================================ FILE: .github/compilation-guide/build.yml ================================================ name: build on: push: branches: [ "main" ] pull_request: branches: [ "main" ] workflow_dispatch: env: CARGO_TERM_COLOR: always jobs: linux: name: Build Linux (GNU) runs-on: ubuntu-latest strategy: matrix: include: - target: x86_64 target_triple: x86_64-unknown-linux-gnu apt_packages: "" custom_env: {} - target: i686 target_triple: i686-unknown-linux-gnu apt_packages: crossbuild-essential-i386 custom_env: CC: i686-linux-gnu-gcc CXX: i686-linux-gnu-g++ CARGO_TARGET_I686_UNKNOWN_LINUX_GNU_LINKER: i686-linux-gnu-g++ RUSTC_LINKER: i686-linux-gnu-g++ - target: aarch64 target_triple: aarch64-unknown-linux-gnu apt_packages: crossbuild-essential-arm64 custom_env: CFLAGS_aarch64_unknown_linux_gnu: -D__ARM_ARCH=8 CC: aarch64-linux-gnu-gcc CXX: aarch64-linux-gnu-g++ CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER: aarch64-linux-gnu-g++ RUSTC_LINKER: aarch64-linux-gnu-g++ - target: armv7 target_triple: armv7-unknown-linux-gnueabihf apt_packages: crossbuild-essential-armhf custom_env: CC: arm-linux-gnueabihf-gcc CXX: arm-linux-gnueabihf-g++ CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABIHF_LINKER: arm-linux-gnueabihf-g++ 
RUSTC_LINKER: arm-linux-gnueabihf-g++ steps: - uses: actions/checkout@v4 - name: Install base dependencies on Ubuntu run: | sudo apt-get update sudo apt-get install -y build-essential cmake perl pkg-config libclang-dev musl-tools - name: Install target-specific APT dependencies if: ${{ matrix.apt_packages != '' }} run: | sudo apt-get update sudo apt-get install -y ${{ matrix.apt_packages }} - name: Add Rust target run: rustup target add ${{ matrix.target_triple }} - name: Build for ${{ matrix.target }} env: ${{ matrix.custom_env }} run: cargo build --release --target ${{ matrix.target_triple }} - name: Archive build artifacts run: | cd target/${{ matrix.target_triple }}/release zip -r ../../../build-linux-${{ matrix.target }}.zip * working-directory: ${{ github.workspace }} - name: Upload build artifact uses: actions/upload-artifact@v4 with: name: build-linux-${{ matrix.target }} path: build-linux-${{ matrix.target }}.zip retention-days: 1 musllinux: name: Build Linux (musl) runs-on: ubuntu-latest strategy: matrix: include: - target: x86_64 target_triple: x86_64-unknown-linux-musl package: x86_64-linux-musl-cross apt_packages: "" custom_env: CC: x86_64-linux-musl-gcc CXX: x86_64-linux-musl-g++ CARGO_TARGET_X86_64_UNKNOWN_LINUX_MUSL_LINKER: x86_64-linux-musl-g++ RUSTC_LINKER: x86_64-linux-musl-g++ - target: aarch64 target_triple: aarch64-unknown-linux-musl package: aarch64-linux-musl-cross apt_packages: crossbuild-essential-arm64 custom_env: CC: aarch64-linux-musl-gcc CXX: aarch64-linux-musl-g++ CARGO_TARGET_AARCH64_UNKNOWN_LINUX_MUSL_LINKER: aarch64-linux-musl-g++ RUSTC_LINKER: aarch64-linux-musl-g++ - target: i686 target_triple: i686-unknown-linux-musl package: i686-linux-musl-cross apt_packages: crossbuild-essential-i386 custom_env: CC: i686-linux-musl-gcc CXX: i686-linux-musl-g++ CARGO_TARGET_I686_UNKNOWN_LINUX_MUSL_LINKER: i686-linux-musl-g++ RUSTC_LINKER: i686-linux-musl-g++ - target: armv7 target_triple: armv7-unknown-linux-musleabihf package: 
armv7l-linux-musleabihf-cross apt_packages: crossbuild-essential-armhf custom_env: CC: armv7l-linux-musleabihf-gcc CXX: armv7l-linux-musleabihf-g++ CARGO_TARGET_ARMV7_UNKNOWN_LINUX_MUSLEABIHF_LINKER: armv7l-linux-musleabihf-g++ RUSTC_LINKER: armv7l-linux-musleabihf-g++ steps: - uses: actions/checkout@v4 - name: Install base dependencies on Ubuntu run: | sudo apt-get update sudo apt-get install -y build-essential cmake perl pkg-config libclang-dev musl-tools - name: Install target-specific APT dependencies if: ${{ matrix.apt_packages != '' }} run: | sudo apt-get update sudo apt-get install -y ${{ matrix.apt_packages }} - name: Prepare musl cross-compiler run: | wget https://github.com/musl-cc/musl.cc/releases/latest/download/${{ matrix.package }}.tgz tar xzf ${{ matrix.package }}.tgz -C /opt echo "/opt/${{ matrix.package }}/bin/" >> $GITHUB_PATH - name: Add Rust target run: rustup target add ${{ matrix.target_triple }} - name: Build for ${{ matrix.target }} env: ${{ matrix.custom_env }} run: cargo build --release --target ${{ matrix.target_triple }} - name: Archive build artifacts run: | cd target/${{ matrix.target_triple }}/release zip -r ../../../build-musllinux-${{ matrix.target }}.zip * working-directory: ${{ github.workspace }} - name: Upload build artifact uses: actions/upload-artifact@v4 with: name: build-musllinux-${{ matrix.target }} path: build-musllinux-${{ matrix.target }}.zip retention-days: 1 windows: name: Build Windows runs-on: windows-latest strategy: matrix: include: - target: x86_64 target_triple: x86_64-pc-windows-msvc - target: i686 target_triple: i686-pc-windows-msvc steps: - uses: actions/checkout@v4 - name: Install dependencies on Windows run: | choco install cmake -y choco install strawberryperl -y choco install pkgconfiglite -y choco install llvm -y choco install nasm -y shell: cmd - name: Build on Windows with Static Linking env: RUSTFLAGS: "-C target-feature=+crt-static" run: cargo build --release --target ${{ matrix.target_triple }} - 
name: Archive build artifacts shell: pwsh run: | Compress-Archive -Path 'target\${{ matrix.target_triple }}\release\*' -DestinationPath "build-windows-${{ matrix.target }}.zip" -CompressionLevel Optimal -Force working-directory: ${{ github.workspace }} - name: Upload build artifact uses: actions/upload-artifact@v4 with: name: build-windows-${{ matrix.target }} path: build-windows-${{ matrix.target }}.zip retention-days: 1 macos: name: Build macOS strategy: matrix: include: - target: x86_64 runner: macos-latest target_triple: x86_64-apple-darwin - target: aarch64 runner: macos-latest target_triple: aarch64-apple-darwin runs-on: ${{ matrix.runner }} steps: - uses: actions/checkout@v4 - name: Install dependencies on macOS run: | brew update brew install --formula cmake pkg-config llvm - name: Add Rust target run: rustup target add ${{ matrix.target_triple }} - name: Build for ${{ matrix.target }} run: cargo build --release --target ${{ matrix.target_triple }} - name: Archive build artifacts run: | cd target/${{ matrix.target_triple }}/release zip -r ../../../build-macos-${{ matrix.target }}.zip * working-directory: ${{ github.workspace }} - name: Upload build artifact uses: actions/upload-artifact@v4 with: name: build-macos-${{ matrix.target }} path: build-macos-${{ matrix.target }}.zip retention-days: 1 ================================================ FILE: .github/dependabot.yml ================================================ # To get started with Dependabot version updates, you'll need to specify which # package ecosystems to update and where the package manifests are located. 
# Please see the documentation for all configuration options: # https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates version: 2 updates: - package-ecosystem: "github-actions" # Workflow files stored in the # default location of `.github/workflows` directory: "/" schedule: interval: "weekly" - package-ecosystem: "cargo" directory: "/" schedule: interval: "weekly" # todo: if only this worked, see https://github.com/dependabot/dependabot-core/issues/4009 # only tell us if there's a new 'breaking' change we could upgrade to # versioning-strategy: increase-if-necessary # disable regular version updates, security updates are unaffected open-pull-requests-limit: 0 ================================================ FILE: .github/workflows/ci.yml ================================================ name: CI on: push: tags: ["v*"] pull_request: paths-ignore: - 'docs/**' - '*.md' - '.github/**' - 'README.md' workflow_dispatch: concurrency: group: ${{ github.workflow }}-${{ github.ref_name }}-${{ github.event.pull_request.number || github.sha }} cancel-in-progress: true permissions: contents: write packages: write jobs: style: name: Style runs-on: ubuntu-latest steps: - uses: actions/checkout@v6 - uses: actions-rs/toolchain@v1 with: toolchain: stable override: true components: rustfmt, clippy - name: Style check run: cargo fmt --all -- --check - name: Clippy check run: cargo clippy --all-targets --all-features -- -D warnings docs: name: Docs runs-on: ubuntu-latest steps: - uses: actions/checkout@v6 - uses: actions-rs/toolchain@v1 with: toolchain: stable override: true - name: Build docs run: cargo doc --document-private-items --all-features hack: name: Hack runs-on: ubuntu-latest steps: - uses: actions/checkout@v6 - uses: actions-rs/toolchain@v1 with: toolchain: stable override: true - uses: Swatinem/rust-cache@v2 - name: Install cargo-hack from crates.io uses: baptiste0928/cargo-install@v3 with: crate: cargo-hack - name: Run hack 
script run: cargo hack check --each-feature msrv: name: MSRV needs: [style] runs-on: ubuntu-latest steps: - name: Checkout uses: actions/checkout@v6 - uses: dtolnay/rust-toolchain@stable - name: Resolve MSRV aware dependencies run: cargo update env: CARGO_RESOLVER_INCOMPATIBLE_RUST_VERSIONS: fallback - name: Get MSRV package metadata id: metadata run: cargo metadata --no-deps --format-version 1 | jq -r '"msrv=" + .packages[0].rust_version' >> $GITHUB_OUTPUT - name: Install rust (${{ steps.metadata.outputs.msrv }}) uses: dtolnay/rust-toolchain@master with: toolchain: ${{ steps.metadata.outputs.msrv }} - uses: Swatinem/rust-cache@v2 - name: Check run: cargo check test: name: Test runs-on: ubuntu-latest strategy: fail-fast: false matrix: feature-set: - "--all-features" - "--no-default-features" - "--no-default-features --features webpki-roots" - "--no-default-features --features form" - "--no-default-features --features query" - "--features cookies" - "--features gzip,stream" - "--features brotli,stream" - "--features zstd,stream" - "--features deflate,stream" - "--features json" - "--features multipart" - "--features stream" - "--features hickory-dns" steps: - uses: actions/checkout@v6 - uses: actions-rs/toolchain@v1 with: toolchain: stable override: true - uses: Swatinem/rust-cache@v2 - uses: taiki-e/install-action@v2 with: tool: cargo-nextest - name: Run tests (${{ matrix.feature-set }}) run: cargo nextest run --workspace ${{ matrix.feature-set }} build: name: Build (${{ matrix.env }}) runs-on: ${{ matrix.os }} environment: ${{ matrix.env }} strategy: matrix: include: - os: ubuntu-latest env: Linux - os: windows-latest env: Windows - os: macos-latest env: macOS - os: ubuntu-latest env: Android steps: - uses: actions/checkout@v6 - uses: actions-rs/toolchain@v1 with: toolchain: stable override: true - name: Install NASM (Windows) if: matrix.os == 'windows-latest' run: choco install nasm -y - name: Build if: matrix.env != 'Android' run: cargo build --all-features - 
name: Add Android targets if: matrix.env == 'Android' run: rustup target add aarch64-linux-android x86_64-linux-android - uses: nttld/setup-ndk@v1.6.0 if: matrix.env == 'Android' id: setup-ndk with: ndk-version: r27c add-to-path: true - name: Build with cargo-ndk if: matrix.env == 'Android' env: ANDROID_NDK_HOME: ${{ steps.setup-ndk.outputs.ndk-path }} ANDROID_NDK_ROOT: ${{ steps.setup-ndk.outputs.ndk-path }} run: | cargo install cargo-ndk cargo ndk -t arm64-v8a -t x86_64 build --all-features release: name: Release needs: [style, test, docs, hack, msrv, build] runs-on: ubuntu-latest environment: Release if: startsWith(github.ref, 'refs/tags/') steps: - uses: actions/checkout@v6 - uses: actions-rs/toolchain@v1 with: toolchain: stable override: true - uses: katyo/publish-crates@v2 with: registry-token: ${{ secrets.CARGO_REGISTRY_TOKEN }} ignore-unpublished-changes: true - name: Upload binaries to GitHub Release uses: softprops/action-gh-release@v3 with: token: ${{ secrets.GITHUB_TOKEN }} prerelease: ${{ contains(github.ref, 'alpha') || contains(github.ref, 'beta') }} generate_release_notes: true ================================================ FILE: .gitignore ================================================ # Generated by Cargo # will have compiled files and executables target Cargo.lock *.swp .history .vscode .direnv result curl **/*.rs.bk /.DS_Store keylog.txt *.json .zed *.log # These are backup files generated by rustfmt **/*.rs.bk # MSVC Windows builds of rustc generate these, which store debugging information *.pdb # Generated by cargo mutants # Contains mutation testing data **/mutants.out*/ # RustRover # JetBrains specific template is maintained in a separate JetBrains.gitignore that can # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore # and can be added to the global gitignore or merged into this file. For a more nuclear # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
.idea/ ================================================ FILE: Cargo.toml ================================================ [package] name = "wreq" version = "6.0.0-rc.28" description = "An ergonomic Rust HTTP Client with TLS fingerprint" keywords = ["http", "client", "websocket", "ja3", "ja4"] categories = ["web-programming::http-client"] repository = "https://github.com/0x676e67/wreq" documentation = "https://docs.rs/wreq" authors = ["0x676e67 "] readme = "README.md" license = "Apache-2.0" edition = "2024" rust-version = "1.85" include = ["README.md", "LICENSE", "src/**/*.rs"] [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] targets = ["x86_64-unknown-linux-gnu"] [features] default = ["webpki-roots"] # Enable support for decoding text. charset = ["dep:encoding_rs", "dep:mime"] # Enable cookie store support. cookies = ["dep:cookie"] # Enable gzip decompression support. gzip = ["dep:tower-http", "tower-http?/decompression-gzip"] # Enable brotli decompression support. brotli = ["dep:tower-http", "tower-http?/decompression-br"] # Enable zstd decompression support. zstd = ["dep:tower-http", "tower-http?/decompression-zstd"] # Enable deflate decompression support. deflate = ["dep:tower-http", "tower-http?/decompression-deflate"] # Enable URL query string serialization support. query = ["dep:serde", "dep:serde_html_form"] # Enable x-www-form-urlencoded form support. form = ["dep:serde", "dep:serde_html_form"] # Enable JSON support. json = ["dep:serde", "dep:serde_json"] # Enable multipart/form-data support. multipart = ["dep:mime_guess", "dep:sync_wrapper", "sync_wrapper?/futures"] # Enable hickory DNS resolver. hickory-dns = ["dep:hickory-resolver"] # Enable streaming support. stream = ["tokio/fs", "dep:sync_wrapper", "sync_wrapper?/futures"] # Enable SOCKS4/5 proxy support. socks = ["dep:tokio-socks"] # Enable WebSocket support. 
ws = ["dep:tokio-tungstenite", "tokio-tungstenite?/handshake"] # Enable webpki-roots for TLS certificate validation. webpki-roots = ["dep:webpki-root-certs"] # Use the system's proxy configuration. system-proxy = ["dep:system-configuration", "dep:windows-registry"] # Enable tracing logging. tracing = ["dep:tracing", "http2/tracing", "tracing?/std"] # Enables the `parking_lot` crate for synchronization primitives. parking_lot = ["dep:parking_lot", "http2/parking_lot"] # Prefix BoringSSL symbols in libcrypto/libssl to avoid linker conflicts # when multiple OpenSSL versions coexist in the same process. prefix-symbols = ["btls/prefix-symbols"] [dependencies] percent-encoding = "2.3.2" url = "2.5.8" bytes = "1.11.1" http = "1.4.0" http2 = { version = "0.5.16", features = ["unstable"] } httparse = "1.10.1" http-body = "1.0.1" http-body-util = "0.1.3" want = "0.3.1" pin-project-lite = "0.2.17" futures-util = { version = "0.3.32", default-features = false } smallvec = { version = "1.15.1", features = ["const_generics", "const_new"] } socket2 = { version = "0.6.3", features = ["all"] } ipnet = "2.12.0" lru = "0.17.0" btls = "0.5.6" btls-sys = "0.5.6" tokio-btls = "0.5.6" tokio = { version = "1.52.1", default-features = false, features = [ "net", "time", "rt", ] } tokio-util = { version = "0.7.18", default-features = false } tower = { version = "0.5.3", default-features = false, features = [ "timeout", "util", "retry", ] } # Optional deps... 
## serde serde = { version = "1.0", optional = true } serde_json = { version = "1.0", optional = true } serde_html_form = { version = "0.4.0", optional = true } ## multipart mime_guess = { version = "2.0", default-features = false, optional = true } ## charset encoding_rs = { version = "0.8", optional = true } mime = { version = "0.3.17", optional = true } ## sync wrapper sync_wrapper = { version = "1.0.2", optional = true } ## webpki root certs webpki-root-certs = { version = "1.0.7", optional = true } ## cookies cookie = { version = "0.18", optional = true } ## tower http tower-http = { version = "0.6.8", default-features = false, optional = true } ## socks tokio-socks = { version = "0.5.2", optional = true } ## websocket tokio-tungstenite = { version = "0.29.0", default-features = false, optional = true } ## hickory-dns hickory-resolver = { version = "0.26.0", optional = true } ## parking_lot parking_lot = { version = "0.12.5", optional = true } ## tracing tracing = { version = "0.1", default-features = false, optional = true } ## windows [target.'cfg(windows)'.dependencies] windows-registry = { version = "0.6.0", optional = true } ## macOS [target.'cfg(target_os = "macos")'.dependencies] system-configuration = { version = "0.7.0", optional = true } ## interface binding [target.'cfg(unix)'.dependencies] libc = "0.2.182" [dev-dependencies] hyper = { version = "1.7.0", default-features = false, features = [ "http1", "http2", "server", ] } hyper-util = { version = "0.1.20", features = [ "http1", "http2", "server-auto", "server-graceful", "tokio", ] } serde = { version = "1.0", features = ["derive"] } tokio = { version = "1.0", default-features = false, features = [ "macros", "rt-multi-thread", ] } futures = { version = "0.3.0", default-features = false, features = ["std"] } tower = { version = "0.5.2", default-features = false, features = ["limit"] } tokio-test = "0.4.5" tracing = "0.1" tracing-subscriber = "0.3.20" pretty_env_logger = "0.5" brotli = "8.0.2" flate2 
= "1.1.9" zstd = "0.13.3" # for benchmarks sysinfo = { version = "0.38.2", default-features = false, features = ["system"] } criterion = { version = "0.8.2", features = ["async_tokio"] } reqwest = { version = "0.13", default-features = false, features = ["rustls", "stream", "http2"] } [profile.bench] opt-level = 3 codegen-units = 1 incremental = false [[bench]] name = "http1" path = "bench/http1.rs" harness = false required-features = ["stream"] [[bench]] name = "http2" path = "bench/http2.rs" harness = false required-features = ["stream"] [[bench]] name = "http1_over_tls" path = "bench/http1_over_tls.rs" harness = false required-features = ["stream"] [[bench]] name = "http2_over_tls" path = "bench/http2_over_tls.rs" harness = false required-features = ["stream"] [[test]] name = "cookie" path = "tests/cookie.rs" required-features = ["cookies"] [[test]] name = "gzip" path = "tests/gzip.rs" required-features = ["gzip", "stream"] [[test]] name = "brotli" path = "tests/brotli.rs" required-features = ["brotli", "stream"] [[test]] name = "zstd" path = "tests/zstd.rs" required-features = ["zstd", "stream"] [[test]] name = "deflate" path = "tests/deflate.rs" required-features = ["deflate", "stream"] [[test]] name = "multipart" path = "tests/multipart.rs" required-features = ["multipart", "stream"] [[test]] name = "retry" path = "tests/retry.rs" [[example]] name = "json_dynamic" path = "examples/json_dynamic.rs" required-features = ["json"] [[example]] name = "json_typed" path = "examples/json_typed.rs" required-features = ["json"] [[example]] name = "tor_socks" path = "examples/tor_socks.rs" required-features = ["socks"] [[example]] name = "form" path = "examples/form.rs" required-features = ["form"] [[example]] name = "connect_via_lower_priority_tokio_runtime" path = "examples/connect_via_lower_priority_tokio_runtime.rs" required-features = ["tracing"] [[example]] name = "emulate" path = "examples/emulate.rs" required-features = ["gzip", "brotli", "zstd", "deflate", 
"tracing"] [[example]] name = "cert_store" path = "examples/cert_store.rs" required-features = ["webpki-roots"] [[example]] name = "request_with_redirect" path = "examples/request_with_redirect.rs" [[example]] name = "request_with_version" path = "examples/request_with_version.rs" [[example]] name = "request_with_proxy" path = "examples/request_with_proxy.rs" required-features = ["socks"] [[example]] name = "request_with_emulate" path = "examples/request_with_emulate.rs" required-features = ["gzip", "brotli", "zstd", "deflate", "tracing"] [[example]] name = "request_with_local_address" path = "examples/request_with_local_address.rs" [[example]] name = "request_with_interface" path = "examples/request_with_interface.rs" [[example]] name = "http1_websocket" path = "examples/http1_websocket.rs" required-features = ["ws", "futures-util/std"] [[example]] name = "http2_websocket" path = "examples/http2_websocket.rs" required-features = ["ws", "futures-util/std"] [[example]] name = "keylog" path = "examples/keylog.rs" [[example]] name = "unix_socket" path = "examples/unix_socket.rs" ================================================ FILE: LICENSE ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright 2016 Sean McArthur Copyright 2026 0x676e67 Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: README.md ================================================ # wreq [![CI](https://github.com/0x676e67/wreq/actions/workflows/ci.yml/badge.svg)](https://github.com/0x676e67/wreq/actions/workflows/ci.yml) [![Crates.io License](https://img.shields.io/crates/l/wreq)](https://github.com/0x676e67/wreq/blob/main/LICENSE) [![Crates.io MSRV](https://img.shields.io/crates/msrv/wreq?logo=rust)](https://crates.io/crates/wreq) [![crates.io](https://img.shields.io/crates/v/wreq.svg?logo=rust)](https://crates.io/crates/wreq) [![Discord chat][discord-badge]][discord-url] [discord-badge]: https://img.shields.io/discord/1486741856397164788.svg?logo=discord [discord-url]: https://discord.gg/rfbvyFkgq3 > 🚀 Help me work seamlessly with open source sharing by [sponsoring me on GitHub](https://github.com/0x676e67/0x676e67/blob/main/SPONSOR.md) An ergonomic and modular Rust HTTP Client for high-fidelity protocol matching, featuring customizable TLS, JA3/JA4, and HTTP/2 signature capabilities. 
## Features - Plain bodies, JSON, urlencoded, multipart - HTTP Trailer - Cookie Store - Redirect Policy - Original Header - Rotating Proxies - Tower Middleware - WebSocket Upgrade - HTTPS via BoringSSL - HTTP/2 over TLS Parity - Certificate Store (CAs & mTLS) ## Example The following example uses the [Tokio](https://tokio.rs) runtime with optional features enabled by adding this to your `Cargo.toml`: ```toml [dependencies] tokio = { version = "1", features = ["full"] } wreq = "6.0.0-rc.28" wreq-util = "3.0.0-rc.10" ``` And then the code: ```rust use wreq::Client; use wreq_util::Emulation; #[tokio::main] async fn main() -> wreq::Result<()> { // Build a client let client = Client::builder() .emulation(Emulation::Safari26) .build()?; // Use the API you're already familiar with let resp = client.get("https://tls.peet.ws/api/all").send().await?; println!("{}", resp.text().await?); Ok(()) } ``` ## Behavior - **HTTP/1 over TLS** In the Rust ecosystem, most HTTP clients rely on the [http](https://github.com/hyperium/http) library, which performs well but does not preserve header case. This causes some **WAFs** to reject **HTTP/1** requests with lowercase headers (see [discussion](https://github.com/seanmonstar/reqwest/discussions/2227)). **wreq** addresses this by fully supporting **HTTP/1** header case sensitivity. - **HTTP/2 over TLS** Due to the complexity of **TLS** encryption and the widespread adoption of **HTTP/2**, browser fingerprints such as **JA3**, **JA4**, and **Akamai** cannot be reliably emulated using simple fingerprint strings. Instead of parsing and emulating these string-based fingerprints, **wreq** provides fine-grained control over **TLS** and **HTTP/2** extensions and settings for precise browser behavior emulation. - **Device Emulation** **TLS** and **HTTP/2** fingerprints are often identical across various browser models because these underlying protocols evolve slower than browser release cycles. 
**100+ browser device emulation profiles** are maintained in [wreq-util](https://github.com/0x676e67/wreq-util). ## Building Compiling alongside **openssl-sys** can cause symbol conflicts with **boringssl** that lead to [link failures](https://github.com/cloudflare/boring/issues/197). On **Linux** and **Android**, this can be avoided by enabling the **`prefix-symbols`** feature. Install [BoringSSL build dependencies](https://github.com/google/boringssl/blob/master/BUILDING.md#build-prerequisites) and build with: ```bash sudo apt-get install build-essential cmake perl pkg-config libclang-dev musl-tools git -y cargo build --release ``` This GitHub Actions [workflow](.github/compilation-guide/build.yml) can be used to compile the project on **Linux**, **Windows**, and **macOS**. ## Services Help sustain the ongoing development of this open-source project by reaching out for [commercial support](mailto:gngppz@gmail.com). Receive private guidance, expert reviews, or direct access to the maintainer, with personalized technical assistance tailored to your needs. ## License Licensed under the Apache License, Version 2.0 ([LICENSE](./LICENSE) or http://www.apache.org/licenses/LICENSE-2.0). ## Contribution Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the [Apache-2.0](./LICENSE) license, shall be licensed as above, without any additional terms or conditions. ## Sponsors **Solve reCAPTCHA in less than 2 seconds** **[Captcha.fun](https://captcha.fun/?utm_source=github&utm_medium=readme&utm_campaign=wreq)** delivers fast, reliable CAPTCHA solving built for automation at scale. With simple API integration, consistent performance, and competitive pricing, it's an easy way to keep your workflows moving without delays—use code **`WREQ`** for **10% bonus credits**. 
**[Dashboard](https://dash.captcha.fun/)** | **[Docs](https://docs.captcha.fun/)** | **[Discord](https://discord.gg/captchafun)** --- TLS fingerprinting alone isn't enough for modern bot protection. **[Hyper Solutions](https://hypersolutions.co?utm_source=github&utm_medium=readme&utm_campaign=wreq)** provides the missing piece - API endpoints that generate valid antibot tokens for: **Akamai** • **DataDome** • **Kasada** • **Incapsula** No browser automation. Just simple API calls that return the exact cookies and headers these systems require. **[Dashboard](https://hypersolutions.co?utm_source=github&utm_medium=readme&utm_campaign=wreq)** | **[Docs](https://docs.justhyped.dev)** | **[Discord](https://discord.gg/akamai)** ## Accolades A hard fork of [reqwest](https://github.com/seanmonstar/reqwest). ================================================ FILE: RELEASE.md ================================================ ## [unreleased] ### Features - *(cookie)* RFC 9113 compliant cookie handling ([#1106](https://github.com/0x676e67/wreq/issues/1106)) - ([81f3adb](https://github.com/0x676e67/wreq/commit/81f3adb85e0fff869439bd4eac48405e78916c9a)) - *(cookie)* Fill missing domain/path in `get_all` from stored scope ([#1082](https://github.com/0x676e67/wreq/issues/1082)) - ([240d84e](https://github.com/0x676e67/wreq/commit/240d84eab5eb4df8548933ee2c13337d86e1afe1)) - *(multipart)* Add Form::set_boundary for custom boundaries ([#1094](https://github.com/0x676e67/wreq/issues/1094)) - ([30adda1](https://github.com/0x676e67/wreq/commit/30adda14d21824f5b6c8b7817d0da76a4876b007)) - *(tls)* Allow pluggable TLS session cache ([#1101](https://github.com/0x676e67/wreq/issues/1101)) - ([98c1306](https://github.com/0x676e67/wreq/commit/98c130643afca83b15811d42466011900d672bc4)) ### Bug Fixes - *(bench)* Fix CPU sysinfo reading in benchmark ([#1080](https://github.com/0x676e67/wreq/issues/1080)) - ([7882497](https://github.com/0x676e67/wreq/commit/78824973f82de07f86528a4e5df1cf99f313d325)) - 
*(http2)* Prevent panic when calling to_str on non-UTF8 headers ([#1070](https://github.com/0x676e67/wreq/issues/1070)) - ([2aa4b16](https://github.com/0x676e67/wreq/commit/2aa4b1601ec22aea0ef5eb1b97e566a217194351)) - *(rt)* Support fake time in legacy client and TokioTimer ([#1064](https://github.com/0x676e67/wreq/issues/1064)) - ([29acebc](https://github.com/0x676e67/wreq/commit/29acebcdc16b1cec24f0547e6d381e512322edd9)) - *(tcp)* Restore the missing TCP nodelay setting ([#1102](https://github.com/0x676e67/wreq/issues/1102)) - ([7ea12ed](https://github.com/0x676e67/wreq/commit/7ea12ede38a6617772cf5b66342d3b6f9c2ff7cb)) - Disable Nagle's algorithm to resolve HTTP/2 performance dip ([#1074](https://github.com/0x676e67/wreq/issues/1074)) - ([8f45ef4](https://github.com/0x676e67/wreq/commit/8f45ef41eb5738d07947e6b78917488680332213)) ### Refactor - *(conn)* Modular connector component ([#1100](https://github.com/0x676e67/wreq/issues/1100)) - ([6cf1279](https://github.com/0x676e67/wreq/commit/6cf1279d4a0b40075942692687c967b5da4292c7)) - *(multipart)* Streamline legacy Form implementation - ([45df222](https://github.com/0x676e67/wreq/commit/45df2228715df1ecbe8e35866f1ec3a82cd4e106)) - *(pool)* Redesign emulation and pool ID strategy ([#1103](https://github.com/0x676e67/wreq/issues/1103)) - ([c12f3a0](https://github.com/0x676e67/wreq/commit/c12f3a0d8e6dfd4536acb46bf2d318b2cd022aac)) - *(tls)* Decouple TLS backend logic into sub-modules ([#1105](https://github.com/0x676e67/wreq/issues/1105)) - ([c7a7e3c](https://github.com/0x676e67/wreq/commit/c7a7e3c94a40368894d4a63a959eb633c3a292f1)) - *(tls)* Expose certificate compression APIs ([#1085](https://github.com/0x676e67/wreq/issues/1085)) - ([8429954](https://github.com/0x676e67/wreq/commit/842995411c9262b04260137c588084340e59133e)) ### Documentation - *(hash)* Simplify documentation for `HashMemo` creation ([#1076](https://github.com/0x676e67/wreq/issues/1076)) - 
([fe85f5d](https://github.com/0x676e67/wreq/commit/fe85f5d8972322fe76fdea8317563c730cce319f)) - Remove deprecated doc_cfg feature conditionally - ([29da566](https://github.com/0x676e67/wreq/commit/29da5662789a9ef8092943a29912dbb77cdde275)) - Clarify symbol conflict with OpenSSL ([#1068](https://github.com/0x676e67/wreq/issues/1068)) - ([ee2f9f0](https://github.com/0x676e67/wreq/commit/ee2f9f0cf0ab1faf6f56f85ca1a582f576c5f56f)) ### Performance - *(bench)* Optimize benchmark server ([#1073](https://github.com/0x676e67/wreq/issues/1073)) - ([bd8cd36](https://github.com/0x676e67/wreq/commit/bd8cd36084b0367a35d949355b12d5224ea800c0)) - *(buf)* Make `BufList::remaining` O(1) by caching length ([#1091](https://github.com/0x676e67/wreq/issues/1091)) - ([aaed745](https://github.com/0x676e67/wreq/commit/aaed745799bddacd91e62d373e64ab753ea2d8ee)) - *(error)* Hint compiler to inline trivial error-handling functions ([#1061](https://github.com/0x676e67/wreq/issues/1061)) - ([7746f74](https://github.com/0x676e67/wreq/commit/7746f74c3749116a3e2148a59771c8219077e94b)) - *(http1)* Eliminate `ParserConfig` clones on the HTTP/1.1 request hot path ([#1088](https://github.com/0x676e67/wreq/issues/1088)) - ([9edb950](https://github.com/0x676e67/wreq/commit/9edb95002b121b914dd6cc2f8004f55ba6f2e8bf)) - *(http2)* Backport and apply hyper client's H2 configuration ([#1063](https://github.com/0x676e67/wreq/issues/1063)) - ([6e2f160](https://github.com/0x676e67/wreq/commit/6e2f160e6ddc9b59a8e3de64fb487f5a47f428e8)) - *(multipart)* Improve memory layout of `multipart::Form` ([#1095](https://github.com/0x676e67/wreq/issues/1095)) - ([ff44181](https://github.com/0x676e67/wreq/commit/ff4418136e8529a5dedbe008d2dee24441ee232a)) - *(request)* Static init for common content-type header ([#1060](https://github.com/0x676e67/wreq/issues/1060)) - ([1e45fc5](https://github.com/0x676e67/wreq/commit/1e45fc557721de2d0d483cb00ccc38fe59aeb9a0)) - *(response)* Hint compiler to inline trivial response-handling 
functions ([#1062](https://github.com/0x676e67/wreq/issues/1062)) - ([be87bb8](https://github.com/0x676e67/wreq/commit/be87bb85646817cdb6c356ae8efa6eec587fac03)) ### Styling - *(bench)* Fmt code - ([c6e6726](https://github.com/0x676e67/wreq/commit/c6e6726f2f70c19dc898110af1a3b2131379036a)) - *(request)* Fmt imports for request.rs file - ([2c51823](https://github.com/0x676e67/wreq/commit/2c518232f713827bf3be31c4823d76127566c63a)) ### Miscellaneous Tasks - *(bench)* Update mod benchmark comment - ([f987254](https://github.com/0x676e67/wreq/commit/f987254db8d3f44aa4538bc4436ac7daa8aa608d)) - *(bench)* Format expected error annotations - ([7131366](https://github.com/0x676e67/wreq/commit/71313662072bad0fa18ed8c0a4d921c7ce706499)) - *(client)* Fmt code - ([21f27bc](https://github.com/0x676e67/wreq/commit/21f27bc22fb1304cb77ffa52acd3d12bdc56dcfe)) - *(conn)* Optimize `ConnectionId` cloning ([#1108](https://github.com/0x676e67/wreq/issues/1108)) - ([1a58655](https://github.com/0x676e67/wreq/commit/1a58655420f9b2c771cb433bf2e2a1d0b5158ad5)) - *(core)* Clear code - ([9411b19](https://github.com/0x676e67/wreq/commit/9411b19d16d1dee6b66657dc681c96c89394fe6f)) - *(tcp)* Prune redundant local address handling ([#1107](https://github.com/0x676e67/wreq/issues/1107)) - ([6a2f343](https://github.com/0x676e67/wreq/commit/6a2f343d280ecc9e40864a85d9b31d44de84ae36)) - Fmt code - ([69c7a76](https://github.com/0x676e67/wreq/commit/69c7a76b483695ebeaf8deded1bb74a655d11602)) - Fmt import - ([e96a759](https://github.com/0x676e67/wreq/commit/e96a7592ad957efd8e0d0cda3d2ccd6406694356)) - Update comments for compression support dependencies - ([3f154d3](https://github.com/0x676e67/wreq/commit/3f154d323ff71e7b4ad38c44a90373e6a5aa9569)) - Refactor `Cargo.toml` for clarity and organization - ([b272408](https://github.com/0x676e67/wreq/commit/b27240866ad81f29616186243ac5a49cf0d165b8)) - Lint core ([#1071](https://github.com/0x676e67/wreq/issues/1071)) - 
([6ed8212](https://github.com/0x676e67/wreq/commit/6ed8212248bfd7085b56f3ff4330acb929d066bf)) - Fix clippy - ([cf29946](https://github.com/0x676e67/wreq/commit/cf2994669b1be87d3fc5555a5a5179acb54d62d5)) ### Bench - Add missing `TokioTimer` to http1 server builder ([#1081](https://github.com/0x676e67/wreq/issues/1081)) - ([cacd004](https://github.com/0x676e67/wreq/commit/cacd0046acb3051e1f227678a17a972a08a841e4)) - Format benchmark group labels - ([63f9e39](https://github.com/0x676e67/wreq/commit/63f9e3944d358b4fdcf1df73329d43c5632593e4)) - Improve benchmark test coverage ([#1075](https://github.com/0x676e67/wreq/issues/1075)) - ([ef41eb3](https://github.com/0x676e67/wreq/commit/ef41eb3fc14df26f6e51a979a978c3c8eeb73101)) - Simplify grouped benchmarks - ([c63ef51](https://github.com/0x676e67/wreq/commit/c63ef51583a463b44c9efce95e80719c5b803070)) - Include TLS-encrypted scenarios for HTTP/1 and HTTP/2 - ([10dc7fd](https://github.com/0x676e67/wreq/commit/10dc7fddccf1afc1a30f978b6f976d1cf19007ad)) - Add benchmarks for full and streaming bodies ([#1069](https://github.com/0x676e67/wreq/issues/1069)) - ([0186719](https://github.com/0x676e67/wreq/commit/01867191c4b78cb179980751508e6d1d4ebd685f)) - Add benchmarks for HTTP/1.1 and HTTP/2 ([#1065](https://github.com/0x676e67/wreq/issues/1065)) - ([71fb97a](https://github.com/0x676e67/wreq/commit/71fb97a6a19065e6655875ee3811deaa9c3ae429)) ### Build - *(deps)* Bump btls from 0.5.3 to 0.5.4 ([#1090](https://github.com/0x676e67/wreq/issues/1090)) - ([7c901db](https://github.com/0x676e67/wreq/commit/7c901db6ea4cce23500af66059851fd81e9c1d54)) - *(deps)* Replace `ahash` with `foldhash` in `lru` cache ([#1084](https://github.com/0x676e67/wreq/issues/1084)) - ([5c7b411](https://github.com/0x676e67/wreq/commit/5c7b4110a4b6678276218b7d6e43b6762b957ebe)) - *(deps)* Migrate from `boring2` to `btls` ([#1083](https://github.com/0x676e67/wreq/issues/1083)) - 
([2d45542](https://github.com/0x676e67/wreq/commit/2d45542b230397875bd92fbca65389b24e17ca2f)) - *(deps)* Replace `raw-cpuid` with `sysinfo` implementation ([#1077](https://github.com/0x676e67/wreq/issues/1077)) - ([1ab8770](https://github.com/0x676e67/wreq/commit/1ab87707bb7939d79bd31d9460a79bece97dce8c)) - *(deps)* Bump nttld/setup-ndk from 1.5.0 to 1.6.0 ([#1072](https://github.com/0x676e67/wreq/issues/1072)) - ([3757645](https://github.com/0x676e67/wreq/commit/3757645801a260bb0db38cdbd12f26a2cc45ea5c)) - *(deps)* Replace `schnellru` with `lru` implementation ([#1066](https://github.com/0x676e67/wreq/issues/1066)) - ([13c9586](https://github.com/0x676e67/wreq/commit/13c9586c0951c881312cdb6036a188a20eb5746c)) ## New Contributors ❤️ * @sqdshguy made their first contribution in [#1094](https://github.com/0x676e67/wreq/pull/1094) ## [6.0.0-rc.28](https://github.com/0x676e67/wreq/compare/v6.0.0-rc.27..v6.0.0-rc.28) - 2026-02-11 ### Bug Fixes - *(http1)* Use case-insensitive matching for trailer fields ([#1059](https://github.com/0x676e67/wreq/issues/1059)) - ([1b7d57b](https://github.com/0x676e67/wreq/commit/1b7d57bce1fcc7e471ba383a5b0c14fcc926d1de)) ### Performance - *(request)* Reduce overhead by lazy-loading headers for `json`/`form` data ([#1058](https://github.com/0x676e67/wreq/issues/1058)) - ([6992b6f](https://github.com/0x676e67/wreq/commit/6992b6ffd69bf61f710d97d97b436d630e38cbe7)) ## [6.0.0-rc.27](https://github.com/0x676e67/wreq/compare/v6.0.0-rc.26..v6.0.0-rc.27) - 2026-01-17 ### Features - *(cookie)* Consolidate cookie methods into a unified add() ([#1043](https://github.com/0x676e67/wreq/issues/1043)) - ([59999e6](https://github.com/0x676e67/wreq/commit/59999e613305e8aa8e13150cec858525b9f4cb6f)) - *(tls)* Add peer certificate chain to `TlsInfo` ([#1049](https://github.com/0x676e67/wreq/issues/1049)) - ([f27cb78](https://github.com/0x676e67/wreq/commit/f27cb789c8db32ca4fd0bc4e6d8e007307639ba6)) ### Bug Fixes - *(verbose)* Correct connection verbose 
tracing ([#1055](https://github.com/0x676e67/wreq/issues/1055)) - ([22516ae](https://github.com/0x676e67/wreq/commit/22516ae9f1a4becf3827e1ba9889a6add59e38b6)) ### Refactor - *(redirect)* Expose `Attempt` fields as public API ([#1046](https://github.com/0x676e67/wreq/issues/1046)) - ([b97fa4f](https://github.com/0x676e67/wreq/commit/b97fa4fac5530fb455777db986f2f31f8719a6ad)) ### Performance - *(redirect)* Use static `HeaderName` for `cookie2` to avoid allocation ([#1047](https://github.com/0x676e67/wreq/issues/1047)) - ([0211cad](https://github.com/0x676e67/wreq/commit/0211cad5595220095179c0045aff1c3a76690a1e)) - *(tls)* Use `Bytes` for `peer_certificate` to enable cheap cloning ([#1050](https://github.com/0x676e67/wreq/issues/1050)) - ([27c8e74](https://github.com/0x676e67/wreq/commit/27c8e74936e6eff30761954f3e9f4133b08f611b)) ### Styling - *(cookie)* Prefer `dt <= SystemTime::now()` in expires check ([#1045](https://github.com/0x676e67/wreq/issues/1045)) - ([5da3114](https://github.com/0x676e67/wreq/commit/5da3114e749b6a7a0aeb0f8cdd72759bc1a216d5)) - *(cookie)* Prefer `Duration::is_zero()` in Max-Age=0 check ([#1044](https://github.com/0x676e67/wreq/issues/1044)) - ([1e607dd](https://github.com/0x676e67/wreq/commit/1e607dd0b0d9822dfc9873d7a2e0093defc6b445)) ### Miscellaneous Tasks - *(test)* Fix windows tests ([#1042](https://github.com/0x676e67/wreq/issues/1042)) - ([a22ca01](https://github.com/0x676e67/wreq/commit/a22ca01315ab62659a1498f3d157fb767cdeb828)) ### Build - *(deps)* Add `prefix-symbols` to resolve `OpenSSL` symbol conflicts ([#1056](https://github.com/0x676e67/wreq/issues/1056)) - ([9c40d0f](https://github.com/0x676e67/wreq/commit/9c40d0ff294ae6d15477284c205607147361c90a)) - *(deps)* Bump `url` dependency version to 2.5.8 ([#1053](https://github.com/0x676e67/wreq/issues/1053)) - ([f0ba09e](https://github.com/0x676e67/wreq/commit/f0ba09e08fbd24a4736b256ef87a1f10da3c0754)) - *(deps)* Update `http2` dependency version to 0.5.11 
([#1051](https://github.com/0x676e67/wreq/issues/1051)) - ([0ccc4e8](https://github.com/0x676e67/wreq/commit/0ccc4e8e6db4885dada569ecf161bf5104d8a37f)) ## New Contributors ❤️ * @Abernson made their first contribution in [#1049](https://github.com/0x676e67/wreq/pull/1049) ## [6.0.0-rc.26](https://github.com/0x676e67/wreq/compare/v6.0.0-rc.25..v6.0.0-rc.26) - 2025-12-31 ### Features - Add `query` and `form` crate features ([#1035](https://github.com/0x676e67/wreq/issues/1035)) - ([091b9e9](https://github.com/0x676e67/wreq/commit/091b9e9e93fef8bc838910dc383a3fb6bdcb8778)) ### Bug Fixes - *(proxy)* Skip proxy headers for HTTPS destinations ([#1039](https://github.com/0x676e67/wreq/issues/1039)) - ([972737f](https://github.com/0x676e67/wreq/commit/972737f540150819d9659cb17e8cdc097dbb078f)) - *(redirect)* Fix redirect `location` encoding ([#1034](https://github.com/0x676e67/wreq/issues/1034)) - ([f8e2114](https://github.com/0x676e67/wreq/commit/f8e21143abe06f7ae65d26d3ffb979433fcfe394)) ### Refactor - *(header)* Hide internal details of `OrigHeaderName` ([#1036](https://github.com/0x676e67/wreq/issues/1036)) - ([5424935](https://github.com/0x676e67/wreq/commit/5424935235270cead6c5f2e9a7f59a5398ad001c)) ### Performance - *(proxy)* Improve proxy credential handling for concurrent requests ([#1041](https://github.com/0x676e67/wreq/issues/1041)) - ([4016d1b](https://github.com/0x676e67/wreq/commit/4016d1bfeb7b24122ecdc0906129e65841c3700c)) - *(uri)* Improve `String` to `Uri` conversion performance ([#1038](https://github.com/0x676e67/wreq/issues/1038)) - ([fcd5cc5](https://github.com/0x676e67/wreq/commit/fcd5cc54a7d3d0d0c2d3575af6f8c6ea1f0fdabe)) ### Miscellaneous Tasks - *(redirect)* Remove macros - ([c92fbaf](https://github.com/0x676e67/wreq/commit/c92fbaf87d33c11d681c7d47c09a54d47b2674fb)) ## New Contributors ❤️ * @blinjrm made their first contribution in [#1039](https://github.com/0x676e67/wreq/pull/1039) ## 
[6.0.0-rc.25](https://github.com/0x676e67/wreq/compare/v6.0.0-rc.23..v6.0.0-rc.25) - 2025-12-23 ### Features - *(cookie)* Refactor `CookieStore` cookie compression strategy ([#1005](https://github.com/0x676e67/wreq/issues/1005)) - ([2dc14cd](https://github.com/0x676e67/wreq/commit/2dc14cd9207d0c1cb41583395a7f544acb40aadf)) - *(error)* Add `is_proxy_connect` for proxy connection errors ([#1014](https://github.com/0x676e67/wreq/issues/1014)) - ([0578465](https://github.com/0x676e67/wreq/commit/0578465eb64a23b2d47fb7080ea372646c4783d6)) - *(proxy)* Compatibility for sending HTTP requests without HTTPS tunneling ([#991](https://github.com/0x676e67/wreq/issues/991)) - ([bd1d58b](https://github.com/0x676e67/wreq/commit/bd1d58bcf3b87924486b9515f6f678dc8ca36800)) - *(redirect)* Add async support to redirect policy ([#996](https://github.com/0x676e67/wreq/issues/996)) - ([bc6f113](https://github.com/0x676e67/wreq/commit/bc6f11376d884dcd614889861bb55157907cdab7)) - *(response)* Introduce trailers support ([#1021](https://github.com/0x676e67/wreq/issues/1021)) - ([28bcc63](https://github.com/0x676e67/wreq/commit/28bcc63cb0e9083c944d55ca3895ee70a1ed636b)) ### Bug Fixes - *(proxy)* Improve domain matching case insensitivity ([#1031](https://github.com/0x676e67/wreq/issues/1031)) - ([87f9019](https://github.com/0x676e67/wreq/commit/87f90191bbb5fe39174ab2777b4d526145f2e75c)) - *(proxy)* Fix HTTP requests proxied through an `SOCKS5`/`HTTPS tunnel` ([#990](https://github.com/0x676e67/wreq/issues/990)) - ([7207dd5](https://github.com/0x676e67/wreq/commit/7207dd55989f9ef2d3577261928252b5dc90f206)) - *(redirect)* Ensure redirect URLs are properly encoded ([#1017](https://github.com/0x676e67/wreq/issues/1017)) - ([8ad5023](https://github.com/0x676e67/wreq/commit/8ad5023932b480c1cf94d8bbddc9bb2b59a83d6c)) - *(request)* Fix missing `http::Request` conversion extensions ([#1000](https://github.com/0x676e67/wreq/issues/1000)) - 
([9df5f14](https://github.com/0x676e67/wreq/commit/9df5f14f3657692ae19691105826d30c23056996)) - *(test)* Fix decompression test ([#998](https://github.com/0x676e67/wreq/issues/998)) - ([54f5ee6](https://github.com/0x676e67/wreq/commit/54f5ee63877e5ec3ef04167dcdb25b1025a0b2f7)) ### Refactor - *(config)* Simplify extension config type wrappers ([#1009](https://github.com/0x676e67/wreq/issues/1009)) - ([adf84e3](https://github.com/0x676e67/wreq/commit/adf84e38abaa921f10a3994920bbe494bafc608a)) - *(core)* Use flat module style - ([30a8c13](https://github.com/0x676e67/wreq/commit/30a8c135c26bc4853c24f3a5209b6ad098a4f74a)) - *(decoder)* Reorder decoder tower layers ([#1026](https://github.com/0x676e67/wreq/issues/1026)) - ([910378d](https://github.com/0x676e67/wreq/commit/910378d9965cd11a9d0c9bf0478428d1f200802d)) - *(ext)* Remove extension wrapper types ([#999](https://github.com/0x676e67/wreq/issues/999)) - ([15b4866](https://github.com/0x676e67/wreq/commit/15b48664364a436d863b5f94881d6e36402b7f10)) - *(mod)* Use flat module style and merge legacy client ([#993](https://github.com/0x676e67/wreq/issues/993)) - ([75db3ea](https://github.com/0x676e67/wreq/commit/75db3eaa3b63d52580cef711cd2b3a5960d3850d)) - *(proxy)* Use flat module style - ([0925369](https://github.com/0x676e67/wreq/commit/0925369c903046ae745bba8eb7330ae2086fa4b7)) - *(redirect)* Refactor handling of redirect history ([#1002](https://github.com/0x676e67/wreq/issues/1002)) - ([b1ce184](https://github.com/0x676e67/wreq/commit/b1ce184b901aa5f1d11eb1af4dd6b02dffedfed6)) ### Documentation - *(proxy)* Fix docs prompt ([#1010](https://github.com/0x676e67/wreq/issues/1010)) - ([989e691](https://github.com/0x676e67/wreq/commit/989e6910014124cc579eabd372a34ea665d37c63)) - Update documentation for `Request` and `RequestBuilder` - ([e30b393](https://github.com/0x676e67/wreq/commit/e30b3932323f23e902ae97d0178d1409ff2ef290)) - Fix documentation build warning ([#1008](https://github.com/0x676e67/wreq/issues/1008)) - 
([303c54e](https://github.com/0x676e67/wreq/commit/303c54eba89e4cd2252da3a986710ad330034da8)) ### Performance - *(client)* Reduce one `HeaderMap` clone during header merge ([#987](https://github.com/0x676e67/wreq/issues/987)) - ([ce030b8](https://github.com/0x676e67/wreq/commit/ce030b8c3ba6bb233775fad271e1ecff49a95a61)) - *(ext)* Update query handling to avoid copying ([#1007](https://github.com/0x676e67/wreq/issues/1007)) - ([be0366f](https://github.com/0x676e67/wreq/commit/be0366fb656cdffde5504c0354ebff36a65a34b2)) - *(proxy)* Reduce branch matching ([#992](https://github.com/0x676e67/wreq/issues/992)) - ([ed00aec](https://github.com/0x676e67/wreq/commit/ed00aec00371097810d634901bd648dc990041f5)) - *(redirect)* Avoid cloning inner service for non-redirect requests ([#1028](https://github.com/0x676e67/wreq/issues/1028)) - ([7933341](https://github.com/0x676e67/wreq/commit/79333414a4c6a83e35356ab68ea301b0976472f4)) ### Styling - *(connector)* Fmt code - ([8a15bf4](https://github.com/0x676e67/wreq/commit/8a15bf418c902ada7975976d5278d20487535831)) - *(layer)* Use flat module style ([#1027](https://github.com/0x676e67/wreq/issues/1027)) - ([519e4ca](https://github.com/0x676e67/wreq/commit/519e4ca6c3ceba8e355838fb2ba0a359ddb3feff)) - Fmt code - ([53df061](https://github.com/0x676e67/wreq/commit/53df061e44f049c38de1d63b1ef2077070eea7fe)) - Fmt code - ([c15fc08](https://github.com/0x676e67/wreq/commit/c15fc08abc9210bcd98460e112e3fc746b39e748)) ### Testing - *(response)* Remove duplicate tests - ([7c1df27](https://github.com/0x676e67/wreq/commit/7c1df27efecb5f0a5abdaeec33d5f2bf9a885610)) ### Miscellaneous Tasks - *(body)* Remove `Debug` trait implementation for Body - ([72aea5e](https://github.com/0x676e67/wreq/commit/72aea5eb8e48fc2c561b0b4718f8a4654d0d31cf)) - *(body)* Remove unnecessary `cfg_attr` for stream feature - ([9c698b3](https://github.com/0x676e67/wreq/commit/9c698b38088529c9d79c293f41b3697a784b5b7a)) - *(body)* Simplify body construction 
([#1020](https://github.com/0x676e67/wreq/issues/1020)) - ([7116f11](https://github.com/0x676e67/wreq/commit/7116f11e0e80ad9651b6f19ced93c2ac8a4d3731)) - *(decoder)* Add debug assertion for decoder presence - ([977a7ba](https://github.com/0x676e67/wreq/commit/977a7ba80ff4080a19460f8c74908eac509084e6)) - *(layer)* Move body timeout layer to the outermost layer ([#1032](https://github.com/0x676e67/wreq/issues/1032)) - ([294e9d8](https://github.com/0x676e67/wreq/commit/294e9d8b4b257eb69ad23e7f1b0508ff5c6a8442)) - *(multipart)* Remove custom `Debug` trait implementations - ([4512913](https://github.com/0x676e67/wreq/commit/45129134b0c67dafb26fc2038f8fd9a4dc92b4ca)) - *(req/resp)* Fmt docs ([#1022](https://github.com/0x676e67/wreq/issues/1022)) - ([d395827](https://github.com/0x676e67/wreq/commit/d39582730c9d92cdb76e133648a4582511bac647)) - *(request)* Simplify request construction ([#1018](https://github.com/0x676e67/wreq/issues/1018)) - ([2b044fb](https://github.com/0x676e67/wreq/commit/2b044fbb8b748418b3dfd551c8b9b3ba629b5529)) - *(request)* Fmt code - ([32fa617](https://github.com/0x676e67/wreq/commit/32fa61771646a1c1c22cb205e94016006b87232a)) - *(response)* Remove `Debug` implementation for `Response` - ([51f86a5](https://github.com/0x676e67/wreq/commit/51f86a56bb35ca317a108796430e97cfe386bb0f)) - *(response)* Simplify response construction ([#1016](https://github.com/0x676e67/wreq/issues/1016)) - ([08a8066](https://github.com/0x676e67/wreq/commit/08a8066d690a2b902017a5ed9598c4e6972ca57c)) - *(style)* Fmt code - ([9f1fd12](https://github.com/0x676e67/wreq/commit/9f1fd12f4af694be89ca2c4e0a8f054ab4e6a310)) - Add MSRV job to CI workflow - ([681a763](https://github.com/0x676e67/wreq/commit/681a763eeac5bd75f29868d5907f72d0d8033e8e)) - Use `http_body_util::BodyDataStream` ([#1015](https://github.com/0x676e67/wreq/issues/1015)) - ([75baf44](https://github.com/0x676e67/wreq/commit/75baf44b84bccb3236e8d1b13249d61e344a4b44)) - Remove cmake pinning from Windows CI step - 
([87fc1f6](https://github.com/0x676e67/wreq/commit/87fc1f69989101ac412e4e8e585a4d2a5dfb1073)) - Add Android NDK tests ([#1011](https://github.com/0x676e67/wreq/issues/1011)) - ([adab15a](https://github.com/0x676e67/wreq/commit/adab15ac1c02411470f914311e299fc84ee3772f)) ### Revert - *(request)* Restore upstream header insertion strategy ([#995](https://github.com/0x676e67/wreq/issues/995)) - ([00c1d6d](https://github.com/0x676e67/wreq/commit/00c1d6d98d760512885270fa5211769ce311fc2a)) ### Build - *(deps)* Update `system-configuration` version to 0.7.0 ([#1024](https://github.com/0x676e67/wreq/issues/1024)) - ([040fc99](https://github.com/0x676e67/wreq/commit/040fc9942ab677a56d9432910db181ec181904f6)) - *(deps)* Bump actions/checkout from 5 to 6 ([#1023](https://github.com/0x676e67/wreq/issues/1023)) - ([814b9c8](https://github.com/0x676e67/wreq/commit/814b9c880727def1f6cf1586526971d91a473a4f)) - Cargo diet - ([f0d1ea1](https://github.com/0x676e67/wreq/commit/f0d1ea18226b46185106e9d096acd542ee39a454)) ## [6.0.0-rc.23](https://github.com/0x676e67/wreq/compare/v6.0.0-rc.22..v6.0.0-rc.23) - 2025-11-28 ### Bug Fixes - *(client)* Handle multi-value default headers without overriding requests ([#986](https://github.com/0x676e67/wreq/issues/986)) - ([745fa26](https://github.com/0x676e67/wreq/commit/745fa265a99a857c394226f4d2b64f7783813d17)) - *(test)* Fix decompression empty body test ([#979](https://github.com/0x676e67/wreq/issues/979)) - ([9e11af1](https://github.com/0x676e67/wreq/commit/9e11af143fc452e65a42cd720138b96c7433ffd4)) ### Refactor - *(http1)* Replace many args of `Chunked::step` with struct - ([6ffef6c](https://github.com/0x676e67/wreq/commit/6ffef6ca138f341340aa4f2086fdbca009ca301e)) - Change fast_random from xorshift to siphash a counter ([#983](https://github.com/0x676e67/wreq/issues/983)) - ([a386091](https://github.com/0x676e67/wreq/commit/a38609107949bc88e2dd38a0978bde91f8684b38)) ### Build - *(deps)* Bump actions/checkout from 5 to 6 
([#978](https://github.com/0x676e67/wreq/issues/978)) - ([81d8d82](https://github.com/0x676e67/wreq/commit/81d8d82f811d60a71f6a5e0eff712134dfd15f80)) ### Deps - Update tokio-tungstenite version to 0.28.0 ([#982](https://github.com/0x676e67/wreq/issues/982)) - ([cf8a71e](https://github.com/0x676e67/wreq/commit/cf8a71ea6957ccd40beda136678954787fcab9db)) ## [6.0.0-rc.22](https://github.com/0x676e67/wreq/compare/v6.0.0-rc.21..v6.0.0-rc.22) - 2025-11-21 ### Features - *(rt)* Add Timer::now() method to allow overriding the instant returned ([#976](https://github.com/0x676e67/wreq/issues/976)) - ([7cf3b95](https://github.com/0x676e67/wreq/commit/7cf3b95f8f445aff46ddd6455e0afaadb72bba36)) ### Bug Fixes - *(http1)* Fix rare missed write wakeup on connections ([#974](https://github.com/0x676e67/wreq/issues/974)) - ([d6bccef](https://github.com/0x676e67/wreq/commit/d6bccefe0e7d474e9bb1a375a3707326fa5db9a4)) - *(proxy)* Fix 407 proxy auth failures for HTTP requests ([#975](https://github.com/0x676e67/wreq/issues/975)) - ([df67842](https://github.com/0x676e67/wreq/commit/df6784232b9f3b146c872ecb8606336ad2a06256)) ### Performance - *(uri)* Avoid double copying during URI percent encoding ([#977](https://github.com/0x676e67/wreq/issues/977)) - ([6a1a406](https://github.com/0x676e67/wreq/commit/6a1a406d6f12eb3baf320a435330256b71bf8cf3)) ### Miscellaneous Tasks - *(client)* Refactor proxy auth handling logic - ([e54df35](https://github.com/0x676e67/wreq/commit/e54df351be60c6957759f82c3ca6861aca31db33)) ## [6.0.0-rc.21](https://github.com/0x676e67/wreq/compare/v6.0.0-rc.20..v6.0.0-rc.21) - 2025-11-07 ### Features - *(uri)* Percent-encode spaces when building request URLs ([#972](https://github.com/0x676e67/wreq/issues/972)) - ([de1c937](https://github.com/0x676e67/wreq/commit/de1c9379c101764e1dc5f32d300154edec7f89f6)) ### Bug Fixes - *(proxy)* Support proxy auth where password is omitted ([#971](https://github.com/0x676e67/wreq/issues/971)) - 
([f7ffd56](https://github.com/0x676e67/wreq/commit/f7ffd565b8129007b2ee8ccd756f0ccf248decef)) ### Refactor - *(dns)* Redesign DNS API for improved ergonomics and functionality ([#968](https://github.com/0x676e67/wreq/issues/968)) - ([9c3c3f5](https://github.com/0x676e67/wreq/commit/9c3c3f50fe4249be3a1a878d5ad24506bf7778f1)) - *(proxy)* Consolidate platform-specific modules into mod.rs ([#956](https://github.com/0x676e67/wreq/issues/956)) - ([99d3ed7](https://github.com/0x676e67/wreq/commit/99d3ed74ce0c520baba77301a3a6da20701b550c)) ### Documentation - *(retry)* Fix typo ([#957](https://github.com/0x676e67/wreq/issues/957)) - ([ed5fef2](https://github.com/0x676e67/wreq/commit/ed5fef2a18f473b770799abfa64c092529ebf74d)) ### Performance - *(connector)* Disable Nagle's algorithm for TLS handshake ([#955](https://github.com/0x676e67/wreq/issues/955)) - ([35f4265](https://github.com/0x676e67/wreq/commit/35f426502dada4e4fb245048feccd3b6762f0ea0)) ### Testing - *(redirect)* Improve redirect cookie tests ([#963](https://github.com/0x676e67/wreq/issues/963)) - ([852f280](https://github.com/0x676e67/wreq/commit/852f28059719f3e485e58e9b92f2591466d0f342)) ### Miscellaneous Tasks - *(connector)* Fmt code - ([00fa021](https://github.com/0x676e67/wreq/commit/00fa021349eec058456e2e51ed6b01ab72eedecf)) - *(docs)* Improve API docs ([#954](https://github.com/0x676e67/wreq/issues/954)) - ([10eabd7](https://github.com/0x676e67/wreq/commit/10eabd775aacce16a8e0a616c5919124bb5456ef)) - Update docs - ([9c08747](https://github.com/0x676e67/wreq/commit/9c0874711a10b5d68ee6710218dac4ee3a07d982)) - Fix style check ([#959](https://github.com/0x676e67/wreq/issues/959)) - ([6c3c02b](https://github.com/0x676e67/wreq/commit/6c3c02bab811893de65b599a8fc75fd50dadd103)) ### Build - *(deps)* Update windows-registry requirement from 0.5.0 to 0.6.0 ([#962](https://github.com/0x676e67/wreq/issues/962)) - ([b51a8fb](https://github.com/0x676e67/wreq/commit/b51a8fbfb5b9f6e3c235ce389926021236e57386)) ## 
[6.0.0-rc.20](https://github.com/0x676e67/wreq/compare/v6.0.0-rc.16..v6.0.0-rc.20) - 2025-09-19 ### Refactor - *(tls)* Replace `prefer_chacha20` with `preserve_tls13_cipher_list` ([#953](https://github.com/0x676e67/wreq/issues/953)) - ([3d4f61d](https://github.com/0x676e67/wreq/commit/3d4f61d1135c066df07073899c1cfe81c1fcf961)) ## [6.0.0-rc.16](https://github.com/0x676e67/wreq/compare/v6.0.0-rc.15..v6.0.0-rc.16) - 2025-09-17 ### Features - *(ws)* Implement `FusedStream` trait for WebSocket ([#949](https://github.com/0x676e67/wreq/issues/949)) - ([d292ef7](https://github.com/0x676e67/wreq/commit/d292ef799a4dfac4500f5ccd785e3fdebeecbe7c)) ### Bug Fixes - *(client)* Allow absolute-form if is_proxied is set even on HTTPS ([#945](https://github.com/0x676e67/wreq/issues/945)) - ([0df02e1](https://github.com/0x676e67/wreq/commit/0df02e1c8db43cd94e32541ce0e24b3966441804)) - *(error)* Drop leftover debug logging ([#948](https://github.com/0x676e67/wreq/issues/948)) - ([3f73ae6](https://github.com/0x676e67/wreq/commit/3f73ae688bd7acd8a7292eb2a5a6ab7b9892de3b)) - *(http2)* Fix chained calls ([#952](https://github.com/0x676e67/wreq/issues/952)) - ([a1765dc](https://github.com/0x676e67/wreq/commit/a1765dce6403ea037769331bf51e520f13b7f024)) ### Refactor - *(ws)* Improve close method API ergonomics ([#947](https://github.com/0x676e67/wreq/issues/947)) - ([de9e36b](https://github.com/0x676e67/wreq/commit/de9e36b98e1d372d658c55eeb2cc324d67177b06)) ### Miscellaneous Tasks - *(client)* Fmt code - ([ccc54f7](https://github.com/0x676e67/wreq/commit/ccc54f7cb0805749fac896d3e388383916cf1200)) - *(examples)* Remove tracing logs from examples - ([dae70b4](https://github.com/0x676e67/wreq/commit/dae70b4320372c00387a2090ba34099ca1e22246)) - *(examples)* Change HTTP client to use wreq with proxy - ([ba92b95](https://github.com/0x676e67/wreq/commit/ba92b95a913811f7979ff8e51239390c2c62f3d4)) ## [6.0.0-rc.15](https://github.com/0x676e67/wreq/compare/v6.0.0-rc.14..v6.0.0-rc.15) - 2025-09-12 ### 
Features - *(http1)* Remove `preserve_header_case` support ([#943](https://github.com/0x676e67/wreq/issues/943)) - ([fd59127](https://github.com/0x676e67/wreq/commit/fd59127a8afebc42adf4e7eb40faaf792377e62b)) - *(retry)* Introduce configurable retry policy ([#935](https://github.com/0x676e67/wreq/issues/935)) - ([f4644d8](https://github.com/0x676e67/wreq/commit/f4644d8a08545de19638abd80484210190f123f2)) ### Refactor - *(ext)* Introduce ergonomic and functional API ([#942](https://github.com/0x676e67/wreq/issues/942)) - ([52709b3](https://github.com/0x676e67/wreq/commit/52709b3dc3b3c7a756bb370c8efc31dba86f2fc9)) - *(keylog)* Redesign API for better ergonomics and functionality ([#941](https://github.com/0x676e67/wreq/issues/941)) - ([7845b9b](https://github.com/0x676e67/wreq/commit/7845b9b9d6c3c31cda3c52f573a1446e710710d7)) ### Testing - *(client)* Update header tests and examples ([#939](https://github.com/0x676e67/wreq/issues/939)) - ([bfb8739](https://github.com/0x676e67/wreq/commit/bfb8739b0c0a03e06e54d9c68f7783ca1415b0a3)) ### Miscellaneous Tasks - *(internal)* Remove unnecessary `Debug` bounds - ([4aa1088](https://github.com/0x676e67/wreq/commit/4aa1088888ba8fe4e64a2ff7cf874b1d0174b154)) - *(response)* Drop `Uri::try_from` in `From<Response>` - ([9e16fba](https://github.com/0x676e67/wreq/commit/9e16fba5e1be1bf95b9b06ad16e0a9858c0b60c2)) - *(retry)* Remove unused code - ([147fe60](https://github.com/0x676e67/wreq/commit/147fe60d5c62048b064e7896d90e96011383ffa9)) - *(sync)* Remove unused code ([#940](https://github.com/0x676e67/wreq/issues/940)) - ([a17f799](https://github.com/0x676e67/wreq/commit/a17f79957e722589b6e122f54fae2f1a82893c5b)) ## [6.0.0-rc.14](https://github.com/0x676e67/wreq/compare/v6.0.0-rc.13..v6.0.0-rc.14) - 2025-09-05 ### Bug Fixes - *(client)* Ensure `Accept-Encoding` header is applied correctly ([#928](https://github.com/0x676e67/wreq/issues/928)) - ([f9f9331](https://github.com/0x676e67/wreq/commit/f9f9331ca28f07fd1d5ad4067d297c66dfe013c1)) ### 
Refactor - *(client)* Enforce `ClientBuilder` initialization via `Client::builder()` ([#932](https://github.com/0x676e67/wreq/issues/932)) - ([513e6f5](https://github.com/0x676e67/wreq/commit/513e6f56169ba357c8d830d77745092d1a90750c)) - *(response)* Accept `AsRef<str>` for charset for better ergonomics ([#934](https://github.com/0x676e67/wreq/issues/934)) - ([b95e3b5](https://github.com/0x676e67/wreq/commit/b95e3b5791b983b436c892569a1d3a678999ed26)) ### Performance - *(client)* Prevent header duplication by reordering layers ([#930](https://github.com/0x676e67/wreq/issues/930)) - ([ca72a53](https://github.com/0x676e67/wreq/commit/ca72a5341e0ca7d0afe187d1fcd63e1ce1895596)) - *(client)* Avoid redundant header copy ([#929](https://github.com/0x676e67/wreq/issues/929)) - ([c0d8df7](https://github.com/0x676e67/wreq/commit/c0d8df7c1b8d4dfb002dc6bf6ff417ba67f2d587)) ### Miscellaneous Tasks - *(client)* Speed up client initialization ([#931](https://github.com/0x676e67/wreq/issues/931)) - ([be90796](https://github.com/0x676e67/wreq/commit/be90796bda2c481c773c9c93e26420da92faa932)) - *(test)* Fmt code - ([f5ab83c](https://github.com/0x676e67/wreq/commit/f5ab83cfb4d28518dab06e63d28c6f234bfd590f)) - *(tests)* Fmt code ([#933](https://github.com/0x676e67/wreq/issues/933)) - ([86ee4e3](https://github.com/0x676e67/wreq/commit/86ee4e3343466f0284837d4bec6429f28620fc1a)) ## [6.0.0-rc.13](https://github.com/0x676e67/wreq/compare/v6.0.0-rc.12..v6.0.0-rc.13) - 2025-09-02 ### Bug Fixes - *(cookie)* Normalize host handling with port ([#926](https://github.com/0x676e67/wreq/issues/926)) - ([66368be](https://github.com/0x676e67/wreq/commit/66368be48fd8437c1f2c8cd3ef9e7f0f8432a245)) ### Styling - *(redirect)* Fmt code - ([db195ef](https://github.com/0x676e67/wreq/commit/db195efaedd4232cf27c4161414de64c4898b1fe)) ## [6.0.0-rc.12](https://github.com/0x676e67/wreq/compare/v6.0.0-rc.11..v6.0.0-rc.12) - 2025-09-02 ### Features - *(lib)* Introduce request shortcut 
([#924](https://github.com/0x676e67/wreq/issues/924)) - ([ad6b79d](https://github.com/0x676e67/wreq/commit/ad6b79d0042df52e0e1c418a66a66760308837ac)) ## [6.0.0-rc.11](https://github.com/0x676e67/wreq/compare/v6.0.0-rc.9..v6.0.0-rc.11) - 2025-08-31 ### Features - *(tls)* Allow custom ALPN configuration ([#921](https://github.com/0x676e67/wreq/issues/921)) - ([9edfd54](https://github.com/0x676e67/wreq/commit/9edfd54732bae3fd98510d307c4320f48bf44a6d)) ### Bug Fixes - *(cookie)* Fix cookie deletion and lookup logic ([#923](https://github.com/0x676e67/wreq/issues/923)) - ([e6014ef](https://github.com/0x676e67/wreq/commit/e6014ef049826062e305e475e10e4c142980a3d5)) ### Documentation - *(tls)* Refine `TlsOptions` field documentation ([#922](https://github.com/0x676e67/wreq/issues/922)) - ([2b42c9c](https://github.com/0x676e67/wreq/commit/2b42c9c3b43b3aabaed6d1c66b0f0bc21070cd48)) - *(tls)* Update module docs ([#920](https://github.com/0x676e67/wreq/issues/920)) - ([04c1258](https://github.com/0x676e67/wreq/commit/04c12583c67f0205e5dfd049db19316acbc32cce)) ### Miscellaneous Tasks - *(tls)* Streamline conn module type re-exports - ([362c12a](https://github.com/0x676e67/wreq/commit/362c12a50956eb3955a5a6735ebd0bfac39b1e8b)) - *(tls)* Remove ext & cert compression wrappers ([#918](https://github.com/0x676e67/wreq/issues/918)) - ([d9c3e84](https://github.com/0x676e67/wreq/commit/d9c3e8420075f8f6feca0f1725728f0cc25603aa)) ## [6.0.0-rc.9](https://github.com/0x676e67/wreq/compare/v6.0.0-rc.8..v6.0.0-rc.9) - 2025-08-30 ### Features - *(cookie)* Improve `cookie_provider` for better ergonomics and flexibility ([#895](https://github.com/0x676e67/wreq/issues/895)) - ([70dd6d9](https://github.com/0x676e67/wreq/commit/70dd6d9d13181b252ce8b69ba807fd5b7b9a15a4)) - *(dns)* Export `IntoResolve` as public API ([#913](https://github.com/0x676e67/wreq/issues/913)) - ([b1b6278](https://github.com/0x676e67/wreq/commit/b1b6278830e20496e965cdbb9adca7d03974f928)) - *(lib)* Add shortcut request 
methods ([#903](https://github.com/0x676e67/wreq/issues/903)) - ([03cce5e](https://github.com/0x676e67/wreq/commit/03cce5e87dfc9fc77d7ce8eb10bcb60069a3114e)) - *(proxy)* Add Unix socket proxy support ([#900](https://github.com/0x676e67/wreq/issues/900)) - ([d60a6f3](https://github.com/0x676e67/wreq/commit/d60a6f30b0d299f2f1e44f79ec5f9b6cdf94bddf)) - *(redirect)* Support accessing redirect history in response ([#917](https://github.com/0x676e67/wreq/issues/917)) - ([46278eb](https://github.com/0x676e67/wreq/commit/46278eb6a38b48a75803cf7b49161690d0b90161)) - *(redirect)* Allow custom redirects to access response headers ([#916](https://github.com/0x676e67/wreq/issues/916)) - ([7a1c86a](https://github.com/0x676e67/wreq/commit/7a1c86abab7d835a5da92b2573d7e5ef71ff6980)) - *(response)* Preserve URL when converting `Response` to `http::Response` ([#897](https://github.com/0x676e67/wreq/issues/897)) - ([72b24c7](https://github.com/0x676e67/wreq/commit/72b24c7284d21af2bfbfcc0bcdbac9bc20a5feac)) - *(ws)* Remove Utf8Bytes::from_bytes_unchecked, unsafe UTF-8 ([#912](https://github.com/0x676e67/wreq/issues/912)) - ([e6b8bcf](https://github.com/0x676e67/wreq/commit/e6b8bcfd33ec6a70cf705da1665ca6d15cae520e)) ### Refactor - *(connect)* Safely convert `socket2::Socket` to Tokio `TcpSocket` ([#904](https://github.com/0x676e67/wreq/issues/904)) - ([2461be9](https://github.com/0x676e67/wreq/commit/2461be98fc73e2fd78c396a69c70ce9ab4f7bbf0)) - *(core)* Replace Tokio I/O abstraction ([#909](https://github.com/0x676e67/wreq/issues/909)) - ([16976b9](https://github.com/0x676e67/wreq/commit/16976b935f01a6464d4c0ae1e3611e45429b351b)) - *(deps)* Remove dependency on `url::Url` ([#914](https://github.com/0x676e67/wreq/issues/914)) - ([356950d](https://github.com/0x676e67/wreq/commit/356950d2cfbcb9f4f4ff5832ca696a95880171f2)) - *(h2)* Refactor legacy unsafe wrapper code ([#905](https://github.com/0x676e67/wreq/issues/905)) - 
([172f1c5](https://github.com/0x676e67/wreq/commit/172f1c558292b4630875b0e3910ee2cb4337f071)) - *(io)* Use Pin::as_deref_mut() from std instead of custom polyfill ([#906](https://github.com/0x676e67/wreq/issues/906)) - ([d3d80f1](https://github.com/0x676e67/wreq/commit/d3d80f16e23e8e1594f2c45041b9403ea2b6be03)) ### Documentation - *(identity)* Update documentation - ([459afd6](https://github.com/0x676e67/wreq/commit/459afd6a90c4da254dd6598f604c3b1fd1841cec)) - *(proxy)* Remove type export section - ([ae81ef5](https://github.com/0x676e67/wreq/commit/ae81ef533e2439d0398a22b6740521fddcb6cc0d)) - *(request)* Update docs on request methods with cfg support - ([654e225](https://github.com/0x676e67/wreq/commit/654e2258d8472c3427af09b13c19f70949f38ca9)) ### Performance - *(http1)* Write during header sorting ([#899](https://github.com/0x676e67/wreq/issues/899)) - ([f025e3f](https://github.com/0x676e67/wreq/commit/f025e3fcfce4d8a8d31726b46e92ad8f51dcf46f)) - *(http2)* Significantly improve http2 multi-core performance ([#892](https://github.com/0x676e67/wreq/issues/892)) - ([2c3f873](https://github.com/0x676e67/wreq/commit/2c3f8736b21589ab4f9f2dec1f56c0a9de321dd0)) - *(layer)* Inline layer creation for faster client build - ([78e8fc7](https://github.com/0x676e67/wreq/commit/78e8fc7b203ac382a5fb70183564513c7346cbe1)) ### Styling - *(cookie)* Fmt code - ([315bccf](https://github.com/0x676e67/wreq/commit/315bccfc65101642b2a56f583c573b6d11148bb7)) - *(header)* Simplify header sorting branch match - ([ee23d25](https://github.com/0x676e67/wreq/commit/ee23d25fd258f51eb33b20d72460913c38e7a517)) - *(proto)* Fmt code - ([02e0bc0](https://github.com/0x676e67/wreq/commit/02e0bc06876a458536268863938a4906354791b9)) - *(request)* Fmt code - ([d6e56e4](https://github.com/0x676e67/wreq/commit/d6e56e4b9e85ab73d627d72a51ed04198483cf98)) ### Miscellaneous Tasks - *(ci)* Speed up tests with feature matrix in GitHub Actions ([#894](https://github.com/0x676e67/wreq/issues/894)) - 
([d66dc66](https://github.com/0x676e67/wreq/commit/d66dc6671fadbd427ea2c1d0e4fa07e61d62b4db)) - *(proxy)* Debug-print HTTP headers - ([628e6b4](https://github.com/0x676e67/wreq/commit/628e6b462561a7fd5fe987dff6e14a76b02272de)) - *(upgrade)* Drop unused code - ([bb26177](https://github.com/0x676e67/wreq/commit/bb261776fe41f1024f3af1d73147fd0440b2f908)) - Minimize package size - ([938e3f5](https://github.com/0x676e67/wreq/commit/938e3f56c113bd721ceb9216f15c2e8e141f6d50)) ### Build - *(deps)* Bump actions/checkout from 4 to 5 ([#908](https://github.com/0x676e67/wreq/issues/908)) - ([5f6723a](https://github.com/0x676e67/wreq/commit/5f6723a7a8aad0db11f27ff9aa8e5b208f5f6cb4)) - *(deps)* Minimize out-of-the-box dependencies ([#902](https://github.com/0x676e67/wreq/issues/902)) - ([5b68106](https://github.com/0x676e67/wreq/commit/5b68106bcda7ae78209afb35925704f13765717b)) - *(deps)* Bump actions/checkout from 3 to 5 ([#893](https://github.com/0x676e67/wreq/issues/893)) - ([9877ed6](https://github.com/0x676e67/wreq/commit/9877ed6c177c139719bf35245027399e39a7cae7)) ## [6.0.0-rc.8](https://github.com/0x676e67/wreq/compare/v6.0.0-rc.7..v6.0.0-rc.8) - 2025-08-12 ### Features - *(dns)* Improve `dns_resolver` for better ergonomics and flexibility ([#891](https://github.com/0x676e67/wreq/issues/891)) - ([9e3f974](https://github.com/0x676e67/wreq/commit/9e3f97450af724abba62cc1ee586c292b16e8498)) ### Bug Fixes - *(deps)* Upgrade url to v2.5.4 to address CVE-2024-12224 ([#887](https://github.com/0x676e67/wreq/issues/887)) - ([7038272](https://github.com/0x676e67/wreq/commit/70382725752d44682b5e684d7af3522614941f94)) - *(pool)* Prevent failure when registering the waker with this oneshot ([#888](https://github.com/0x676e67/wreq/issues/888)) - ([f7d914d](https://github.com/0x676e67/wreq/commit/f7d914d96712bb3f20403d1dce1c30c4d03c7586)) ### Refactor - *(client)* Remove `no_keepalive` method ([#890](https://github.com/0x676e67/wreq/issues/890)) - 
([0c15943](https://github.com/0x676e67/wreq/commit/0c159431a296163eb52cf95d0ea9f1e9fc84e3c0)) ### Documentation - *(README)* Update example - ([b620408](https://github.com/0x676e67/wreq/commit/b6204085abbfba933e6bfb368f7a8579b4bea417)) - *(service)* Update service docs - ([a644502](https://github.com/0x676e67/wreq/commit/a64450253447a8a4287c89e28c66cbd5f9a8c689)) ### Testing - *(common)* Add missing assertion in full_rewind test ([#889](https://github.com/0x676e67/wreq/issues/889)) - ([c84746a](https://github.com/0x676e67/wreq/commit/c84746af284f4b0c2ec72f4d01150cb53de30ac9)) ### Build - *(deps)* Update async-tungstenite requirement from 0.30.0 to 0.31.0 ([#884](https://github.com/0x676e67/wreq/issues/884)) - ([d484f71](https://github.com/0x676e67/wreq/commit/d484f71b1ba2ad26ee9fa28b230d6c4ce5f63df8)) ## [6.0.0-rc.7](https://github.com/0x676e67/wreq/compare/v6.0.0-rc.5..v6.0.0-rc.7) - 2025-08-10 ### Features - *(ws)* Option for `default_headers` method in websocket ([#883](https://github.com/0x676e67/wreq/issues/883)) - ([fd36b7a](https://github.com/0x676e67/wreq/commit/fd36b7a817f3fb8d2b59dea73c34ff4fd3249d87)) ### Bug Fixes - *(request)* Correct `default_headers` method semantics ([#882](https://github.com/0x676e67/wreq/issues/882)) - ([2cbd0ac](https://github.com/0x676e67/wreq/commit/2cbd0ac56813a9e4b022d1747dce512943c31993)) ### Refactor - *(dns)* Make hickory module internal ([#881](https://github.com/0x676e67/wreq/issues/881)) - ([e441048](https://github.com/0x676e67/wreq/commit/e441048a6b5df1af3e715cbeceba7e178bbb22eb)) ### Miscellaneous Tasks - *(client)* Expose additional configuration options - ([65bd959](https://github.com/0x676e67/wreq/commit/65bd95963500af6205f9f06b4cc059b67a0ed740)) ## [6.0.0-rc.5](https://github.com/0x676e67/wreq/compare/v6.0.0-rc.4..v6.0.0-rc.5) - 2025-08-09 ### Features - *(ws)* Expose the `message` module for external use ([#874](https://github.com/0x676e67/wreq/issues/874)) - 
([abed4ac](https://github.com/0x676e67/wreq/commit/abed4ac82d8ad82c72593ad931477acea70557b0)) ### Refactor - *(cookie)* Refactor legacy jar cookie implementation ([#871](https://github.com/0x676e67/wreq/issues/871)) - ([ebb1504](https://github.com/0x676e67/wreq/commit/ebb1504400102c71af9d76e9084f8d2ea14c16c7)) - *(dns)* Consolidate legacy DNS modules ([#876](https://github.com/0x676e67/wreq/issues/876)) - ([f54367c](https://github.com/0x676e67/wreq/commit/f54367cad0d5c699596f80857af234e78ba3d166)) ### Documentation - *(module)* Improve module-level documentation ([#877](https://github.com/0x676e67/wreq/issues/877)) - ([4e2c15f](https://github.com/0x676e67/wreq/commit/4e2c15f39ba0bdf61b0aedb30d43779a4c455d58)) - *(tls)* Update documentation for configuration fields ([#880](https://github.com/0x676e67/wreq/issues/880)) - ([94c060e](https://github.com/0x676e67/wreq/commit/94c060ed2a3fcc744223ab6a7224e67fae8c9210)) ### Performance - *(upgrade)* Inline hot methods in async IO wrapper ([#875](https://github.com/0x676e67/wreq/issues/875)) - ([8388b52](https://github.com/0x676e67/wreq/commit/8388b5241a253bb8f550435aa9e487d9ce16b44d)) ### Styling - *(internal)* Refactor internal code layout and naming ([#878](https://github.com/0x676e67/wreq/issues/878)) - ([fbf11fd](https://github.com/0x676e67/wreq/commit/fbf11fd588cb773471fb46302405655eb53cafe6)) ### Testing - *(client)* Verify multiple identical headers are appended correctly ([#879](https://github.com/0x676e67/wreq/issues/879)) - ([f245f9c](https://github.com/0x676e67/wreq/commit/f245f9c47965ee4b7682050357f350e05a2ca549)) ### Miscellaneous Tasks - *(retry)* Remove unnecessary clone in request duplication - ([d78568c](https://github.com/0x676e67/wreq/commit/d78568cc6079aaefe3f3b02c3537e21646a1f7f0)) ### Build - *(ws)* Switch to runtime-agnostic WebSocket implementation ([#873](https://github.com/0x676e67/wreq/issues/873)) - ([3fb93ef](https://github.com/0x676e67/wreq/commit/3fb93efb76773d8349ade8f66fe3cabb543faa7b)) ## 
[6.0.0-rc.4](https://github.com/0x676e67/wreq/compare/v6.0.0-rc.2..v6.0.0-rc.4) - 2025-08-07 ### Bug Fixes - *(cookie)* Store response cookies even with manual `Cookie` header ([#868](https://github.com/0x676e67/wreq/issues/868)) - ([d2f3bf0](https://github.com/0x676e67/wreq/commit/d2f3bf0ec425ad4880dbcba03951f260f8bb1015)) - *(header)* Preserve multi-value headers in `OrigHeaderMap` sorting ([#867](https://github.com/0x676e67/wreq/issues/867)) - ([b650956](https://github.com/0x676e67/wreq/commit/b6509561c779dde492a1208a2fe5f7c64832419d)) ### Refactor - *(client)* Allow `?Sized` trait objects in `dns_resolver` ([#870](https://github.com/0x676e67/wreq/issues/870)) - ([2baf195](https://github.com/0x676e67/wreq/commit/2baf1953024fdb646e205478d9dc568113ba2ec1)) ### Performance - *(cookie)* Optimize cookie layer to skip unnecessary matching ([#866](https://github.com/0x676e67/wreq/issues/866)) - ([ce9b531](https://github.com/0x676e67/wreq/commit/ce9b531bd4d0211b73fb64211f51a8549c948cfc)) ## [6.0.0-rc.2](https://github.com/0x676e67/wreq/compare/v6.0.0-rc.1..v6.0.0-rc.2) - 2025-08-04 ### Bug Fixes - *(build)* Resolve build failure on Windows when `default-features` are disabled ([#864](https://github.com/0x676e67/wreq/issues/864)) - ([4418e47](https://github.com/0x676e67/wreq/commit/4418e4773a711bf15a2e86777473f16b0af3d8e3)) ### Documentation - *(options)* Fix `Http2Options` description ([#863](https://github.com/0x676e67/wreq/issues/863)) - ([89b0957](https://github.com/0x676e67/wreq/commit/89b0957a196debafaeef6a6fa271a53b4a3f7964)) ## [6.0.0-rc.1](https://github.com/0x676e67/wreq/compare/v5.1.0..v6.0.0-rc.1) - 2025-08-03 ### Features - *(client)* Set default values for TCP keepalive and user_timeout ([#852](https://github.com/0x676e67/wreq/issues/852)) - ([f06fe61](https://github.com/0x676e67/wreq/commit/f06fe616b72a8d672c9a6118acfab7b96f18bbb6)) - *(client)* Expose TCP socket Happy Eyeballs timeout API ([#844](https://github.com/0x676e67/wreq/issues/844)) - 
([bcbfbf8](https://github.com/0x676e67/wreq/commit/bcbfbf802c03b6cf58eaa566d38b4a8c29037635)) - *(client)* Expose TCP socket send/recv buffer APIs ([#843](https://github.com/0x676e67/wreq/issues/843)) - ([2ea1052](https://github.com/0x676e67/wreq/commit/2ea105290434339cdf84a83afe4d6f65c864e09a)) - *(client)* Disable redirects by default ([#805](https://github.com/0x676e67/wreq/issues/805)) - ([ecf6019](https://github.com/0x676e67/wreq/commit/ecf60193deaa6951212d862445645e0ba9175cd7)) - *(client)* Add convenience method for sending `OPTIONS` requests ([#787](https://github.com/0x676e67/wreq/issues/787)) - ([34f1586](https://github.com/0x676e67/wreq/commit/34f158610a52228ad4d0bc665c268714c5b34e0d)) - *(client)* Make `HTTP`/`TLS` config options publicly accessible ([#783](https://github.com/0x676e67/wreq/issues/783)) - ([a4e7b98](https://github.com/0x676e67/wreq/commit/a4e7b981790942364b07f65333602f4fcbb68a7a)) - *(client)* Add `SO_REUSEADDR` support for tcp socket ([#762](https://github.com/0x676e67/wreq/issues/762)) - ([8aced63](https://github.com/0x676e67/wreq/commit/8aced637eed476faeb2212930ba91570a0c4cbda)) - *(client)* Add tower HTTP request middleware layer ([#694](https://github.com/0x676e67/wreq/issues/694)) - ([0ad0021](https://github.com/0x676e67/wreq/commit/0ad0021bde7dd890aa58e3a1d4f422984fab9eec)) - *(client)* Add tcp_user_timeout builder option ([#688](https://github.com/0x676e67/wreq/issues/688)) - ([d1d0eb4](https://github.com/0x676e67/wreq/commit/d1d0eb459b4859a73fc7b75934804bfa30bc907a)) - *(client)* Add tcp_keepalive_interval and tcp_keepalive_retries to ClientBuilder ([#643](https://github.com/0x676e67/wreq/issues/643)) - ([32fe31e](https://github.com/0x676e67/wreq/commit/32fe31e0b6aca538909d3e5367d12d58269bf818)) - *(client)* Drop API for retrieving default headers ([#640](https://github.com/0x676e67/wreq/issues/640)) - ([1b4a445](https://github.com/0x676e67/wreq/commit/1b4a4451fb7bd28e610431a12b2a427b3da64e9b)) - *(client)* Add identity to be 
used for client certificate authentication ([#617](https://github.com/0x676e67/wreq/issues/617)) - ([55c2490](https://github.com/0x676e67/wreq/commit/55c249067c267099c400119e12491441e4c0e63a)) - *(client)* Adds support for SSLKEYLOGFILE output from client ([#605](https://github.com/0x676e67/wreq/issues/605)) - ([dc0c40b](https://github.com/0x676e67/wreq/commit/dc0c40bed2faa7b743d5a22496e83029a9b84dcf)) - *(client)* ClientBuilder::interface on Solarish OS ([#594](https://github.com/0x676e67/wreq/issues/594)) - ([c0a7fbc](https://github.com/0x676e67/wreq/commit/c0a7fbcaf98f276de74e9b11dbc23f5bb5ce457c)) - *(cookie)* Provide access to raw cookie API ([#830](https://github.com/0x676e67/wreq/issues/830)) - ([5c5e3e5](https://github.com/0x676e67/wreq/commit/5c5e3e5ccdc3383c7c2b71fe4954da039882f877)) - *(emulation)* Expose config fields via accessors while preserving `non_exhaustive` ([#854](https://github.com/0x676e67/wreq/issues/854)) - ([dfdf707](https://github.com/0x676e67/wreq/commit/dfdf707e3fb7ca6f3800e4741572eb51323d06cb)) - *(error)* Report custom reason phrase in error message ([#767](https://github.com/0x676e67/wreq/issues/767)) - ([b492bc1](https://github.com/0x676e67/wreq/commit/b492bc1d408f1742c1f1e688784707f877cc1d5b)) - *(error)* Check if the error is an upgrade error ([#623](https://github.com/0x676e67/wreq/issues/623)) - ([ddae516](https://github.com/0x676e67/wreq/commit/ddae516928663b2a9a181eb387dc1ff9aa567c79)) - *(examples)* Add emulation twitter android `TLS`/`HTTP2` example ([#612](https://github.com/0x676e67/wreq/issues/612)) - ([40c9a70](https://github.com/0x676e67/wreq/commit/40c9a70ad015e4f8db1e9dac8416c61b25d05318)) - *(header)* Enhance the usability of `OriginalHeaders` API ([#731](https://github.com/0x676e67/wreq/issues/731)) - ([99bfc39](https://github.com/0x676e67/wreq/commit/99bfc391dc5e90f439576da282f94be9bb78b1f8)) - *(headers)* Omit payload length for HTTP/2 `OPTIONS` ([#785](https://github.com/0x676e67/wreq/issues/785)) - 
([bb00275](https://github.com/0x676e67/wreq/commit/bb00275602ea2468d42ee4674652315b4ae2dc6d)) - *(http2)* Upgrade `http2` dependency to 0.5.0 ([#651](https://github.com/0x676e67/wreq/issues/651)) - ([a21827b](https://github.com/0x676e67/wreq/commit/a21827bb30bc656d9ae0e71a5e6fa3cff4d6e94f)) - *(lib)* Export `EmulationBuilder` as a public API ([#825](https://github.com/0x676e67/wreq/issues/825)) - ([080f85f](https://github.com/0x676e67/wreq/commit/080f85f1021a5555586b986c4e6addaabaeba018)) - *(pool)* Distinguish connections by request emulation ([#841](https://github.com/0x676e67/wreq/issues/841)) - ([67884ee](https://github.com/0x676e67/wreq/commit/67884eea31720d743c98bca27b8c9fea02a2f555)) - *(redirect)* Per-request redirect config support ([#710](https://github.com/0x676e67/wreq/issues/710)) - ([265df64](https://github.com/0x676e67/wreq/commit/265df646689eceb08ab020535f756ce055182ec1)) - *(request)* Support per-request emulation configuration ([#759](https://github.com/0x676e67/wreq/issues/759)) - ([2ec6d21](https://github.com/0x676e67/wreq/commit/2ec6d21ad9ba1c815c81c9152fc35f70744b7211)) - *(request)* Adjust internal structure and allow skip default headers ([#723](https://github.com/0x676e67/wreq/issues/723)) - ([7be331d](https://github.com/0x676e67/wreq/commit/7be331d2dffc4dbe5226c95c5b8b3dd96897a324)) - *(request)* Setting headers order at the request level ([#602](https://github.com/0x676e67/wreq/issues/602)) - ([3b280f8](https://github.com/0x676e67/wreq/commit/3b280f845538e12a92f0976b9455604f8260ef90)) - *(response)* Add `Response.local_addr()` to get local address ([#835](https://github.com/0x676e67/wreq/issues/835)) - ([35652f5](https://github.com/0x676e67/wreq/commit/35652f547fc293a8339b5e475fd5e8b41e2fafd3)) - *(tls)* Treat different TLS configs as distinct sessions ([#779](https://github.com/0x676e67/wreq/issues/779)) - ([e05406d](https://github.com/0x676e67/wreq/commit/e05406d4bfdb93837c9168fecd24a467908ba7a5)) - *(tls)* Add API to set list of stable 
curves ([#633](https://github.com/0x676e67/wreq/issues/633)) - ([ea0eb17](https://github.com/0x676e67/wreq/commit/ea0eb17ed425d6477eebe629b7851f0e51a1bc75)) - *(websocket)* Support per-request emulation configuration ([#764](https://github.com/0x676e67/wreq/issues/764)) - ([468f86f](https://github.com/0x676e67/wreq/commit/468f86fd5811043a1d89437f1bea30c8cfbf93b8)) - *(ws)* Add support for header order on websocket builder ([#608](https://github.com/0x676e67/wreq/issues/608)) - ([ad9e0b9](https://github.com/0x676e67/wreq/commit/ad9e0b97d5733a800b17c32851c1824da83d05c4)) ### Bug Fixes - *(client)* Fix `HTTP/2` safe retry policy ([#715](https://github.com/0x676e67/wreq/issues/715)) - ([3a5c356](https://github.com/0x676e67/wreq/commit/3a5c35697d12dcf67d30db88abb8d1fe37b638a7)) - *(client)* Prevent future stack overflow in request handling ([#685](https://github.com/0x676e67/wreq/issues/685)) - ([402ffe3](https://github.com/0x676e67/wreq/commit/402ffe3184362a18696791621261c744a5f413b2)) - *(client)* Update client to retain tls keylog configuration ([#619](https://github.com/0x676e67/wreq/issues/619)) - ([22c0770](https://github.com/0x676e67/wreq/commit/22c0770d3a123fa2569d9174112fa7c2a309220f)) - *(client)* Fix `HTTP2` extensions to be applied in retry requests ([#596](https://github.com/0x676e67/wreq/issues/596)) - ([a1f0d32](https://github.com/0x676e67/wreq/commit/a1f0d32ede0bb146230781603d532217ccdc0430)) - *(core)* Improve client errors details if available ([#665](https://github.com/0x676e67/wreq/issues/665)) - ([fb41f70](https://github.com/0x676e67/wreq/commit/fb41f70c7b70a556c2a97f9b699049a5e1fb58f4)) - *(dns)* Prefer IPv6 addresses before IPv4 even if resolver ordered differently ([#658](https://github.com/0x676e67/wreq/issues/658)) - ([e913768](https://github.com/0x676e67/wreq/commit/e913768cf1be11b277b9b84b2f31b0090e426450)) - *(error)* Error::is_timeout() checks for crate::core::Error::is_timeout() - 
([34e79f1](https://github.com/0x676e67/wreq/commit/34e79f1ea81085c66e9ffb66066c8a35254ebdc1)) - *(error)* Include request URL in error messages ([#737](https://github.com/0x676e67/wreq/issues/737)) - ([f312645](https://github.com/0x676e67/wreq/commit/f312645c31f53bcbf24d3899132b6fd9af890beb)) - *(hash)* Fix #780 ([#784](https://github.com/0x676e67/wreq/issues/784)) - ([7b5808d](https://github.com/0x676e67/wreq/commit/7b5808dbb6073cb81e657aedb19ce2f9965875d5)) - *(http2)* Rename `unknown_setting8` to `enable_connect_protocol` ([#647](https://github.com/0x676e67/wreq/issues/647)) - ([3464105](https://github.com/0x676e67/wreq/commit/34641053a7e2f6737ccf9803cc7ab02cc9d3c103)) - *(pool)* Cap pool idle interval to a minimum ([#814](https://github.com/0x676e67/wreq/issues/814)) - ([daba062](https://github.com/0x676e67/wreq/commit/daba06298e60ef67a8a57de15aaad0ac071294be)) - *(pool)* Don't spawn pool idle interval if timeout is 0 ([#806](https://github.com/0x676e67/wreq/issues/806)) - ([a6deeb4](https://github.com/0x676e67/wreq/commit/a6deeb44b8e8d67e322a33759b264bc81a17e7d4)) - *(proxy)* Restore default port 1080 for SOCKS proxies without explicit port ([#821](https://github.com/0x676e67/wreq/issues/821)) - ([256de2b](https://github.com/0x676e67/wreq/commit/256de2bb5ff60bd0f040277e0020ef84d0ea8b12)) - *(proxy)* Set https system proxy on windows ([#678](https://github.com/0x676e67/wreq/issues/678)) - ([7111b13](https://github.com/0x676e67/wreq/commit/7111b131db66abdddbbccafa5450f3d1637d229b)) - *(redirect)* Make the number of redirects of policy matches its maximum limit ([#629](https://github.com/0x676e67/wreq/issues/629)) - ([85bad99](https://github.com/0x676e67/wreq/commit/85bad9996a9d8785feb92cf3d2c3c845bc10a306)) - *(request)* Fix headers order ([#603](https://github.com/0x676e67/wreq/issues/603)) - ([9c85532](https://github.com/0x676e67/wreq/commit/9c8553229f62c901a2b739fed413be08fa558d4b)) - *(tls)* Fix encoding error when multiple ALPS extensions are present 
([#861](https://github.com/0x676e67/wreq/issues/861)) - ([6ce6c73](https://github.com/0x676e67/wreq/commit/6ce6c73cd0479a169d0f7e6f90c4073cf6e3fc0a)) - *(ws)* Improve status code message on WebSocket upgrade failure ([#824](https://github.com/0x676e67/wreq/issues/824)) - ([4f6f6da](https://github.com/0x676e67/wreq/commit/4f6f6da67bc990be1753c4bb8e546c1b7ed35889)) ### Refactor - *(client)* Use `Either` to unify generic and boxed `Client` service types ([#849](https://github.com/0x676e67/wreq/issues/849)) - ([9cb05e7](https://github.com/0x676e67/wreq/commit/9cb05e794a6d5f1421482e15117ece37180099a7)) - *(client)* Move HTTP/2 safe retry logic into `tower` middleware ([#713](https://github.com/0x676e67/wreq/issues/713)) - ([136c791](https://github.com/0x676e67/wreq/commit/136c7912b54bb74cecc48618415a64f865d7830c)) - *(client)* Move read timeout logic into `tower` middleware ([#702](https://github.com/0x676e67/wreq/issues/702)) - ([06d5e47](https://github.com/0x676e67/wreq/commit/06d5e47f7dfb6353553d9bf5e99b185f644c19fd)) - *(client)* Move total timeout logic into Tower middleware ([#701](https://github.com/0x676e67/wreq/issues/701)) - ([ed8b2ea](https://github.com/0x676e67/wreq/commit/ed8b2eab0ee71278fd2f787089026cb66f64dd29)) - *(client)* Remove legacy HTTP/1 and HTTP/2 tuning options ([#644](https://github.com/0x676e67/wreq/issues/644)) - ([f019267](https://github.com/0x676e67/wreq/commit/f019267dd11fc7dd5ce4ab72b4c85a689a206710)) - *(client)* Replace header map by key - ([6012542](https://github.com/0x676e67/wreq/commit/60125429e1764e50b064c835a77e009e06a18827)) - *(client)* Replace header map by key ([#618](https://github.com/0x676e67/wreq/issues/618)) - ([237b17a](https://github.com/0x676e67/wreq/commit/237b17a649cf201fbac706044bb665e84c514804)) - *(config)* Replace duplicate types with type aliases ([#740](https://github.com/0x676e67/wreq/issues/740)) - ([6bb210b](https://github.com/0x676e67/wreq/commit/6bb210b95d550ac415e7fea3d142d2296e1d4fa1)) - *(config)* Unify 
request extensions config processing ([#712](https://github.com/0x676e67/wreq/issues/712)) - ([fb1b7b2](https://github.com/0x676e67/wreq/commit/fb1b7b2f3aab1e2c02ee9a0927ae5750e0ae740e)) - *(config)* Remove public config fields and improve backward compatibility ([#614](https://github.com/0x676e67/wreq/issues/614)) - ([6631c5c](https://github.com/0x676e67/wreq/commit/6631c5c9f4489b5b323eab25d953fb9d13b698f8)) - *(connect)* Modularize components by responsibility ([#819](https://github.com/0x676e67/wreq/issues/819)) - ([c996ec7](https://github.com/0x676e67/wreq/commit/c996ec7b0b6dca703b75b0007f9f36b142c9cc64)) - *(connect)* Remove `Connect` trait alias wrapper around `tower::Service` ([#807](https://github.com/0x676e67/wreq/issues/807)) - ([947a25b](https://github.com/0x676e67/wreq/commit/947a25b7f158f84c8483fbf18e780dc4747970b2)) - *(connect)* Streamline connector builder structure ([#705](https://github.com/0x676e67/wreq/issues/705)) - ([eb9308b](https://github.com/0x676e67/wreq/commit/eb9308bb3ded0b9fd6eabd77a947738f9ac78705)) - *(connect)* Cleanup dead code for `tracing` feature ([#689](https://github.com/0x676e67/wreq/issues/689)) - ([5574786](https://github.com/0x676e67/wreq/commit/5574786b1a2572a13b5dea8c59e554cf9b63acf0)) - *(connect)* Refactored internal connector builder - ([39f779b](https://github.com/0x676e67/wreq/commit/39f779b90b3a12705f5658f6a3c43a00c721d88e)) - *(cookie)* Integrate cookie store into `tower` layer ([#695](https://github.com/0x676e67/wreq/issues/695)) - ([c0cf8e3](https://github.com/0x676e67/wreq/commit/c0cf8e396b5b9743e6b19b9b59b60753a3052802)) - *(cookie)* Remove redundant store abstraction API ([#635](https://github.com/0x676e67/wreq/issues/635)) - ([8e34a91](https://github.com/0x676e67/wreq/commit/8e34a913e45cf684711c9c5c45a7e62f48d62cee)) - *(core)* Separate `body` and `proto` responsibilities ([#839](https://github.com/0x676e67/wreq/issues/839)) - 
([9e65c9f](https://github.com/0x676e67/wreq/commit/9e65c9f1d4d4a00ded6a3916e74a99486cb41eb6)) - *(core)* Add socket addr to ConnectError ([#663](https://github.com/0x676e67/wreq/issues/663)) - ([877aa9c](https://github.com/0x676e67/wreq/commit/877aa9c7e791717a8c5ff106a376877d14442211)) - *(core)* Reduce dependency on `futures-util` ([#636](https://github.com/0x676e67/wreq/issues/636)) - ([87ed77b](https://github.com/0x676e67/wreq/commit/87ed77b02a251b65aed014a9d329a75a6d92e76a)) - *(core/client)* Remove old body delay_eof code ([#736](https://github.com/0x676e67/wreq/issues/736)) - ([a9d5db1](https://github.com/0x676e67/wreq/commit/a9d5db12aadfc17132c8444acaedb660ae67febe)) - *(decoder)* Migrate decompression handling to tower-http ([#720](https://github.com/0x676e67/wreq/issues/720)) - ([e2427d8](https://github.com/0x676e67/wreq/commit/e2427d8c60ea370ba092dda766d74ffd119e1655)) - *(dns)* Disable export of `hickory_resolver` module ([#646](https://github.com/0x676e67/wreq/issues/646)) - ([68fc1e4](https://github.com/0x676e67/wreq/commit/68fc1e4dcd65b0567a3e3b1fa4b485c42652d1b3)) - *(error)* Use standard library-style error handling ([#722](https://github.com/0x676e67/wreq/issues/722)) - ([97657fd](https://github.com/0x676e67/wreq/commit/97657fd816202dbd8f34a0f0733422dedd27184e)) - *(future)* Simplify `Client` future types with `Either` ([#851](https://github.com/0x676e67/wreq/issues/851)) - ([b6922d0](https://github.com/0x676e67/wreq/commit/b6922d0902dbe24daa656c06dcc4172b5193e43a)) - *(header)* Preserve header order and casing in `OrigHeaderMap` redesign ([#860](https://github.com/0x676e67/wreq/issues/860)) - ([cc0e637](https://github.com/0x676e67/wreq/commit/cc0e637798a115f4fdc41a1fd3799c5bdd10e127)) - *(http1)* Remove support for `title_case_headers` - ([4501d9a](https://github.com/0x676e67/wreq/commit/4501d9ace91a6adab041cddc8ce6d5e964e278c8)) - *(http2)* Add descriptive error for non-empty body in CONNECT request
([#634](https://github.com/0x676e67/wreq/issues/634)) - ([fa413e6](https://github.com/0x676e67/wreq/commit/fa413e629687df306d937d3f69e64619c80ad524)) - *(internally)* Normalize internal module structure ([#790](https://github.com/0x676e67/wreq/issues/790)) - ([8b768e7](https://github.com/0x676e67/wreq/commit/8b768e7579cacf8c85cb580abb47c88b9b7662dd)) - *(internally)* Backport hyper client ([#624](https://github.com/0x676e67/wreq/issues/624)) - ([4efc5a7](https://github.com/0x676e67/wreq/commit/4efc5a7c227dd257ca866fe1772341803d3c91bc)) - *(internally)* Refactor internal certificate loading ([#616](https://github.com/0x676e67/wreq/issues/616)) - ([2bf9da8](https://github.com/0x676e67/wreq/commit/2bf9da8b0defd4f805fccbd60d4468b14c9dfcdd)) - *(io)* Drop duplicated legacy IO code ([#836](https://github.com/0x676e67/wreq/issues/836)) - ([0b22b58](https://github.com/0x676e67/wreq/commit/0b22b585d7da3f7375d4d42d109f056c3769a089)) - *(layer)* Simplify tower `Service` error conversion ([#850](https://github.com/0x676e67/wreq/issues/850)) - ([e577afc](https://github.com/0x676e67/wreq/commit/e577afc903ff416b6db486b0e7c2fe0112914cf9)) - *(module)* Separate hash responsibilities ([#856](https://github.com/0x676e67/wreq/issues/856)) - ([a5f5caa](https://github.com/0x676e67/wreq/commit/a5f5caadd3513bd8d70081d20a131bd77fdc8451)) - *(module)* Separate util responsibilities ([#838](https://github.com/0x676e67/wreq/issues/838)) - ([9756969](https://github.com/0x676e67/wreq/commit/975696987c4e04a570f7af7fbd2f81de7de932b4)) - *(module)* Separate `proxy` and `client` responsibilities ([#833](https://github.com/0x676e67/wreq/issues/833)) - ([6b71f74](https://github.com/0x676e67/wreq/commit/6b71f74f70179a3499480b10d19f9bd26f0c5bd9)) - *(pool)* Simplify idle task using async/await ([#812](https://github.com/0x676e67/wreq/issues/812)) - ([808da8c](https://github.com/0x676e67/wreq/commit/808da8ceda00e88188339fde3477f097ce4d12a3)) - *(proxy)* Remove duplicated basic auth encoder 
([#845](https://github.com/0x676e67/wreq/issues/845)) - ([5b0cf72](https://github.com/0x676e67/wreq/commit/5b0cf72b98be499c9fe4fe8a789d8ac8f9dbf88f)) - *(proxy)* Replace string comparison with constant comparison ([#820](https://github.com/0x676e67/wreq/issues/820)) - ([d5d60ab](https://github.com/0x676e67/wreq/commit/d5d60ab5c4b445e4584dbec8e65c801f6a6baaf2)) - *(proxy)* Remove support for `Proxy::custom` ([#756](https://github.com/0x676e67/wreq/issues/756)) - ([1a5a36a](https://github.com/0x676e67/wreq/commit/1a5a36a5b26de80f70f5116cfdf86806f39f2938)) - *(proxy)* Migrate proxy matcher from hyper-util ([#675](https://github.com/0x676e67/wreq/issues/675)) - ([fafe3a6](https://github.com/0x676e67/wreq/commit/fafe3a615319386ab5a908780996d61ab87dbe61)) - *(redirect)* Migrate from `iri-string` to `url` crate for URI resolution ([#757](https://github.com/0x676e67/wreq/issues/757)) - ([7b72c18](https://github.com/0x676e67/wreq/commit/7b72c18707c661e3e2eb4256a3d6aa00c6c1dd51)) - *(redirect)* Redesign redirection logic in `tower` middleware ([#708](https://github.com/0x676e67/wreq/issues/708)) - ([a53ce43](https://github.com/0x676e67/wreq/commit/a53ce43adde765be625fc5e1b176fffdfd5c0975)) - *(rewind)* Replace manual implementations of `ReadBufCursor` methods ([#595](https://github.com/0x676e67/wreq/issues/595)) - ([e11e214](https://github.com/0x676e67/wreq/commit/e11e214248f8a9bbe1a998f70b823f026035f3f6)) - *(service)* Eliminate unnecessary URL parsing ([#831](https://github.com/0x676e67/wreq/issues/831)) - ([4339692](https://github.com/0x676e67/wreq/commit/4339692b7333c8be6c6ed779ac8f172aaca12e40)) - *(socks)* Migrate to `tokio-socks` for easier maintenance ([#766](https://github.com/0x676e67/wreq/issues/766)) - ([b405fda](https://github.com/0x676e67/wreq/commit/b405fda0da727f35457a2a8b751be5c27455c50c)) - *(socks)* Reuse socks module logic from hyper-util ([#686](https://github.com/0x676e67/wreq/issues/686)) - 
([ecb1493](https://github.com/0x676e67/wreq/commit/ecb1493d6cc259bf754ffdc0c93fd946c6a47d97)) - *(timeout)* Simplify `Pin` wrapping ([#732](https://github.com/0x676e67/wreq/issues/732)) - ([40518b6](https://github.com/0x676e67/wreq/commit/40518b6d4488474f098661de1f11393931d9ccdd)) - *(tls)* Allow setting `ALPN`/`ALPS` protocol preference order ([#743](https://github.com/0x676e67/wreq/issues/743)) - ([7d7f65f](https://github.com/0x676e67/wreq/commit/7d7f65f7e70194a4ec69af143d906302dd587486)) - *(tls)* Redesign certificate compression API for clarity and consistency ([#742](https://github.com/0x676e67/wreq/issues/742)) - ([7097c8d](https://github.com/0x676e67/wreq/commit/7097c8da26db2b7ac28287de257e0477ca1d0043)) - *(tls)* Remove unnecessary lazy closure from `TlsConnector` ([#739](https://github.com/0x676e67/wreq/issues/739)) - ([37cd919](https://github.com/0x676e67/wreq/commit/37cd919b41cf74c241071e8b7cc8f6ba29f9864f)) - *(tls)* Refactor TLS keylog tracking policy ([#655](https://github.com/0x676e67/wreq/issues/655)) - ([d88c83d](https://github.com/0x676e67/wreq/commit/d88c83dd2db6953415a62cb395efa3f07d95e355)) - *(tls)* Remove configuration not associated with TLS extensions ([#654](https://github.com/0x676e67/wreq/issues/654)) - ([d62475f](https://github.com/0x676e67/wreq/commit/d62475f7d4766e4e2356dc21a39cf244b21c0d36)) - *(tls)* Refactor certificate compression algorithm configuration API ([#639](https://github.com/0x676e67/wreq/issues/639)) - ([058fc9a](https://github.com/0x676e67/wreq/commit/058fc9a6c9152d088d723369fe408c658b4eea6c)) - *(tls)* Refactor extension permutation configuration API ([#638](https://github.com/0x676e67/wreq/issues/638)) - ([da9059b](https://github.com/0x676e67/wreq/commit/da9059b9d44a429dd82abf9d883209662ad5cdbe)) - *(tls)* Distinguish certificate identity from store ([#621](https://github.com/0x676e67/wreq/issues/621)) - ([89e2c5c](https://github.com/0x676e67/wreq/commit/89e2c5ce687dab7c08c6da7c77c089afd97a3ab8)) - *(websocket)* 
Standardize WebSocket module exports ([#645](https://github.com/0x676e67/wreq/issues/645)) - ([f61a89f](https://github.com/0x676e67/wreq/commit/f61a89f0cc0e1279469d70a99c055fea53e8d173)) - *(ws)* Refactor HTTP2 upgrade to WebSocket ([#802](https://github.com/0x676e67/wreq/issues/802)) - ([e7b7052](https://github.com/0x676e67/wreq/commit/e7b705234e68f9a0e39eb86791931d985506a04b)) - Restructure the core implementation of the client ([#668](https://github.com/0x676e67/wreq/issues/668)) - ([1d445cb](https://github.com/0x676e67/wreq/commit/1d445cb15f444e8104cb264a0fae2e05091e3b8d)) - Store request timeout in request extensions instead ([#660](https://github.com/0x676e67/wreq/issues/660)) - ([e666be4](https://github.com/0x676e67/wreq/commit/e666be434af04458a86a4f8ae3d7bd1cf624002c)) - Remove futures-util unless using stream/multipart/compression/websocket/core ([#653](https://github.com/0x676e67/wreq/issues/653)) - ([e3d0c9f](https://github.com/0x676e67/wreq/commit/e3d0c9f960dd7803e83b2c024d1e5f736bccd50c)) - Drop futures-util for leaner core ([#648](https://github.com/0x676e67/wreq/issues/648)) - ([f46c161](https://github.com/0x676e67/wreq/commit/f46c1618e6a42722f024acad7db526b121536b44)) - Backport use `hyper-util` Tunnel ([#642](https://github.com/0x676e67/wreq/issues/642)) - ([446719d](https://github.com/0x676e67/wreq/commit/446719daecf7e4e2479f7c7b5f3785c6da2bddad)) - Renamed `tls_key_log_file` to `tls_keylog_file` for consistency ([#610](https://github.com/0x676e67/wreq/issues/610)) - ([5d1a85a](https://github.com/0x676e67/wreq/commit/5d1a85a1cc04a2380091398dee43146823590545)) ### Documentation - *(README)* Update for HTTP/3 over QUIC support - ([bba899c](https://github.com/0x676e67/wreq/commit/bba899c2b579f97a399b628c71f179ed07236a75)) - *(client)* Update `tcp_user_timeout` docs - ([1fa4d44](https://github.com/0x676e67/wreq/commit/1fa4d44394bd3fe1efe3bbbbd127a05cfc80d20d)) - *(client)* Clarify `Client` method usage 
([#795](https://github.com/0x676e67/wreq/issues/795)) - ([3f56875](https://github.com/0x676e67/wreq/commit/3f56875e6c58176b7a73ed0464b0e6fcf5e16f8c)) - *(client)* Update `tower` middleware integration documentation ([#716](https://github.com/0x676e67/wreq/issues/716)) - ([6094176](https://github.com/0x676e67/wreq/commit/60941762e8addec2788c3ae97cf7714eab9967cd)) - *(connect)* Update docs for `Connector` builder - ([62b3b4a](https://github.com/0x676e67/wreq/commit/62b3b4a7291847faf36b694513ab38970fd3bda2)) - *(layer)* Update docs - ([ff14827](https://github.com/0x676e67/wreq/commit/ff1482791d218acff48469c04af4bccc8e38e44b)) - *(middleware)* Update module docs - ([b6b7071](https://github.com/0x676e67/wreq/commit/b6b7071c74844f365ffb4aa137f29be3f73cfd02)) - *(proxy)* Fix some typos in comment ([#592](https://github.com/0x676e67/wreq/issues/592)) - ([25f85b0](https://github.com/0x676e67/wreq/commit/25f85b06ce72181009e8e2727977557b47df4c68)) - *(timeout)* Update docs - ([512fa22](https://github.com/0x676e67/wreq/commit/512fa2281e665cb79e459ce6e3b5d6e124aed25a)) - *(tls)* Update prefer chacha20 documentation - ([9652f46](https://github.com/0x676e67/wreq/commit/9652f46735ad774e956b747fd2c2ff4f3dcb7bd9)) - *(ws)* Remove redundant comments - ([b401440](https://github.com/0x676e67/wreq/commit/b4014407690c8c73850c2a4299553f336e659fe5)) - *(x509)* Clarify thread safety and usage of CertStore ([#846](https://github.com/0x676e67/wreq/issues/846)) - ([f1423f8](https://github.com/0x676e67/wreq/commit/f1423f8414ff01bb4b1c3e5318e1f917754ab9ca)) - Revise and correct API documentation ([#724](https://github.com/0x676e67/wreq/issues/724)) - ([458e473](https://github.com/0x676e67/wreq/commit/458e4731ff25ca2625c4c2c53921be66c8a6bb8b)) - Improve formatting in documentation ([#696](https://github.com/0x676e67/wreq/issues/696)) - ([867a8a2](https://github.com/0x676e67/wreq/commit/867a8a273028926a838f71e7b6ead728c3234d11)) - Fix package docs - 
([da20f76](https://github.com/0x676e67/wreq/commit/da20f766e9a25cad0cff7c128be8f7f1c0f2099e)) - Cleanup legacy server documentation - ([7a0b11c](https://github.com/0x676e67/wreq/commit/7a0b11cf8f013e644d63d70d07a0e21289c86bb9)) - Update documentation build ([#609](https://github.com/0x676e67/wreq/issues/609)) - ([eb06ebc](https://github.com/0x676e67/wreq/commit/eb06ebc81ac24ae821c4756196b58585303b723a)) ### Performance - *(client)* Avoid full `ClientRef` clone by matching and cloning service in-place ([#758](https://github.com/0x676e67/wreq/issues/758)) - ([8e547ad](https://github.com/0x676e67/wreq/commit/8e547ad17a504a230eb770a1ce3b92b6d0765186)) - *(client)* Replace `Box` with generic type for `Box` ([#755](https://github.com/0x676e67/wreq/issues/755)) - ([eb07a2a](https://github.com/0x676e67/wreq/commit/eb07a2af08afb8be084f6623625af0f13a87745a)) - *(client)* Optimize dyn trait response to reduce runtime overhead ([#746](https://github.com/0x676e67/wreq/issues/746)) - ([0d5cbaf](https://github.com/0x676e67/wreq/commit/0d5cbaf03ff4646c0b81152022fb223a2ffee329)) - *(client)* Optimize response future wrapping calls ([#726](https://github.com/0x676e67/wreq/issues/726)) - ([e24a0cd](https://github.com/0x676e67/wreq/commit/e24a0cdc422576f68b450f7b96c678fc2655f400)) - *(client)* Remove redundant execute request calls ([#718](https://github.com/0x676e67/wreq/issues/718)) - ([4285cf7](https://github.com/0x676e67/wreq/commit/4285cf7278813a9c2e6e1de7d77bfe7c9fc82470)) - *(client)* Avoid redundant box of `tower` layers ([#717](https://github.com/0x676e67/wreq/issues/717)) - ([0ae67f8](https://github.com/0x676e67/wreq/commit/0ae67f8b6aed1b956d1314fa2dc03f310f430286)) - *(connect)* Simplify complex `TokioIo` wrapper ([#763](https://github.com/0x676e67/wreq/issues/763)) - ([807c33b](https://github.com/0x676e67/wreq/commit/807c33b0a2e47ef5da081b475c584541f27a54d0)) - *(connect)* Embed single timeout layer directly to avoid `Box::pin` 
([#725](https://github.com/0x676e67/wreq/issues/725)) - ([9d24080](https://github.com/0x676e67/wreq/commit/9d2408034372617a49f863f4fab9be381e46f1d7)) - *(cookie)* Avoid redundant conditional checks ([#730](https://github.com/0x676e67/wreq/issues/730)) - ([574ab8e](https://github.com/0x676e67/wreq/commit/574ab8ef32b8fd91007681d987336e518802c27e)) - *(cookie)* Avoid unnecessary URL parsing in cookie handling ([#699](https://github.com/0x676e67/wreq/issues/699)) - ([fa07991](https://github.com/0x676e67/wreq/commit/fa079912830a947df50632dd98751f7f351d5b4d)) - *(decoder)* Avoid unnecessary clone of decompression service ([#828](https://github.com/0x676e67/wreq/issues/828)) - ([ce78205](https://github.com/0x676e67/wreq/commit/ce78205750b08ae9c2565118870c9974681dd95e)) - *(ext)* Avoid deep calls when inlining is disabled ([#799](https://github.com/0x676e67/wreq/issues/799)) - ([e14a159](https://github.com/0x676e67/wreq/commit/e14a1592f68e235af88a275d52ce7b21f7a3306e)) - *(hash)* Improve hashing efficiency for large structures ([#780](https://github.com/0x676e67/wreq/issues/780)) - ([7a7730e](https://github.com/0x676e67/wreq/commit/7a7730e2c71b0005a31ce94298236691be5a5750)) - *(proxy)* Remove unnecessary sorting from `HeaderMap` ([#857](https://github.com/0x676e67/wreq/issues/857)) - ([2de64fe](https://github.com/0x676e67/wreq/commit/2de64fe14c591d07c07cf28d582dc8bebe7069d5)) - *(proxy)* Remove unnecessary call wrapping ([#855](https://github.com/0x676e67/wreq/issues/855)) - ([2472d39](https://github.com/0x676e67/wreq/commit/2472d39e2128740e437c3d0846f18ea0ff96c148)) - *(proxy)* Use zero-copy Bytes for proxy credentials ([#729](https://github.com/0x676e67/wreq/issues/729)) - ([5bb8e06](https://github.com/0x676e67/wreq/commit/5bb8e06499613d13fab1dc573ce8a1b61b70c23f)) - *(redirect)* Avoid copy when redirection is unsupported ([#728](https://github.com/0x676e67/wreq/issues/728)) - ([741b81e](https://github.com/0x676e67/wreq/commit/741b81edc5201f79542c7e09eb3d46b0f3440062)) - 
*(req/resp)* Inline frequently called accessor methods - ([7dc3424](https://github.com/0x676e67/wreq/commit/7dc3424a807bb5c60481cb0c6fb6551be2cefacd)) - *(response)* Avoid unnecessary URL cloning ([#747](https://github.com/0x676e67/wreq/issues/747)) - ([95743b3](https://github.com/0x676e67/wreq/commit/95743b37522f8992803427809ed2e0a90ae7902d)) - *(socks)* Optimize SOCKS connection handling ([#769](https://github.com/0x676e67/wreq/issues/769)) - ([5d3fe85](https://github.com/0x676e67/wreq/commit/5d3fe8530dedf76f4fc937981a29fccfbfb764c1)) - *(socks)* Optimize DNS resolution with custom non-blocking resolver ([#687](https://github.com/0x676e67/wreq/issues/687)) - ([49520ce](https://github.com/0x676e67/wreq/commit/49520ce80b6211ec85abfda9655b9196e34c0438)) - *(timeout)* Encapsulate all per-request timeout extensions uniformly ([#804](https://github.com/0x676e67/wreq/issues/804)) - ([dab45fd](https://github.com/0x676e67/wreq/commit/dab45fde9c70e646d576f049e4a46b7c5113fcb3)) - *(timeout)* Reduce unnecessary `as_mut` calls ([#719](https://github.com/0x676e67/wreq/issues/719)) - ([fa9570c](https://github.com/0x676e67/wreq/commit/fa9570c35220963e2c17a0741edaebf0fc340974)) - *(tls)* Inline builder hot path code - ([bc2ff43](https://github.com/0x676e67/wreq/commit/bc2ff43c1b4c39293426cce42724db1b2afd789f)) - *(tls)* Flatten TLS info construction for better performance ([#847](https://github.com/0x676e67/wreq/issues/847)) - ([2ab4edd](https://github.com/0x676e67/wreq/commit/2ab4edd01c2c022ae4bda0312c3f6307371916e9)) - *(tls)* Connect stage reduces call chains - ([29c9bd8](https://github.com/0x676e67/wreq/commit/29c9bd8d9beae3be15de37693341e192b8225e0a)) - *(ws)* Inline frequently called accessor methods ([#782](https://github.com/0x676e67/wreq/issues/782)) - ([929d917](https://github.com/0x676e67/wreq/commit/929d91777539911994527ed6d15ebf31e463b689)) - Inline hotspot `poll` method to reduce call overhead ([#714](https://github.com/0x676e67/wreq/issues/714)) - 
([8c26d8b](https://github.com/0x676e67/wreq/commit/8c26d8b8f58de8a00d7e0a17dc63ccdfe1145653)) ### Styling - *(client)* Shorten paths in type aliases ([#733](https://github.com/0x676e67/wreq/issues/733)) - ([c83b8e8](https://github.com/0x676e67/wreq/commit/c83b8e82a4b21d63c79922df09b737066e3f314d)) - *(connector)* Simplify user-defined timeout layer setup ([#827](https://github.com/0x676e67/wreq/issues/827)) - ([d620a25](https://github.com/0x676e67/wreq/commit/d620a252eb7549b8cdd079897736d2847e1019cc)) - *(cookie)* Sync upstream API style ([#659](https://github.com/0x676e67/wreq/issues/659)) - ([03041af](https://github.com/0x676e67/wreq/commit/03041af75026269db1763636390a3bf72fe000d4)) - *(proxy)* Simplify `Matcher` Debug implementation - ([f15f36e](https://github.com/0x676e67/wreq/commit/f15f36e158091bf352fcfc334d9056d84889e6f8)) - *(proxy)* Simplify path constraint for `http::Uri` - ([171e7b8](https://github.com/0x676e67/wreq/commit/171e7b83b6e8647f05313f9b4bfbf24e6300cc78)) - *(redirect)* Rename `TowerRedirectPolicy` to `RedirectPolicy` - ([1e4431b](https://github.com/0x676e67/wreq/commit/1e4431b92f765397f89f92f542111bf5e694682f)) - *(request)* Simplify request config access ([#793](https://github.com/0x676e67/wreq/issues/793)) - ([0f6f523](https://github.com/0x676e67/wreq/commit/0f6f5232510cb9cce4b437a9e81685377f56fae2)) - *(socks)* Clippy format - ([20c8236](https://github.com/0x676e67/wreq/commit/20c8236d85e87c1693e01e61566f9d6f46652055)) - *(tunnel)* Fmt code - ([1a489b5](https://github.com/0x676e67/wreq/commit/1a489b5305512094d43274659173f2625a45ba0c)) - Format crate imports for consistency ([#709](https://github.com/0x676e67/wreq/issues/709)) - ([777c6e5](https://github.com/0x676e67/wreq/commit/777c6e5e137024d6f09bf1b53eff7434e573cbb4)) - Fmt code - ([7fb9b1e](https://github.com/0x676e67/wreq/commit/7fb9b1e88df9e088b3920620c84aad1ea0d2a7bb)) ### Testing - *(badssl)* Enable test_aes_hw_override test - 
([a37219a](https://github.com/0x676e67/wreq/commit/a37219a47b0903375d033cc9a5c6e3701dcb4b74)) - *(deps)* Bump `hyper-util` to v0.1.13 ([#667](https://github.com/0x676e67/wreq/issues/667)) - ([862361c](https://github.com/0x676e67/wreq/commit/862361cac33a200bddbdd2c6b3430da36bccadda)) - *(emulation)* Add tests for additional emulation options ([#823](https://github.com/0x676e67/wreq/issues/823)) - ([e0b76a8](https://github.com/0x676e67/wreq/commit/e0b76a8d40cf0795fc5a81704248edc88b55b439)) - *(emulation)* Add firefox tests ([#822](https://github.com/0x676e67/wreq/issues/822)) - ([6ed1974](https://github.com/0x676e67/wreq/commit/6ed1974744138b6d6f0cd678d652ec32fcab1751)) - *(timeout)* Update timeout tests ([#691](https://github.com/0x676e67/wreq/issues/691)) - ([3781cef](https://github.com/0x676e67/wreq/commit/3781cefb547d0052d8b96a781ec6096ce86e2a64)) - Remove redundant decompression tests ([#734](https://github.com/0x676e67/wreq/issues/734)) - ([8efcd19](https://github.com/0x676e67/wreq/commit/8efcd19925d654ff4cc4a2f61c70672e2890fa60)) - Tests affected by removal of proxy-related environment variables ([#692](https://github.com/0x676e67/wreq/issues/692)) - ([79648b5](https://github.com/0x676e67/wreq/commit/79648b531199cbe86b1c0db4d570e38cf25ff2da)) - Switch over from libflate to flate2 in tests to reduce dependency footprint ([#593](https://github.com/0x676e67/wreq/issues/593)) - ([dc74305](https://github.com/0x676e67/wreq/commit/dc74305dc83a19ce0f0320a91d42ee1e76f13860)) ### Miscellaneous Tasks - *(body)* Re-expose body mod - ([99e27f2](https://github.com/0x676e67/wreq/commit/99e27f203c2dd766707494f3c76f8f5a4d69b092)) - *(body)* Re-expose body mod - ([1d9ee72](https://github.com/0x676e67/wreq/commit/1d9ee729de0eacb68167506f456db871146ec85c)) - *(client)* Remove unused comment - ([efac842](https://github.com/0x676e67/wreq/commit/efac842c9d54bf4e6e7fd83779c4123dab81f48c)) - *(client)* Defer initialization of internal client 
([#811](https://github.com/0x676e67/wreq/issues/811)) - ([f5817c6](https://github.com/0x676e67/wreq/commit/f5817c63aa020faf6146343b002cd912a5dbe6cc)) - *(client)* Fmt future.rs - ([3a6c265](https://github.com/0x676e67/wreq/commit/3a6c26545ed768e8e7a7ce73427bef538a74604c)) - *(client)* Eliminate redundant cloning of `tower` middleware ([#698](https://github.com/0x676e67/wreq/issues/698)) - ([c52bb1d](https://github.com/0x676e67/wreq/commit/c52bb1d6b7f5be7158d1cf28c7df41b90dd7fc14)) - *(client)* Refactor client into responsibility-specific modules ([#683](https://github.com/0x676e67/wreq/issues/683)) - ([d70a9f2](https://github.com/0x676e67/wreq/commit/d70a9f29ce0c6f7b66f60b1c83af16f906b72821)) - *(config)* Merge standalone `config` into `middleware/config` ([#771](https://github.com/0x676e67/wreq/issues/771)) - ([96168aa](https://github.com/0x676e67/wreq/commit/96168aa679a600d402eb1f4daca124ddcc16dd40)) - *(connect)* Simplify parameters and improve documentation ([#858](https://github.com/0x676e67/wreq/issues/858)) - ([0eb219b](https://github.com/0x676e67/wreq/commit/0eb219be71f8d005c48fcd80988e758f176f83da)) - *(connect)* Simplify conditional cfg for TCP keepalive ([#842](https://github.com/0x676e67/wreq/issues/842)) - ([0c40c3a](https://github.com/0x676e67/wreq/commit/0c40c3a09c5d4bf9dec805b2fa1e79fc686afa9a)) - *(connect)* Relocate `connect` module to `http` ([#818](https://github.com/0x676e67/wreq/issues/818)) - ([77b00be](https://github.com/0x676e67/wreq/commit/77b00be6dd2f3f46704a152bbde9fbdabf787f1e)) - *(connect)* Simplified type import - ([fe10748](https://github.com/0x676e67/wreq/commit/fe10748d88a23979192e5141a119c8d40dc49d22)) - *(connect)* Fmt code - ([53c9a24](https://github.com/0x676e67/wreq/commit/53c9a24ddc998f0bbde5812725ea869a63707ca7)) - *(connector)* Fmt code - ([a703915](https://github.com/0x676e67/wreq/commit/a703915da5d5f78ed4887bd868b3ebcf5f9b756c)) - *(cookie)* Cleanup unused error types - 
([81bcf3f](https://github.com/0x676e67/wreq/commit/81bcf3fed32c6f00c133c794c9aa9162a42b0c81)) - *(cookie)* Fmt code - ([c9e03b1](https://github.com/0x676e67/wreq/commit/c9e03b1f39222c03d49a620189ba9131996189d2)) - *(core)* Format `http1` and `http2` options wrappers ([#813](https://github.com/0x676e67/wreq/issues/813)) - ([6803663](https://github.com/0x676e67/wreq/commit/680366361c809d226e08ab5e5cfcd9635c88a409)) - *(core)* Shorten `crate::core::Error` to `Error` via import ([#797](https://github.com/0x676e67/wreq/issues/797)) - ([1bd5666](https://github.com/0x676e67/wreq/commit/1bd5666143dcfcae2b020fb0fea06362375ceffe)) - *(core)* Remove unused `task` mod - ([121a46b](https://github.com/0x676e67/wreq/commit/121a46bcc0888ea0b7525cd2f0e29020da30da8a)) - *(core)* Remove unused `rewind` mod - ([e6a6ec0](https://github.com/0x676e67/wreq/commit/e6a6ec03bebf77332d26400104c6045268b87622)) - *(core)* Remove legacy code duplicated with `tower::util` ([#727](https://github.com/0x676e67/wreq/issues/727)) - ([ed218cf](https://github.com/0x676e67/wreq/commit/ed218cf18c1b6208aee727c77b1627b070f36559)) - *(core)* Remove duplicate code - ([a22bcf7](https://github.com/0x676e67/wreq/commit/a22bcf73888f5e53a6f77ed535c736c97146a8fb)) - *(decoder)* Merge standalone `decoder` into `middleware/decoder` ([#770](https://github.com/0x676e67/wreq/issues/770)) - ([b917192](https://github.com/0x676e67/wreq/commit/b917192b6e0e62374bf216974f006ccb52035696)) - *(dispatch)* Cleanup legacy unused code ([#796](https://github.com/0x676e67/wreq/issues/796)) - ([4153e07](https://github.com/0x676e67/wreq/commit/4153e07a38bf21c6d3ecfeb366948f1ea6684710)) - *(emulation)* Derive(Clone) for Emulation ([#862](https://github.com/0x676e67/wreq/issues/862)) - ([1ec7a09](https://github.com/0x676e67/wreq/commit/1ec7a093340c5c2f1c2c5fbc0b2adf60b388019a)) - *(example)* Format code in examples - ([a0e63c5](https://github.com/0x676e67/wreq/commit/a0e63c54b55ef4cda748609289d02a0caf570f89)) - *(example)* Update 
examples - ([b089c6e](https://github.com/0x676e67/wreq/commit/b089c6e892d214e48a43f5c53c896211219039f1)) - *(examples)* Update examples - ([b8b52ba](https://github.com/0x676e67/wreq/commit/b8b52ba3c86a6e29ec6c4e7f9f6c12d8688a0049)) - *(ext)* Encapsulate all per-request extensions uniformly ([#801](https://github.com/0x676e67/wreq/issues/801)) - ([d77d340](https://github.com/0x676e67/wreq/commit/d77d340e2d9c0b8c7440805c248b09797fe62d10)) - *(ext)* Move `http2::ext::Protocol` extension into request config ([#798](https://github.com/0x676e67/wreq/issues/798)) - ([b7cfbe9](https://github.com/0x676e67/wreq/commit/b7cfbe97ea7e9d03b3b9adb32d49ea665ea34566)) - *(internal)* Normalize internal error handling APIs ([#773](https://github.com/0x676e67/wreq/issues/773)) - ([65b574a](https://github.com/0x676e67/wreq/commit/65b574a2ea6b42b5f5f9c347d49e5a6c89382125)) - *(internal)* Normalize internal APIs ([#772](https://github.com/0x676e67/wreq/issues/772)) - ([3cfa301](https://github.com/0x676e67/wreq/commit/3cfa301080cb9a0dd97256a170f6801f5ce1b977)) - *(lib)* Sort module declarations - ([adc8b58](https://github.com/0x676e67/wreq/commit/adc8b58635acb638e1bcccf51801c65be5d07760)) - *(pool)* Eliminate type duplication with aliases - ([5ea3b07](https://github.com/0x676e67/wreq/commit/5ea3b07a62f85c5beb46ba673d26bf76415c06e6)) - *(proxy)* Assign proper connector names for `Tunnel` and `Socks` ([#815](https://github.com/0x676e67/wreq/issues/815)) - ([22d2be1](https://github.com/0x676e67/wreq/commit/22d2be1073988f8f7c76f592b731b0156c72c898)) - *(rt/tokio)* Cleanup unused code - ([c1c5e34](https://github.com/0x676e67/wreq/commit/c1c5e34a15c484d6829e03a2a903bbbf3357ccd3)) - *(socks)* Rename 'with_local_dns' to 'with_dns_mode' - ([7430a6a](https://github.com/0x676e67/wreq/commit/7430a6a213b278cb2f6356b0ed8da26d2cd323a2)) - *(sync)* Simplify lifetime annotations - ([834258c](https://github.com/0x676e67/wreq/commit/834258c75455a35be1c978b43deda8104af73876)) - *(sync)* Remove dead code - 
([a628d1c](https://github.com/0x676e67/wreq/commit/a628d1c17b3748379a493245cf71254ce6800fbb)) - *(test)* Remove miri exception configs - ([0511365](https://github.com/0x676e67/wreq/commit/0511365422af4d70d045f9c00e34fd77e9c15a8d)) - *(tls)* Add examples for root and self-signed certificates ([#792](https://github.com/0x676e67/wreq/issues/792)) - ([8691db0](https://github.com/0x676e67/wreq/commit/8691db07db75ea5f6a9dafd9bc0c44cd3dadab20)) - *(tls)* Remove the legacy curves configuration API ([#637](https://github.com/0x676e67/wreq/issues/637)) - ([2459de9](https://github.com/0x676e67/wreq/commit/2459de97819e3eeb7b0569d99387e63ee099c6a4)) - *(types)* Merge `GenericClientService` related types - ([303584c](https://github.com/0x676e67/wreq/commit/303584cbfc832c9528439ba320651aada1d10504)) - *(x509)* Cleanup mixed parsing of pem/der certificates - ([edc4e7d](https://github.com/0x676e67/wreq/commit/edc4e7df38150f899b353b0766653b867a384e05)) - *(x509)* Cleanup dead code - ([42b741b](https://github.com/0x676e67/wreq/commit/42b741bef019e63b0c05d573a550c6e423458bac)) - Sort module declarations - ([8699a4b](https://github.com/0x676e67/wreq/commit/8699a4b2cc06990e9848962563289fa4a7b4b059)) - Remove unused `#[allow]` attributes ([#809](https://github.com/0x676e67/wreq/issues/809)) - ([5bc5cca](https://github.com/0x676e67/wreq/commit/5bc5cca594b4717d343e04b7a7b348b371be7486)) - Fix typo - ([7b800c5](https://github.com/0x676e67/wreq/commit/7b800c5efbe48899d9ae7e6f9a129a0d8459a990)) - Cleanup dead code - ([5be4443](https://github.com/0x676e67/wreq/commit/5be4443f78d0cbad72ffa640414e094d20e0fe09)) - Cleanup unused macros and format definitions - ([9f925d3](https://github.com/0x676e67/wreq/commit/9f925d384f5723c68001041c00e4865172f68c8e)) - Cleanup redundant and unused type exports ([#704](https://github.com/0x676e67/wreq/issues/704)) - ([a583a7f](https://github.com/0x676e67/wreq/commit/a583a7ff9e179efccdf6d4877792e09016029e7f)) - Clean up redundant type exports 
([#684](https://github.com/0x676e67/wreq/issues/684)) - ([4af36f5](https://github.com/0x676e67/wreq/commit/4af36f5bbaa0ea1614e70e8a0c60a9bbb2079e93)) - Fmt example code ([#656](https://github.com/0x676e67/wreq/issues/656)) - ([7ad2496](https://github.com/0x676e67/wreq/commit/7ad24960c352257104d60d5be477631446e8793e)) - Apply clippy fixes required by CI ([#649](https://github.com/0x676e67/wreq/issues/649)) - ([d1e43d4](https://github.com/0x676e67/wreq/commit/d1e43d4a8019f73cb13c69ddf6e6c0852e95c5d3)) - Fix clippy module inception - ([6e66dd6](https://github.com/0x676e67/wreq/commit/6e66dd6fd18bb586a24a6988faea0398f7093923)) - Update examples and clean up dead code - ([636b510](https://github.com/0x676e67/wreq/commit/636b510a63ed4a3ff71abffc6c5eef24245fa889)) - Remove unused comments - ([ff484b2](https://github.com/0x676e67/wreq/commit/ff484b2888333b5e17adb3bd7681271ba6e6d201)) ### Build - *(deps)* Update tokio requirement from 1 to 1.47.0 - ([e7bab63](https://github.com/0x676e67/wreq/commit/e7bab6356d30115c2251eff8f9b10f7d7de58778)) - *(deps)* Update dependencies - ([42eaba7](https://github.com/0x676e67/wreq/commit/42eaba7444f6d19bb85200b58c8c837a25fbf146)) - *(deps)* Reduce dependency on `tokio-util` ([#837](https://github.com/0x676e67/wreq/issues/837)) - ([69c178d](https://github.com/0x676e67/wreq/commit/69c178dc3bb2655ff7dd5e782e957898f8933011)) - *(deps)* Simplify dev dependencies - ([2b6ae59](https://github.com/0x676e67/wreq/commit/2b6ae5925d79c19f6adb13cf1a1bb8c051a33eda)) - *(deps)* Simplify dev dependencies - ([9743ca7](https://github.com/0x676e67/wreq/commit/9743ca7138723659a5fb7946947c59518c3d0123)) - *(deps)* Update cookie_store requirement from 0.21 to 0.22 ([#829](https://github.com/0x676e67/wreq/issues/829)) - ([8453aa2](https://github.com/0x676e67/wreq/commit/8453aa272d8756fbd9a69d061803926d86f6774c)) - *(deps)* Update socket2 requirement from 0.5.10 to 0.6.0 ([#778](https://github.com/0x676e67/wreq/issues/778)) - 
([73bd5a0](https://github.com/0x676e67/wreq/commit/73bd5a027e2dddaaa313ae19ed363cbea637cbf6)) - *(deps)* Drop `tower-service` (redundant with `tower::Service`) ([#800](https://github.com/0x676e67/wreq/issues/800)) - ([9de6cb3](https://github.com/0x676e67/wreq/commit/9de6cb31e417b00210d277ffd8460da5eb8e0eea)) - *(deps)* Remove redundant `atomic-waker` dependency ([#776](https://github.com/0x676e67/wreq/issues/776)) - ([b0cc9cd](https://github.com/0x676e67/wreq/commit/b0cc9cd6e96108a522b9299aaa8581d5f1780848)) - *(deps)* Remove redundant `futures-core` dep ([#774](https://github.com/0x676e67/wreq/issues/774)) - ([b04e162](https://github.com/0x676e67/wreq/commit/b04e162995afbabbbbc1dcf47464e2dd372a7574)) - *(deps)* Replace `lru` with faster `schnellru` implementation ([#754](https://github.com/0x676e67/wreq/issues/754)) - ([100bab9](https://github.com/0x676e67/wreq/commit/100bab9cfebf2df645314f83102fc4fa079e4479)) - *(deps)* Remove support for `rustls-native-certs` ([#752](https://github.com/0x676e67/wreq/issues/752)) - ([144bc8a](https://github.com/0x676e67/wreq/commit/144bc8abac0a0a38b8ff2c44e5d6edb1ff2b7046)) - *(deps)* Optionally use `parking_lot` for lock implementation ([#750](https://github.com/0x676e67/wreq/issues/750)) - ([da30d6b](https://github.com/0x676e67/wreq/commit/da30d6beef7182e507659417c4357751866cbdd7)) - *(deps)* Prepare for Boring 5 upgrade ([#735](https://github.com/0x676e67/wreq/issues/735)) - ([77cfc8d](https://github.com/0x676e67/wreq/commit/77cfc8dcb7d4aec9df9ed7c3656633a4ffcc407e)) - *(deps)* Update tokio-tungstenite requirement from 0.26.2 to 0.27.0 ([#721](https://github.com/0x676e67/wreq/issues/721)) - ([63f7a4b](https://github.com/0x676e67/wreq/commit/63f7a4b68881a2e453fdf9413082fac7e5d4021e)) - *(deps)* Update webpki-root-certs requirement from 0.26.0 to 1.0.0 ([#631](https://github.com/0x676e67/wreq/issues/631)) - ([acb44fe](https://github.com/0x676e67/wreq/commit/acb44fe8c78b98f3c54dab05ab68035a6d449515)) - *(deps)* Remove 
`typed-builder` dependency ([#620](https://github.com/0x676e67/wreq/issues/620)) - ([5e037ac](https://github.com/0x676e67/wreq/commit/5e037ac61cbadc98bb37b3d851d3401e78023fb7)) - *(deps)* Update libc requirement from 2.0.11 to 0.2.172 ([#611](https://github.com/0x676e67/wreq/issues/611)) - ([888ef8a](https://github.com/0x676e67/wreq/commit/888ef8a2b709c113d2dbeac3457d35f61436b741)) - *(deps)* Update async-compression requirement from 0.4.21 to 0.4.23 ([#606](https://github.com/0x676e67/wreq/issues/606)) - ([6dc0026](https://github.com/0x676e67/wreq/commit/6dc002668c72038b87d1bd5e3edae4b47cc2f125)) - *(deps)* Update boring2 requirement from 4.15.11 to 4.15.12 ([#607](https://github.com/0x676e67/wreq/issues/607)) - ([bfe8c12](https://github.com/0x676e67/wreq/commit/bfe8c1256eded7b68826993e91d9729074d699f6)) - *(deps)* Update brotli requirement from 7.0.0 to 8.0.0 ([#601](https://github.com/0x676e67/wreq/issues/601)) - ([86849dd](https://github.com/0x676e67/wreq/commit/86849dd73b5753a3bf0cbddfd613686d6013ab9a)) - *(deps)* Update socket2 requirement from 0.5.8 to 0.5.9 ([#599](https://github.com/0x676e67/wreq/issues/599)) - ([dec8352](https://github.com/0x676e67/wreq/commit/dec8352ca33a74314e7a671858763ced272bc12f)) - *(deps)* Update lru requirement from 0.13 to 0.14 ([#597](https://github.com/0x676e67/wreq/issues/597)) - ([e557749](https://github.com/0x676e67/wreq/commit/e557749d078d50cf2fddf59df094ed5ce591128d)) - *(feature)* Drop redundant `sync_wrapper` ([#817](https://github.com/0x676e67/wreq/issues/817)) - ([a737f56](https://github.com/0x676e67/wreq/commit/a737f56e3c398378726666851773242470cb40a7)) - *(feature)* Rename `websocket` feature to `ws` ([#816](https://github.com/0x676e67/wreq/issues/816)) - ([d15b2d5](https://github.com/0x676e67/wreq/commit/d15b2d530e37299dc9e77559f2c2289424a4799d)) - *(sync)* Remove optional `parking_lot` support - ([b109eb9](https://github.com/0x676e67/wreq/commit/b109eb99c424d92f9912509105b50a26f02bee36)) - Drop `full` feature 
([#803](https://github.com/0x676e67/wreq/issues/803)) - ([12b4d64](https://github.com/0x676e67/wreq/commit/12b4d64eba7c2c5c34f89b1a10247814f01be095)) - Drop deprecated `macos-system-configuration` feature ([#775](https://github.com/0x676e67/wreq/issues/775)) - ([7caa4ad](https://github.com/0x676e67/wreq/commit/7caa4ad5327d437ece815fda99635f99f9cd062c)) - Cleanup deprecated feature - ([8d1632b](https://github.com/0x676e67/wreq/commit/8d1632b73e0994091136c7a60a2e801e65e9b440)) ### Deps - *(boring)* Basic support for LoongArch ([#622](https://github.com/0x676e67/wreq/issues/622)) - ([bcc53cf](https://github.com/0x676e67/wreq/commit/bcc53cf260e31605376bc72fb7acae53fb385a4b)) - Prune unnecessary dependencies ([#681](https://github.com/0x676e67/wreq/issues/681)) - ([d9aecea](https://github.com/0x676e67/wreq/commit/d9aecead61dccb481b8d39744ece30d66d1ea41f)) ## New Contributors ❤️ * @incizzle made their first contribution in [#608](https://github.com/0x676e67/wreq/pull/608) ## [5.1.0](https://github.com/0x676e67/wreq/compare/v5.0.0..v5.1.0) - 2025-03-29 ### Features - *(cookie)* Optional enable of sending multiple cookies in `CookieStore` ([#578](https://github.com/0x676e67/wreq/issues/578)) - ([6678fbf](https://github.com/0x676e67/wreq/commit/6678fbfa22aa259a20fe1868bb41d94851765492)) - *(cookie)* Cookies feature optionally preserves order ([#573](https://github.com/0x676e67/wreq/issues/573)) - ([803852b](https://github.com/0x676e67/wreq/commit/803852b43e127f0c89aea2a81e75ad4d04c951bd)) - *(proxy)* Enhanced websocket level proxy options ([#569](https://github.com/0x676e67/wreq/issues/569)) - ([a6c9a75](https://github.com/0x676e67/wreq/commit/a6c9a75dd68f99095bbf70cb95d2955b89b2271b)) - *(request)* Optionally allow compression in request ([#581](https://github.com/0x676e67/wreq/issues/581)) - ([dc2c148](https://github.com/0x676e67/wreq/commit/dc2c1483dca066f4bc9b02f3504c5c86edd45438)) - *(x509)* Support for using a private key and X.509 certificate as a client certificate 
([#588](https://github.com/0x676e67/wreq/issues/588)) - ([3fbcc89](https://github.com/0x676e67/wreq/commit/3fbcc89775fe0e65e5c5cfa86319350ab4cada7d)) - *(x509)* Auto detect and parse `DER`/`PEM` certificate formats ([#584](https://github.com/0x676e67/wreq/issues/584)) - ([3ab1681](https://github.com/0x676e67/wreq/commit/3ab168126ed4fe41c5dbe5e0bc56d2f87734d679)) - Expose `tls` and `websocket` modules ([#587](https://github.com/0x676e67/wreq/issues/587)) - ([a771463](https://github.com/0x676e67/wreq/commit/a771463508f66314f52a725bca6bb8de042843b7)) ### Bug Fixes - *(client)* Adapt sorting for duplicate headers such as cookies ([#576](https://github.com/0x676e67/wreq/issues/576)) - ([a786a85](https://github.com/0x676e67/wreq/commit/a786a8595079b1647c1d1a6ab571ffb199b11a5d)) - *(request)* Fix `try_clone` missing protocol extension ([#579](https://github.com/0x676e67/wreq/issues/579)) - ([0e9872d](https://github.com/0x676e67/wreq/commit/0e9872dd370a8a70d38139b30c14113495418b86)) ### Documentation - *(request)* Improve request header parameter docs ([#580](https://github.com/0x676e67/wreq/issues/580)) - ([f03c1c8](https://github.com/0x676e67/wreq/commit/f03c1c8d6aff7e2fba2aeb60a03e991f714e9662)) - *(response)* Link to `char::REPLACEMENT_CHARACTER` ([#586](https://github.com/0x676e67/wreq/issues/586)) - ([b0abcb6](https://github.com/0x676e67/wreq/commit/b0abcb636b5c5b86089cfbf1f39ebdc966da1e30)) - Update certificate store description ([#572](https://github.com/0x676e67/wreq/issues/572)) - ([f1b076f](https://github.com/0x676e67/wreq/commit/f1b076f8321987f9d4ece641b557261277128cbb)) - Improved emulation description ([#571](https://github.com/0x676e67/wreq/issues/571)) - ([5924815](https://github.com/0x676e67/wreq/commit/5924815a05b4512381815a2f4d66daf4e855f538)) - Update examples docs ([#570](https://github.com/0x676e67/wreq/issues/570)) - ([591e4b3](https://github.com/0x676e67/wreq/commit/591e4b3e1b63bc5911b6e1f64643c32c7d3475f0)) ### Performance - *(cookie)* Optimize the 
performance of cookies compression ([#574](https://github.com/0x676e67/wreq/issues/574)) - ([6c2280c](https://github.com/0x676e67/wreq/commit/6c2280c82a252f4de2289e74fc88a9d6058a6941)) - *(request)* Improve `json`/`form` request performance ([#583](https://github.com/0x676e67/wreq/issues/583)) - ([cce1fcf](https://github.com/0x676e67/wreq/commit/cce1fcfbad9b6f7d519b0c6f629087bded222ae4)) ### Styling - *(client)* Fmt import - ([f509c52](https://github.com/0x676e67/wreq/commit/f509c5298e4f1865f71a862e6882d420b9c06d24)) - *(client)* Fmt code - ([ca9bc96](https://github.com/0x676e67/wreq/commit/ca9bc96d85cfdd90e6f06c1b59b952a46946d98a)) - *(x509)* Fmt code - ([cc6fa5d](https://github.com/0x676e67/wreq/commit/cc6fa5d6bed622d569c50c5153d98e96664bac29)) - *(x509)* Format compatible code ([#589](https://github.com/0x676e67/wreq/issues/589)) - ([a12a414](https://github.com/0x676e67/wreq/commit/a12a414105433151a583a605b9e0a0767639143c)) ### Testing - *(badssl)* Dynamically update peer certificate SSL pinning test ([#582](https://github.com/0x676e67/wreq/issues/582)) - ([a87b95f](https://github.com/0x676e67/wreq/commit/a87b95fbe37318a5e0e3a0c3b2e90c39bde49654)) ### Miscellaneous Tasks - *(client)* Remove dead code - ([4de2978](https://github.com/0x676e67/wreq/commit/4de29785cd506fedb82ecfbb2355dcb966984d63)) - *(http)* Rename `ClientInner` to `ClientRef` - ([1d01390](https://github.com/0x676e67/wreq/commit/1d01390103b0e424dfacc211fcb9b56b0c848da6)) - *(tests)* Update client tests conditional ([#577](https://github.com/0x676e67/wreq/issues/577)) - ([684eb89](https://github.com/0x676e67/wreq/commit/684eb89a42febe7175c4f0fa5a2f2d8204514160)) ### Build - *(deps)* Upgrade dependencies ([#575](https://github.com/0x676e67/wreq/issues/575)) - ([cf6daf0](https://github.com/0x676e67/wreq/commit/cf6daf0662268f5f6d64bb06d4d8ea361cac46aa)) ## [5.0.0](https://github.com/0x676e67/wreq/compare/v3.0.6..v5.0.0) - 2025-03-23 ### Features - *(client)* Add a straightforward method for SSL pinning 
setup ([#556](https://github.com/0x676e67/wreq/issues/556)) - ([071d5ed](https://github.com/0x676e67/wreq/commit/071d5ed8ded32e5f40b6d21d2cea39920ddbe355)) - *(client)* Ignore the requirement to configure tls in order ([#545](https://github.com/0x676e67/wreq/issues/545)) - ([213b0ac](https://github.com/0x676e67/wreq/commit/213b0ac73b0cace1cb70dee443de2de1bcc32b16)) - *(cookie)* Impl `into_inner` for `Cookie` ([#542](https://github.com/0x676e67/wreq/issues/542)) - ([1f09ed5](https://github.com/0x676e67/wreq/commit/1f09ed5f46bb105618855e7a22f61b0a61454489)) - *(cookie)* Impl `Display` for `Cookie` ([#541](https://github.com/0x676e67/wreq/issues/541)) - ([729669c](https://github.com/0x676e67/wreq/commit/729669cd23b87e8c303e7ae70c4bf60c9ee0f68c)) - *(cookie)* Impl `into_owned` for cookie ([#535](https://github.com/0x676e67/wreq/issues/535)) - ([04d11ad](https://github.com/0x676e67/wreq/commit/04d11ada3cfe618927bad83304a886c39e7053bb)) - *(error)* Added `Error::is_connection_reset()` - ([8a68b1a](https://github.com/0x676e67/wreq/commit/8a68b1a299b3f44108a475e5837d109c635fbf24)) - *(proxy)* Enhanced client proxy options ([#534](https://github.com/0x676e67/wreq/issues/534)) - ([4edbfef](https://github.com/0x676e67/wreq/commit/4edbfefadbfec1a797c179d3442a1a7b3345ec3f)) - *(proxy)* Enhanced request level proxy options ([#533](https://github.com/0x676e67/wreq/issues/533)) - ([a69ac1b](https://github.com/0x676e67/wreq/commit/a69ac1ba37d4828d5f409ac6124497d7a84af42b)) - *(ws)* Impl `from_bytes_unchecked` of `Utf8Bytes` ([#550](https://github.com/0x676e67/wreq/issues/550)) - ([0663aa5](https://github.com/0x676e67/wreq/commit/0663aa5e44d389d1b34c0ee6efd1d2136c774f57)) - Remove shortcut for quickly make requests ([#560](https://github.com/0x676e67/wreq/issues/560)) - ([cb43f23](https://github.com/0x676e67/wreq/commit/cb43f23f9885a04b595c1caa4eef6323b63845aa)) ### Bug Fixes - *(client)* Preserve TLS settings when update client ([#552](https://github.com/0x676e67/wreq/issues/552)) 
- ([6a2e3e6](https://github.com/0x676e67/wreq/commit/6a2e3e60a6ac92977681c4c43308be05989c5dfe)) - *(client)* Preserve TLS `RootCertStore` settings when update client ([#551](https://github.com/0x676e67/wreq/issues/551)) - ([ad72976](https://github.com/0x676e67/wreq/commit/ad7297660a753a97d614fd9bb657303b04c0eba5)) - *(client)* Preserve TLS verify settings when update client ([#546](https://github.com/0x676e67/wreq/issues/546)) - ([21ad6e8](https://github.com/0x676e67/wreq/commit/21ad6e8beeeced18e928c35c6fee856047944321)) - *(proxy)* Re-enable NO_PROXY envs on Windows ([#544](https://github.com/0x676e67/wreq/issues/544)) - ([f5eb6fe](https://github.com/0x676e67/wreq/commit/f5eb6fe28d167485ceec79afee25180e9b268314)) ### Refactor - *(client)* Rename max_retry_count to http2_max_retry_count - ([be29947](https://github.com/0x676e67/wreq/commit/be29947166db5c2ac7bcd3700f6cc50fcc9118dc)) - *(client)* Delete tls fine-tuning config ([#530](https://github.com/0x676e67/wreq/issues/530)) - ([d7a75e3](https://github.com/0x676e67/wreq/commit/d7a75e393aa8d48b570d15aa66ce600a2ac8691c)) - *(cookie)* Redesign cookie store API signature ([#538](https://github.com/0x676e67/wreq/issues/538)) - ([2968839](https://github.com/0x676e67/wreq/commit/2968839c37c01950fd2be037c7bec1d64381f1f9)) - *(cookie)* `max_age` type conversion fails to avoid panic ([#536](https://github.com/0x676e67/wreq/issues/536)) - ([ceb0bd5](https://github.com/0x676e67/wreq/commit/ceb0bd5d05886fb172a33da2c23f69078ed147a0)) - *(tls)* Simplify RootCertStore wrapper implementation ([#553](https://github.com/0x676e67/wreq/issues/553)) - ([b24bc40](https://github.com/0x676e67/wreq/commit/b24bc4060e84734b0fa99d35f111c5638ec1bdb7)) - Unified naming of historical legacy APIs - ([c7c6a0d](https://github.com/0x676e67/wreq/commit/c7c6a0db32445dda27b285e4c7a812f4ca236b39)) - Unified naming of historical legacy APIs ([#554](https://github.com/0x676e67/wreq/issues/554)) - 
([9022641](https://github.com/0x676e67/wreq/commit/902264184d938d8b8cb138dbc28e8eca1e25891d)) ### Documentation - *(client)* Update emulation method documentation - ([5dd33ab](https://github.com/0x676e67/wreq/commit/5dd33aba02be7d6b0136a5d6e839d9974f1303d3)) - *(client)* Deleting outdated documents ([#532](https://github.com/0x676e67/wreq/issues/532)) - ([2cffe47](https://github.com/0x676e67/wreq/commit/2cffe471deca62c86ed18346cbd7b12caf2e0579)) - *(cookie)* Delete irrelevant library documents - ([6c44c38](https://github.com/0x676e67/wreq/commit/6c44c38f589057f3a64bb7152a34ca62630b7586)) - *(response)* Clarify in docs that `Response::content_length()` is not based on the `Content-Length` header ([#558](https://github.com/0x676e67/wreq/issues/558)) - ([5c174c4](https://github.com/0x676e67/wreq/commit/5c174c48b4ec09544de379c5254fc11e74d5bd7b)) - *(response)* Clarify that content_length() is not based on the Content-Length header in the docs - ([7257f34](https://github.com/0x676e67/wreq/commit/7257f34ca23c7cd0b9f0a1aa6e0da3507ad58956)) - Update library examples - ([62d6266](https://github.com/0x676e67/wreq/commit/62d6266f425e83ad0998d1b2f290cb56d44df93f)) - Update features description ([#540](https://github.com/0x676e67/wreq/issues/540)) - ([bd18719](https://github.com/0x676e67/wreq/commit/bd1871957df8304a0a55485cc7c2eb3e5add00bc)) ### Performance - *(client)* Fine-tune request performance and testing ([#566](https://github.com/0x676e67/wreq/issues/566)) - ([a07c233](https://github.com/0x676e67/wreq/commit/a07c2332cc751a98d48e0a8cf3fca958e19f09e3)) - *(http)* Inline hotspot method ([#528](https://github.com/0x676e67/wreq/issues/528)) - ([2038231](https://github.com/0x676e67/wreq/commit/20382318693de4e2aaa4b55c3943c5ad1bd2689c)) ### Testing - *(badssl)* Update ssl pinning test ([#557](https://github.com/0x676e67/wreq/issues/557)) - ([b883d7f](https://github.com/0x676e67/wreq/commit/b883d7fb9b7b6c6f1b5b48271bd4d5c7de9666d8)) ### Miscellaneous Tasks - *(emulation)* Impl 
`default` for EmulationProvider - ([b726363](https://github.com/0x676e67/wreq/commit/b7263637f23bac976a54fe644b96f89047217647)) - *(tls)* Simplified `IntoCertStore` macro impl ([#562](https://github.com/0x676e67/wreq/issues/562)) - ([5052342](https://github.com/0x676e67/wreq/commit/505234223f28dd749f10414e1fee9161119e1d98)) - *(tls)* Simplified `IntoCertCompressionAlgorithm` macro impl ([#561](https://github.com/0x676e67/wreq/issues/561)) - ([a7606d9](https://github.com/0x676e67/wreq/commit/a7606d9d50cc295dfbd5374a55c6841f790ae6c2)) - Update example documentation crate package name - ([363e98b](https://github.com/0x676e67/wreq/commit/363e98b6b97809f2a6802a131e884cb302430da8)) - Update apache license copyright - ([50d73a3](https://github.com/0x676e67/wreq/commit/50d73a35afd3c482538a23f34e125bfbd9be6f69)) ### Build - *(action)* Added compression features tests ([#564](https://github.com/0x676e67/wreq/issues/564)) - ([5767ce8](https://github.com/0x676e67/wreq/commit/5767ce81d59b5f1d0e2e702c2200dfd3713b4f0b)) - *(action)* Added features tests ([#563](https://github.com/0x676e67/wreq/issues/563)) - ([b8f7968](https://github.com/0x676e67/wreq/commit/b8f7968f0ed52d6fe6282ef189fe8f8514ba1071)) - *(action)* Added check semver action ([#559](https://github.com/0x676e67/wreq/issues/559)) - ([a58e989](https://github.com/0x676e67/wreq/commit/a58e989819fb29e89823ee764d26df2646a840e2)) - *(deps)* Pin `async-compression` to version `0.4.21` ([#567](https://github.com/0x676e67/wreq/issues/567)) - ([0be61d7](https://github.com/0x676e67/wreq/commit/0be61d7db8641170ca143220de348b1e423d8f83)) - *(deps)* Pin `tokio-tungstenite` to version `0.26.2` ([#565](https://github.com/0x676e67/wreq/issues/565)) - ([a5ee2a2](https://github.com/0x676e67/wreq/commit/a5ee2a2d99fcb1c8afab7a2636c7c657132744ed)) - *(deps)* Update hickory-resolver requirement from 0.24 to 0.25 ([#549](https://github.com/0x676e67/wreq/issues/549)) - 
([f7de3f5](https://github.com/0x676e67/wreq/commit/f7de3f5ba54c9bbb4701138a69adeaa563c9b4c0)) - *(deps)* Update typed-builder requirement from 0.20.0 to 0.21.0 ([#548](https://github.com/0x676e67/wreq/issues/548)) - ([099c257](https://github.com/0x676e67/wreq/commit/099c257ef3d244a464633deb04ccca6cd4a87898)) ## [3.0.6](https://github.com/0x676e67/wreq/compare/v3.0.5..v3.0.6) - 2025-03-10 ### Features - *(ws)* Improved WebSocket message creation ([#524](https://github.com/0x676e67/wreq/issues/524)) - ([508d869](https://github.com/0x676e67/wreq/commit/508d8695216a1ca28c91fe5d9e04cce745839a67)) ### Testing - *(zstd)* Test connection reuse with new zstd decompression ([#522](https://github.com/0x676e67/wreq/issues/522)) - ([a277f80](https://github.com/0x676e67/wreq/commit/a277f8036da135533efd55bd561941b992cfb1fa)) ## [3.0.5](https://github.com/0x676e67/wreq/compare/v3.0.3..v3.0.5) - 2025-03-09 ### Features - *(tls)* Allow overriding AES encryption for TLS ECH ([#515](https://github.com/0x676e67/wreq/issues/515)) - ([0045e3d](https://github.com/0x676e67/wreq/commit/0045e3d105a1c38ffb1ceb1cdc15cb2d4265e9ac)) ### Bug Fixes - *(decoder)* Handle multi-frame zstd response body decompression ([#517](https://github.com/0x676e67/wreq/issues/517)) - ([bbc02ae](https://github.com/0x676e67/wreq/commit/bbc02ae0a837138054321bfcb8223a3fafd2e286)) ### Miscellaneous Tasks - *(connect)* Remove `ServiceBuilder` dead code ([#518](https://github.com/0x676e67/wreq/issues/518)) - ([8cf0dc4](https://github.com/0x676e67/wreq/commit/8cf0dc4034707e73205cc5849c473e2a6ca87201)) - Update docs - ([d077c3d](https://github.com/0x676e67/wreq/commit/d077c3d40b43441ddebd8d3049b4d9094b23ec3b)) ## [3.0.3](https://github.com/0x676e67/wreq/compare/v3.0.1..v3.0.3) - 2025-03-07 ### Bug Fixes - *(decoder)* Fix conditional compilation of decompress features ([#507](https://github.com/0x676e67/wreq/issues/507)) - ([8ffa73b](https://github.com/0x676e67/wreq/commit/8ffa73bdd6a8aea1651f31f2a70c6ed727cd65f3)) ### 
Styling - Clippy fix example `set_root_cert_store` - ([9b3b49a](https://github.com/0x676e67/wreq/commit/9b3b49ac5172d09369b64a1b3b4cfe3550139fb8)) ### Miscellaneous Tasks - Remove pub(super) visibility from `method_has_defined_payload_semantics` - ([b689112](https://github.com/0x676e67/wreq/commit/b689112bdb1bd60798e264ba43b5d073009df0f1)) ### Build - *(deps)* Update async-compression requirement from 0.4.0 to 0.4.20 ([#505](https://github.com/0x676e67/wreq/issues/505)) - ([71562ce](https://github.com/0x676e67/wreq/commit/71562ce70b0418fbd0a516727bb6107f83585f89)) - *(deps)* Update bytes requirement from 1.0 to 1.10.1 ([#504](https://github.com/0x676e67/wreq/issues/504)) - ([c10f5e1](https://github.com/0x676e67/wreq/commit/c10f5e15c63660ac33413d6c929a11ac70302e53)) ## [3.0.1-rc4](https://github.com/0x676e67/wreq/compare/v3.0.1-rc3..v3.0.1-rc4) - 2025-03-05 ### Features - *(cert)* Expose `RootCertStoreBuilder` as public API ([#494](https://github.com/0x676e67/wreq/issues/494)) - ([849558f](https://github.com/0x676e67/wreq/commit/849558f2607e7b23521193c74e794cc192decf76)) ### Refactor - *(client)* Simplify DNS resolver initialization in ClientBuilder ([#499](https://github.com/0x676e67/wreq/issues/499)) - ([1368d07](https://github.com/0x676e67/wreq/commit/1368d075121a9cb9d2f9ca9cb674264e84c5e4e5)) - *(client)* `pool_max_size` signature changed from `Into<Option<usize>>` to `usize` ([#498](https://github.com/0x676e67/wreq/issues/498)) - ([57223e2](https://github.com/0x676e67/wreq/commit/57223e2ed4996239b8cfa696c68f550104de9f65)) ### Documentation - *(emulation)* Improve emulation documentation - ([776f2db](https://github.com/0x676e67/wreq/commit/776f2dbd18fa5fb3f635dceb2d22e92af358405d)) - Update docs ([#496](https://github.com/0x676e67/wreq/issues/496)) - ([a4862e8](https://github.com/0x676e67/wreq/commit/a4862e870d002f71761863bae22ec81de2bc5f52)) - ### Performance - *(client)* Reading `user-agent` to avoid full clone ([#495](https://github.com/0x676e67/wreq/issues/495)) - 
([89fd750](https://github.com/0x676e67/wreq/commit/89fd750e8f239c0bb31cf8699d7d4a54440933c0)) - *(decoder)* Statically check compression headers ([#503](https://github.com/0x676e67/wreq/issues/503)) - ([c912d8d](https://github.com/0x676e67/wreq/commit/c912d8d428b6787f4203a06ff9d2fd7abc6fb3d2)) ### Styling - *(network)* Fmt code - ([5941b39](https://github.com/0x676e67/wreq/commit/5941b390b46de184ecb57160cd64d08a7ab708e0)) ### Miscellaneous Tasks - Revert `impl_debug` export - ([3fc3f69](https://github.com/0x676e67/wreq/commit/3fc3f697982cee4fc24e28e10cfba04ceeaf1773)) ## [3.0.1-rc3](https://github.com/0x676e67/wreq/compare/v3.0.1-rc2..v3.0.1-rc3) - 2025-03-04 ### Features - *(cookie)* Abstract public cookie store trait ([#493](https://github.com/0x676e67/wreq/issues/493)) - ([a565884](https://github.com/0x676e67/wreq/commit/a5658847433928673964b79a7937b35dc4db6296)) - *(proxy)* Supports `http`/`https` proxy custom headers ([#490](https://github.com/0x676e67/wreq/issues/490)) - ([02fdc5b](https://github.com/0x676e67/wreq/commit/02fdc5bcd1b40d27538163279f4424a666957eef)) ### Testing - Update badssl test ([#487](https://github.com/0x676e67/wreq/issues/487)) - ([8831a9e](https://github.com/0x676e67/wreq/commit/8831a9e42d67dd5234955fc4594f8d3e564b04cc)) ### Miscellaneous Tasks - Replace `get_or_insert_with(Vec::new)` to `get_or_insert_default()` - ([2ca23a1](https://github.com/0x676e67/wreq/commit/2ca23a17068ef5c1b132029abcb25b47db029db7)) ### Build - `MSRV 1.85` / `edition 2024` ([#488](https://github.com/0x676e67/wreq/issues/488)) - ([f5bcc71](https://github.com/0x676e67/wreq/commit/f5bcc71d70a86e52a19596988c1ed08f71c12769)) ## [3.0.1-rc2](https://github.com/0x676e67/wreq/compare/v3.0.1-rc1..v3.0.1-rc2) - 2025-03-03 ### Refactor - *(client)* Rename `as_mut` to `update` for clarity and consistency ([#482](https://github.com/0x676e67/wreq/issues/482)) - ([e8137ec](https://github.com/0x676e67/wreq/commit/e8137ec6448e53124b58d5c7e4bdb7eb1d923bb7)) ### Styling - *(client)* 
Fmt code - ([897a373](https://github.com/0x676e67/wreq/commit/897a373b460ea3e0c8558e9d72843ef28578e61a)) ### Testing - Add client cloned test ([#485](https://github.com/0x676e67/wreq/issues/485)) - ([4a5419b](https://github.com/0x676e67/wreq/commit/4a5419b56d57a54b1cfde121fee9f41acb6c411f)) - Add client emulation update test ([#484](https://github.com/0x676e67/wreq/issues/484)) - ([f72648f](https://github.com/0x676e67/wreq/commit/f72648feafe1440dc1ae942b75421faf940fff76)) - Add client headers update test ([#483](https://github.com/0x676e67/wreq/issues/483)) - ([730fdaa](https://github.com/0x676e67/wreq/commit/730fdaa3b18c7e0d2e2c732a408677ba8c483854)) ### Miscellaneous Tasks - *(client)* Update docs - ([bbcdd1f](https://github.com/0x676e67/wreq/commit/bbcdd1f15843c63aa8fee47ac0507620fb9468e6)) ### Build - Fix docs build ([#486](https://github.com/0x676e67/wreq/issues/486)) - ([915c36b](https://github.com/0x676e67/wreq/commit/915c36bb4a666be3acd26a4416a39534e661419b)) ## [3.0.1-rc1](https://github.com/0x676e67/wreq/compare/v2.0.3..v3.0.1-rc1) - 2025-03-03 ### Features - *(client)* Remove cross-origin redirect proxy support ([#477](https://github.com/0x676e67/wreq/issues/477)) - ([3a241ef](https://github.com/0x676e67/wreq/commit/3a241ef4b342b1bd46a8e4cd7ecbeb641d043b4f)) - *(client)* Added a remove cookie function ([#475](https://github.com/0x676e67/wreq/issues/475)) - ([7142963](https://github.com/0x676e67/wreq/commit/71429634012e03a710793591727cbf4bd5d8de28)) - *(client)* Remove `set_cookies_by_ref` ([#474](https://github.com/0x676e67/wreq/issues/474)) - ([56de727](https://github.com/0x676e67/wreq/commit/56de72716b1cd89f724f8720dc3fa2fb75ac0399)) - *(client)* Added a clear cookies function ([#472](https://github.com/0x676e67/wreq/issues/472)) - ([d934716](https://github.com/0x676e67/wreq/commit/d93471631440a28a0dfb63dad85f4acf3768cab2)) - *(client)* Adapt thread-safe update client configuration ([#404](https://github.com/0x676e67/wreq/issues/404)) - 
([e6397d6](https://github.com/0x676e67/wreq/commit/e6397d68f216a86e75b46bb2f7b9345ecf58e08f)) - *(client)* Apply configuration sequentially ([#391](https://github.com/0x676e67/wreq/issues/391)) - ([775db82](https://github.com/0x676e67/wreq/commit/775db824653b162e4dfc6bb14c79b811206f79c2)) - *(imp)* Add `chrome 132`/`chrome 133` impersonate ([#423](https://github.com/0x676e67/wreq/issues/423)) - ([3430645](https://github.com/0x676e67/wreq/commit/34306457c0ba01f95e46b5b0bbe443a3abe3fb87)) - *(pool)* Connection pool distinguishes request versions ([#431](https://github.com/0x676e67/wreq/issues/431)) - ([22b0e92](https://github.com/0x676e67/wreq/commit/22b0e92835a786be030f405fd70ea311cecb6de4)) - *(proxy)* Add `socks4a` proxy protocol support ([#416](https://github.com/0x676e67/wreq/issues/416)) - ([1f98b6e](https://github.com/0x676e67/wreq/commit/1f98b6e2578ab55ff4fcfb86c66548a7161469a7)) - *(tls)* Encapsulate and simplify certificate loading ([#417](https://github.com/0x676e67/wreq/issues/417)) - ([a32207e](https://github.com/0x676e67/wreq/commit/a32207ef84057e042b69068fee2179b0a059cd51)) - *(tls)* Add ALPS use new endpoint extension ([#396](https://github.com/0x676e67/wreq/issues/396)) - ([20b988c](https://github.com/0x676e67/wreq/commit/20b988c04e4a8a334d702b74a54e46d149b9802a)) - *(websocket)* Added `read_buffer_size` optional config ([#457](https://github.com/0x676e67/wreq/issues/457)) - ([ccece59](https://github.com/0x676e67/wreq/commit/ccece597da6db3f085acf13718af93ea3acffab9)) - *(websocket)* Chain call wrapper `RequestBuilder` ([#432](https://github.com/0x676e67/wreq/issues/432)) - ([ea3dfe8](https://github.com/0x676e67/wreq/commit/ea3dfe88c7dbcf4b9f13a70ac29aa306f17fdf91)) - *(websocket)* Explicitly force the use of ws/wss protocol ([#383](https://github.com/0x676e67/wreq/issues/383)) - ([4fd10a9](https://github.com/0x676e67/wreq/commit/4fd10a951977580b74f60d5ede81833ae0f484cf)) - Removal of base url feature 
([#411](https://github.com/0x676e67/wreq/issues/411)) - ([16dac1d](https://github.com/0x676e67/wreq/commit/16dac1d122381d27ed3f5948766a1d9a13ca8d9d)) - Add optional clear method to `CookieStore` implementation ([#400](https://github.com/0x676e67/wreq/issues/400)) - ([a357c9e](https://github.com/0x676e67/wreq/commit/a357c9e1eed9c9d51fd10d3eb98109104928cef5)) - Serializing impersonate enums uses legacy naming conventions ([#385](https://github.com/0x676e67/wreq/issues/385)) - ([0e3ddb0](https://github.com/0x676e67/wreq/commit/0e3ddb06d3690661806d6f1dc8731e8d337ad4a0)) - Add `HTTP/2` support for `WebSocket` ([#373](https://github.com/0x676e67/wreq/issues/373)) - ([b46daa9](https://github.com/0x676e67/wreq/commit/b46daa90fd11e475b7b8238e1ab5d573b8a531b2)) ### Bug Fixes - *(deps)* Fix alps use new endpoint negotiation ([#464](https://github.com/0x676e67/wreq/issues/464)) - ([21c6751](https://github.com/0x676e67/wreq/commit/21c675123e1f117633d604290c94e5aa333ec4ab)) - *(proxy)* Fix `no_proxy` on Windows ([#470](https://github.com/0x676e67/wreq/issues/470)) - ([16ec933](https://github.com/0x676e67/wreq/commit/16ec933045a707a244eebc98edb17ae1314766a6)) - Ignore Content-Length for methods without payload semantics ([#429](https://github.com/0x676e67/wreq/issues/429)) - ([bd5420c](https://github.com/0x676e67/wreq/commit/bd5420c4d526f05b4430bd7e60f5f5df27fffa11)) - Ensure HTTP version negotiation for non-TLS requests ([#397](https://github.com/0x676e67/wreq/issues/397)) - ([dd14d49](https://github.com/0x676e67/wreq/commit/dd14d49a2d579f9d36a49f38c5d9de373901d492)) ### Refactor - *(client)* Simplify client reference handling by removing unnecessary operations ([#476](https://github.com/0x676e67/wreq/issues/476)) - ([529928b](https://github.com/0x676e67/wreq/commit/529928b4bae30b2ec4fadd2c91185f3417919ea8)) - *(client)* Refactor client `HTTP1`/`HTTP2` configuration API ([#371](https://github.com/0x676e67/wreq/issues/371)) - 
([fac8d2d](https://github.com/0x676e67/wreq/commit/fac8d2d9cf6df102e101c4f8d9fda72bd2382935)) - *(tls)* Refactor TLS connector structure ([#421](https://github.com/0x676e67/wreq/issues/421)) - ([bdd3942](https://github.com/0x676e67/wreq/commit/bdd394210ffa26d0e2956c73606436685bc962da)) - *(websocket)* Refactor websocket implementation ([#380](https://github.com/0x676e67/wreq/issues/380)) - ([3b91be4](https://github.com/0x676e67/wreq/commit/3b91be4225aa060b43c00103af6fe5fa14a093dd)) - *(websocket)* Improve error handling, rename APIs, and update API signatures ([#372](https://github.com/0x676e67/wreq/issues/372)) - ([44ec8c6](https://github.com/0x676e67/wreq/commit/44ec8c600119c46112b182b268263aa272139b10)) - Move device fingerprinting to rquest-util maintenance ([#480](https://github.com/0x676e67/wreq/issues/480)) - ([5eb8684](https://github.com/0x676e67/wreq/commit/5eb868442018da9e7be15f9844392093ff5baa21)) - Reduce dependency on `futures-core` / `futures-util` ([#449](https://github.com/0x676e67/wreq/issues/449)) - ([5a4f2be](https://github.com/0x676e67/wreq/commit/5a4f2be065bb1edc3c1e39fe9fe2b8c993078260)) - Replace `HttpContext` with `EmulationProvider` for clarity and accuracy ([#436](https://github.com/0x676e67/wreq/issues/436)) - ([6a9d80a](https://github.com/0x676e67/wreq/commit/6a9d80a5cfa85b13b0a3b7bd08422ba0c563cf4a)) - Replace "impersonate" with "emulation" for clarity and accuracy ([#434](https://github.com/0x676e67/wreq/issues/434)) - ([e2bac75](https://github.com/0x676e67/wreq/commit/e2bac75805fdefd79c3cba32cadd65107060558b)) - Replace unsafe methods with safe methods for certificate handler ([#399](https://github.com/0x676e67/wreq/issues/399)) - ([bdf1fc5](https://github.com/0x676e67/wreq/commit/bdf1fc57d2150e7e471331abd1d745e7f786dbd7)) - Replace unsafe methods with safe methods in `ConnectConfiguration` ([#398](https://github.com/0x676e67/wreq/issues/398)) - 
([dda0d42](https://github.com/0x676e67/wreq/commit/dda0d42388623c14838396624b2d56a8b572c2f7)) - Improve client API design and documentation ([#387](https://github.com/0x676e67/wreq/issues/387)) - ([7a63ba6](https://github.com/0x676e67/wreq/commit/7a63ba6e10734b233bbcce87c42a4978fccb7b25)) - Rename method to accept_key for clarity - ([c32dadd](https://github.com/0x676e67/wreq/commit/c32daddb394d5b35009fc445c1e0f247a5c48ba0)) ### Documentation - *(client)* Update client `cloned` method documentation ([#409](https://github.com/0x676e67/wreq/issues/409)) - ([7d10ce6](https://github.com/0x676e67/wreq/commit/7d10ce6be0b26d7b99f24a720e171f84c8b9e41c)) - Added backport reference docs ([#382](https://github.com/0x676e67/wreq/issues/382)) - ([7f57bd5](https://github.com/0x676e67/wreq/commit/7f57bd5876020cb827c2ac3161e4ef080e96718d)) ### Performance - *(connect)* Delay connector layer initialization to improve performance ([#408](https://github.com/0x676e67/wreq/issues/408)) - ([4903458](https://github.com/0x676e67/wreq/commit/4903458b81b161aac51ded38a562f139e08d94c9)) - *(connector)* Optimize performance of switching TLS connector ([#406](https://github.com/0x676e67/wreq/issues/406)) - ([26f58e4](https://github.com/0x676e67/wreq/commit/26f58e4e39b1d9d0eb6525862a5ff146fff4ef5c)) - *(socks)* Socks connection process DNS uses non-blocking query ([#420](https://github.com/0x676e67/wreq/issues/420)) - ([0d40c75](https://github.com/0x676e67/wreq/commit/0d40c75b1edc117fa81431256ca7f6510618ea43)) - Always inline `into_tungstenite` ([#381](https://github.com/0x676e67/wreq/issues/381)) - ([b5e0b9f](https://github.com/0x676e67/wreq/commit/b5e0b9f0263248669940c702868c5afcdc01cc76)) ### Styling - Fmt code - ([e3ac7a7](https://github.com/0x676e67/wreq/commit/e3ac7a76ccdb98a3b143607f8d3f8f7293421b4e)) ### Testing - *(upgrade)* Add http2 upgrade test ([#384](https://github.com/0x676e67/wreq/issues/384)) - 
([0724836](https://github.com/0x676e67/wreq/commit/0724836dbfae85bf118f4caf4de19ae3d878b60e)) - Add unit test for cookie getter and setter functionality ([#451](https://github.com/0x676e67/wreq/issues/451)) - ([b71032e](https://github.com/0x676e67/wreq/commit/b71032e0229aa86b737426b643fabfaf549a854b)) - Serialize tests that read/write the same environment variable ([#443](https://github.com/0x676e67/wreq/issues/443)) - ([b7560f9](https://github.com/0x676e67/wreq/commit/b7560f97998e4221472c32688ab7bea5df61edb6)) ### Miscellaneous Tasks - *(client)* Delete unnecessary clone - ([9793bcc](https://github.com/0x676e67/wreq/commit/9793bccbb2f4d6d45dfc90ec028222cdf065f29c)) - *(client)* Rename client builder http2 timer name from `timer` to `http2_timer` ([#407](https://github.com/0x676e67/wreq/issues/407)) - ([e06d9ce](https://github.com/0x676e67/wreq/commit/e06d9ce8dd4f9f1a5f89c0ff3372869275f526b5)) - *(connect)* Delete duplicate tls info acquisition logic - ([4b7877a](https://github.com/0x676e67/wreq/commit/4b7877a3805afb071931358e0a0f69c42e8b05c0)) - *(connect)* Delete connector unnecessary keepalive field - ([08b5904](https://github.com/0x676e67/wreq/commit/08b5904ffb0374f6c327442a314615e6893b6c63)) - *(example)* Update websocket example - ([2479972](https://github.com/0x676e67/wreq/commit/24799723f580badf92e81b3e972ad8cc2b0995f1)) - *(tls)* Move `conf` to `client/conf` module - ([988e679](https://github.com/0x676e67/wreq/commit/988e67949ca9162e6449d41700e5bbbccdb84d2d)) - *(tls)* Move `TlsConfig` to conf module - ([ffd1673](https://github.com/0x676e67/wreq/commit/ffd1673e3afa379086bc04b7a744e8733512388b)) - *(websocket)* Simplify error handling and improve code readability ([#418](https://github.com/0x676e67/wreq/issues/418)) - ([60fa74d](https://github.com/0x676e67/wreq/commit/60fa74dc0abba1862d23adc4965152b1896eb3e4)) - *(websocket)* Fmt code - ([a313ba0](https://github.com/0x676e67/wreq/commit/a313ba0f2707148e023f0126cc895788e3d42bfe)) - *(websocket)* Improved 
version protocol handler - ([81a0183](https://github.com/0x676e67/wreq/commit/81a0183b14dbe9596c6eb4466656247d92563e62)) - Update examples - ([7cc6b1e](https://github.com/0x676e67/wreq/commit/7cc6b1e5b3a836bcf0e33f9994bb5a162ed76ad2)) - Add Crates.io MSRV - ([cc8cc28](https://github.com/0x676e67/wreq/commit/cc8cc284e7e7b976622a47271b273fa03a33a82b)) - Update the compilation guide ([#466](https://github.com/0x676e67/wreq/issues/466)) - ([5ad4de9](https://github.com/0x676e67/wreq/commit/5ad4de96c5938c1d7c8ea399495b1f377ecf8f66)) - Update compilation-guide ([#456](https://github.com/0x676e67/wreq/issues/456)) - ([723e0c1](https://github.com/0x676e67/wreq/commit/723e0c16d6ac923b8cc51312b2c2424366c0d915)) - Merge v2 branch - ([8180cbc](https://github.com/0x676e67/wreq/commit/8180cbcc4f60d3ab6916ad07df8f1354e230c39f)) - Improve Debug implementation ([#422](https://github.com/0x676e67/wreq/issues/422)) - ([566a33b](https://github.com/0x676e67/wreq/commit/566a33b3102b546f7f7c36161f4f98ae78bf2cb7)) - Fmt code - ([8b3c8f6](https://github.com/0x676e67/wreq/commit/8b3c8f6b1f5e19400ae33fdce85e3169d98c80ba)) - Simplified error qualifier types ([#412](https://github.com/0x676e67/wreq/issues/412)) - ([35b4347](https://github.com/0x676e67/wreq/commit/35b4347a35453b531f8339a9efe62b80a0ecd164)) - Rename `Proxies` internal fields - ([dfe4a00](https://github.com/0x676e67/wreq/commit/dfe4a00c505dcd7ec5802b51dd685f25e6559831)) - Update docs - ([6eb42e8](https://github.com/0x676e67/wreq/commit/6eb42e83452aab5d7921c56d7c1120cad676d805)) - Move `http1`/`http2` config to `conf` mod - ([592038f](https://github.com/0x676e67/wreq/commit/592038ff1468ad0a59aff1057410c6cffc8d6e04)) - Update client docs - ([6a35a0a](https://github.com/0x676e67/wreq/commit/6a35a0aa8ea2ccd4483b160ee1a19f97b539c7c8)) - Fix `AlpnProtos` non upper case globals warning - ([265d938](https://github.com/0x676e67/wreq/commit/265d9388ae524fbed133136f114835f5175b9bd0)) - Fix non upper case globals name - 
([af02660](https://github.com/0x676e67/wreq/commit/af02660acffa86d48f0246d75de3e291869e86f6)) - Remove dead code - ([00e939a](https://github.com/0x676e67/wreq/commit/00e939ac1a68950131713575d3eae60d1a1b621c)) - Fmt code - ([096eef0](https://github.com/0x676e67/wreq/commit/096eef07bea970ef4fff57073e456c8269b992a6)) - Fmt imports ([#388](https://github.com/0x676e67/wreq/issues/388)) - ([d73d1ac](https://github.com/0x676e67/wreq/commit/d73d1ac0dde1faeda4186aa17051849067e48c63)) - Fmt code - ([05a9d40](https://github.com/0x676e67/wreq/commit/05a9d406b6bf2beb8066994fcc7269a01f900183)) - Fmt code - ([ff3ad03](https://github.com/0x676e67/wreq/commit/ff3ad037e5ad4ca83d1928631a9d88d754ef1cb1)) - Clippy fix - ([895db54](https://github.com/0x676e67/wreq/commit/895db54492677791693f760b6498d4b1eb9b619b)) - Update websocket examples - ([4eefefd](https://github.com/0x676e67/wreq/commit/4eefefd464d4d0580651fdbe38c832d3f53b1e59)) - Improved WebSocket protocols handler ([#370](https://github.com/0x676e67/wreq/issues/370)) - ([2abe066](https://github.com/0x676e67/wreq/commit/2abe06620c5de829db87ce8e7589d9864aa6d2ec)) ### Build - *(deps)* Update windows-registry requirement from 0.4.0 to 0.5.0 ([#471](https://github.com/0x676e67/wreq/issues/471)) - ([288e33a](https://github.com/0x676e67/wreq/commit/288e33aac4cbf0b3d6b51df38eb88952778eb447)) - *(deps)* Update boring requirement from 4.15.7 to 4.15.8 ([#468](https://github.com/0x676e67/wreq/issues/468)) - ([3488f17](https://github.com/0x676e67/wreq/commit/3488f17e9019735af1ec934027c1ec7c8bd28780)) - *(deps)* Update boring requirement from 4.15.5 to 4.15.6 - ([04659bb](https://github.com/0x676e67/wreq/commit/04659bbae0f4ded2e4a0f45f69e69c23da2f7e8d)) - *(deps)* Update boring requirement from 4.15.3 to 4.15.5 ([#437](https://github.com/0x676e67/wreq/issues/437)) - ([b172177](https://github.com/0x676e67/wreq/commit/b1721771a8f1cfa5af7aa9006484b9bfd1c2fff2)) - *(deps)* Update boring requirement from 4.15.2 to 4.15.3 
([#425](https://github.com/0x676e67/wreq/issues/425)) - ([aff379e](https://github.com/0x676e67/wreq/commit/aff379e045dc1c8bda0eeec9d091c08e9f5db86b)) - *(deps)* Apple platform dependencies are minimized as much as possible ([#414](https://github.com/0x676e67/wreq/issues/414)) - ([858d911](https://github.com/0x676e67/wreq/commit/858d91196299e9a8f2851981d50b5421b530b580)) - *(deps)* MacOS platform dependency is minimized ([#413](https://github.com/0x676e67/wreq/issues/413)) - ([f85c7ee](https://github.com/0x676e67/wreq/commit/f85c7ee337a74ef2686a0cc01870cc05eee031fc)) - *(deps)* Update brotli requirement from 6.0.0 to 7.0.0 ([#401](https://github.com/0x676e67/wreq/issues/401)) - ([50614a7](https://github.com/0x676e67/wreq/commit/50614a74a02991124cf0a20ba09de993b79e1223)) - *(deps)* Update lru requirement from 0.12 to 0.13 ([#393](https://github.com/0x676e67/wreq/issues/393)) - ([b3cda7d](https://github.com/0x676e67/wreq/commit/b3cda7d7f9efd9b7c35a5cd0c5a8a8588bb54897)) - *(feature)* `apple-bindable-device` rename to `apple-network-device-binding` ([#426](https://github.com/0x676e67/wreq/issues/426)) - ([05a1adb](https://github.com/0x676e67/wreq/commit/05a1adb626a0614fd13a04fbeb7ae3d5304e4d8b)) - Fix no default feature build - ([8ed417d](https://github.com/0x676e67/wreq/commit/8ed417df8fbbb14ec9f319219d6ca750200bd192)) - Visualize macro conditional compilation ([#415](https://github.com/0x676e67/wreq/issues/415)) - ([01f1387](https://github.com/0x676e67/wreq/commit/01f138738785dd1391a06d1ff015ea7eacc727c1)) - Update compilation guide ([#395](https://github.com/0x676e67/wreq/issues/395)) - ([96c75a4](https://github.com/0x676e67/wreq/commit/96c75a4be224d2be0275d101d43eb219489d7494)) ### Deps - *(ipnet)* Bump version to v2.11.0 ([#390](https://github.com/0x676e67/wreq/issues/390)) - ([2022b25](https://github.com/0x676e67/wreq/commit/2022b256d1d88dd991a3ed48f7c4678eb0f60f7c)) - *(tokio)* Remove unused `rt` feature ([#389](https://github.com/0x676e67/wreq/issues/389)) - 
([545e245](https://github.com/0x676e67/wreq/commit/545e2456db7353b2909c85d9b3186dbe6d8100e2)) ### Workflow - Update workflows check - ([321fba2](https://github.com/0x676e67/wreq/commit/321fba2939253f51637b5b18dd1dfc9990dc0d2d)) ## New Contributors ❤️ * @tahmid-23 made their first contribution in [#423](https://github.com/0x676e67/wreq/pull/423) ## [2.0.3](https://github.com/0x676e67/wreq/compare/v2.0.2..v2.0.3) - 2025-01-25 ### Documentation - Enhance documentation for `ImpersonateBuilder` methods ([#367](https://github.com/0x676e67/wreq/issues/367)) - ([d0dd33f](https://github.com/0x676e67/wreq/commit/d0dd33f22325b16138d743b03a39674daf8d89c8)) ### Miscellaneous Tasks - Update examples ([#368](https://github.com/0x676e67/wreq/issues/368)) - ([477e864](https://github.com/0x676e67/wreq/commit/477e864673d5e684070b54f44b48896760a05ef5)) ## [2.0.2](https://github.com/0x676e67/wreq/compare/v2.0.1..v2.0.2) - 2025-01-25 ### Features - Add implementations for `IntoCertCompressionAlgorithm` ([#363](https://github.com/0x676e67/wreq/issues/363)) - ([3e09a3f](https://github.com/0x676e67/wreq/commit/3e09a3f5fbea1f0a400ab3eaf9ca9832c4d595a4)) - Expose `ClientMut` as public API ([#362](https://github.com/0x676e67/wreq/issues/362)) - ([455cf51](https://github.com/0x676e67/wreq/commit/455cf51ba37c10a57f00ad6310f87aae8d3f2af3)) ### Refactor - Simplify `IntoStreamDependency` implementations using macros ([#364](https://github.com/0x676e67/wreq/issues/364)) - ([9322f05](https://github.com/0x676e67/wreq/commit/9322f0594d0b1cf74bef110bdd113c7267ae1707)) ### Miscellaneous Tasks - Remove unnecessary type conversions - ([9d9bb4f](https://github.com/0x676e67/wreq/commit/9d9bb4fce39f3f6c7b6cbf24e06041a714ec1898)) ## [2.0.1](https://github.com/0x676e67/wreq/compare/v2.0.0..v2.0.1) - 2025-01-24 ### Features - Implement `IntoStreamDependency` for tuple and `StreamDependency` ([#359](https://github.com/0x676e67/wreq/issues/359)) - 
([d7724f7](https://github.com/0x676e67/wreq/commit/d7724f753e4375a68603ee781be0f010bb329de9)) ### Documentation - Update performance information - ([2cb8a46](https://github.com/0x676e67/wreq/commit/2cb8a4689422c8cddf51f09620d699f56e9d8111)) ### Miscellaneous Tasks - Update owner ([#358](https://github.com/0x676e67/wreq/issues/358)) - ([4ee1438](https://github.com/0x676e67/wreq/commit/4ee143824e5726a8bfaf1bcec14c2d59802ad71d)) ## [2.0.0](https://github.com/0x676e67/wreq/compare/v2.0.0-rc.1..v2.0.0) - 2025-01-23 ### Testing - *(badssl)* Update cipher list - ([6b01366](https://github.com/0x676e67/wreq/commit/6b0136632b5241fad5fcb9620c54eac98f237ee9)) ### Miscellaneous Tasks - *(tls)* Load and wrap the certificate into `RootCertStore` ([#356](https://github.com/0x676e67/wreq/issues/356)) - ([adddada](https://github.com/0x676e67/wreq/commit/adddada9037b09ccb38a6eeea67f7adac328a38c)) - *(tls)* Move `tls/ext/cert` to `tls/cert` ([#355](https://github.com/0x676e67/wreq/issues/355)) - ([eae2d93](https://github.com/0x676e67/wreq/commit/eae2d9364063ab5585b34e137eedb90fb5da18dd)) - Move macros to lib mod ([#354](https://github.com/0x676e67/wreq/issues/354)) - ([6209589](https://github.com/0x676e67/wreq/commit/6209589bdd23cf38227745100c43d744f0c030b8)) ## [2.0.0-rc.1](https://github.com/0x676e67/wreq/compare/v1.5.0..v2.0.0-rc.1) - 2025-01-22 ### Features - *(mimic)* Added possibility to choose Client and OS to impersonate ([#290](https://github.com/0x676e67/wreq/issues/290)) - ([63cb5c5](https://github.com/0x676e67/wreq/commit/63cb5c53a735f172114afcab6c816762faedd934)) - Rename `RootCertsStore` to `RootCertStore` ([#353](https://github.com/0x676e67/wreq/issues/353)) - ([152142f](https://github.com/0x676e67/wreq/commit/152142f00caf25b6d9c198155f417a84a6eead90)) - `Impersonate`/`ImpersonateOS` impl serde ([#352](https://github.com/0x676e67/wreq/issues/352)) - ([98c61c8](https://github.com/0x676e67/wreq/commit/98c61c885478f1d0d1f81ae1f9cff75bbbe0e95e)) - Add tests for `3DES` and 
`DH2048` cipher support ([#351](https://github.com/0x676e67/wreq/issues/351)) - ([bd73ddc](https://github.com/0x676e67/wreq/commit/bd73ddcb58bcfb936297cd338c8be589d2ce8c95)) - Remove impersonate from str feature ([#350](https://github.com/0x676e67/wreq/issues/350)) - ([96387ec](https://github.com/0x676e67/wreq/commit/96387ec22c009883f1486e3c09586cbbc7f94477)) - Add `read_timeout` option with override support in Request ([#334](https://github.com/0x676e67/wreq/issues/334)) - ([5d115a5](https://github.com/0x676e67/wreq/commit/5d115a5b5145213d3ec9f8408d88609aa43bf00a)) - Disable boring module exports - ([bb63196](https://github.com/0x676e67/wreq/commit/bb631960f9326a1c60e3300fd7f2425af1faef4b)) - Disable boring module exports ([#319](https://github.com/0x676e67/wreq/issues/319)) - ([7d30324](https://github.com/0x676e67/wreq/commit/7d3032433b561c0452c7b22a6fc5d5ba2ca37e84)) - Remove internal headers cache ([#318](https://github.com/0x676e67/wreq/issues/318)) - ([846ad15](https://github.com/0x676e67/wreq/commit/846ad15348c5a7767a3c3c6d971a0a6e430b24e6)) - Send `json` to avoid repeated query of `CONTENT_TYPE` ([#311](https://github.com/0x676e67/wreq/issues/311)) - ([bd2c519](https://github.com/0x676e67/wreq/commit/bd2c519156c66482ddd34b8aa4bf50fd36d3a213)) ### Bug Fixes - *(network)* Fix `NetworkScheme` debug format ([#332](https://github.com/0x676e67/wreq/issues/332)) - ([d0df934](https://github.com/0x676e67/wreq/commit/d0df93457dd100e75ffbf4fb8b61581cd24d79f6)) ### Refactor - Refactor client and impersonate configurations ([#321](https://github.com/0x676e67/wreq/issues/321)) - ([513f196](https://github.com/0x676e67/wreq/commit/513f1962503c32cdfeb748780cca26d3965be840)) - Simplify client internal settings ([#320](https://github.com/0x676e67/wreq/issues/320)) - ([b7763cf](https://github.com/0x676e67/wreq/commit/b7763cf75e01b119cf96cd8cc02bb52888295052)) ### Documentation - *(websocket)* Update docs - 
([5028926](https://github.com/0x676e67/wreq/commit/5028926e889c38ac72c36e1c4cad79926efc07cb)) - Update network scheme docs - ([2ae744c](https://github.com/0x676e67/wreq/commit/2ae744cb185c2fbb512b72ac1d607c4be11408b1)) - Update `Client` docs - ([8af9f1a](https://github.com/0x676e67/wreq/commit/8af9f1ad4e07ca62f9ea1bbf2c9e54d82869da0a)) ### Performance - Improve network scheme to avoid unnecessary clone ([#333](https://github.com/0x676e67/wreq/issues/333)) - ([a1cb889](https://github.com/0x676e67/wreq/commit/a1cb88944ea6d537349f4d5d3af50f00bb6beaa6)) ### Styling - Destructive updates, standard naming style ([#315](https://github.com/0x676e67/wreq/issues/315)) - ([247a26f](https://github.com/0x676e67/wreq/commit/247a26f1b883f4ebe95e4df1815e44472387b317)) - Format code style - ([bd1a837](https://github.com/0x676e67/wreq/commit/bd1a83742e35a88e83c1e7d05f8b74080e67025d)) - Format code style ([#314](https://github.com/0x676e67/wreq/issues/314)) - ([509977f](https://github.com/0x676e67/wreq/commit/509977f22846d8f22ad0b9588dbb1f4272121143)) ### Miscellaneous Tasks - *(http)* Fmt code - ([d66b156](https://github.com/0x676e67/wreq/commit/d66b156a2a21d29c4d4f1c02cd04fa8f44feb72c)) - *(rewin)* Inline hotspot code - ([23cc53b](https://github.com/0x676e67/wreq/commit/23cc53b04f1825d0a729aeedd9dc93bcaebe0561)) - *(rt)* Inline hotspot code - ([8cd9199](https://github.com/0x676e67/wreq/commit/8cd9199ea680c59bcbc4681cec8e8a962b37e37f)) - Optional enable http2 tracing ([#335](https://github.com/0x676e67/wreq/issues/335)) - ([83918e1](https://github.com/0x676e67/wreq/commit/83918e1dcc1922a1989b7a5f0070081b0efe3c49)) - Fmt code - ([2feee9c](https://github.com/0x676e67/wreq/commit/2feee9c1da1004530f563a30bfd6e43eb88bd7c0)) - Simplify dependency version settings - ([f4f1e76](https://github.com/0x676e67/wreq/commit/f4f1e761166887b12cc192a22c29d685eb4046eb)) - Update examples - ([dece4f0](https://github.com/0x676e67/wreq/commit/dece4f093c5842b5387f0ab2da9aa2bff27db699)) - Format code - 
([85b6795](https://github.com/0x676e67/wreq/commit/85b67951cee90ad3a98a9fceafd5382728c3a98f)) - Fmt code - ([269d11d](https://github.com/0x676e67/wreq/commit/269d11dfe3356ac97ed73d31f4690417ad3f3a65)) ### Deps - *(boring2)* Pin 4.13.0 version ([#331](https://github.com/0x676e67/wreq/issues/331)) - ([9272524](https://github.com/0x676e67/wreq/commit/9272524fc73e6a32a682e00bec39ff1474ed1703)) - *(hyper2)* Pin 1.5.0 version ([#330](https://github.com/0x676e67/wreq/issues/330)) - ([a638cd3](https://github.com/0x676e67/wreq/commit/a638cd3a2c248f9bb3eb39f5a077da1b2610e7d9)) - *(tower)* Pin version v0.5.2 - ([0973fef](https://github.com/0x676e67/wreq/commit/0973fefe13bd2d8656a0d5ca66bba8f398eed0f9)) - *(tower-layer)* Remove unused deps ([#322](https://github.com/0x676e67/wreq/issues/322)) - ([e446b61](https://github.com/0x676e67/wreq/commit/e446b61015076209c8b882bb01b2d92eda54cc2e)) ### Workflows - *(linux)* Remove unused deps install - ([4fe26e8](https://github.com/0x676e67/wreq/commit/4fe26e8d7fcbf3dcbabae77d51f4ca37be15573e)) - Add `rc` version check - ([708e77b](https://github.com/0x676e67/wreq/commit/708e77b697b546bb59b8b777b51a65dc88c9da24)) ## New Contributors ❤️ * @bkn9hs made their first contribution in [#328](https://github.com/0x676e67/wreq/pull/328) * @UwUDev made their first contribution in [#290](https://github.com/0x676e67/wreq/pull/290) ## [1.5.0](https://github.com/0x676e67/wreq/compare/v1.3.6..v1.5.0) - 2025-01-11 ### Features - *(client)* Add chain settings of client - ([42b08a1](https://github.com/0x676e67/wreq/commit/42b08a15c669573b6e955967e9218b20ee869960)) - *(client)* Optional cross-origin redirect proxy authentication ([#304](https://github.com/0x676e67/wreq/issues/304)) - ([fcdac5d](https://github.com/0x676e67/wreq/commit/fcdac5d643e65e53597a9d7de6a21bffddb6032c)) - *(client)* Expose default headers as public API ([#296](https://github.com/0x676e67/wreq/issues/296)) - 
([00e4199](https://github.com/0x676e67/wreq/commit/00e419908cc16376015be20ffc426a57ec327b40)) - *(multipart)* Expose a Form::into_stream() method on async multipart forms ([#303](https://github.com/0x676e67/wreq/issues/303)) - ([f46563f](https://github.com/0x676e67/wreq/commit/f46563f294239bd6924ca4d01ee9c3a07df8a515)) - *(proxy)* Remove system proxy cache ([#309](https://github.com/0x676e67/wreq/issues/309)) - ([7992c93](https://github.com/0x676e67/wreq/commit/7992c9321979d2f61bc96bbb54a84248a1bb566b)) - *(tls)* Optional disable SSL renegotiation ([#306](https://github.com/0x676e67/wreq/issues/306)) - ([c9c0dd3](https://github.com/0x676e67/wreq/commit/c9c0dd301301003e206ff9f3230532b879e2c994)) ### Bug Fixes - Fix `Request` `try_clone` missing variables ([#301](https://github.com/0x676e67/wreq/issues/301)) - ([ca1c0fa](https://github.com/0x676e67/wreq/commit/ca1c0fa19c8d15b153e5e021f851e73c1489f23f)) ### Refactor - *(websocket)* Change parameters to `Cow` types for improved flexibility ([#298](https://github.com/0x676e67/wreq/issues/298)) - ([aff5af9](https://github.com/0x676e67/wreq/commit/aff5af9a6ab7e64269d7b113fe42b1c40325282f)) - Rename mod `scheme` with `network` - ([dceb375](https://github.com/0x676e67/wreq/commit/dceb37573b65ac172d367b8a5bcd3dd891a34431)) ### Documentation - *(tls)* Update docs - ([f7b564b](https://github.com/0x676e67/wreq/commit/f7b564b4ed115a67a3db5c260a53f93bf27bcb48)) ### Performance - *(pool)* Reduce lock scope to decrease contention ([#308](https://github.com/0x676e67/wreq/issues/308)) - ([6b0c27c](https://github.com/0x676e67/wreq/commit/6b0c27ce0b6d6bb123dde3fc114496b37ad3536f)) ### Miscellaneous Tasks - *(websocket)* Simplify URL scheme matching and error handling logic ([#302](https://github.com/0x676e67/wreq/issues/302)) - ([901b397](https://github.com/0x676e67/wreq/commit/901b397c87dfffaf80e250492d6c3b73022066f4)) - *(websocket)* Remove deprecated function ([#297](https://github.com/0x676e67/wreq/issues/297)) - 
([427edf6](https://github.com/0x676e67/wreq/commit/427edf6e5dbaa0969239bf6073d4c5a4d56baf7a)) - Annotating default values improves maintainability - ([a043290](https://github.com/0x676e67/wreq/commit/a043290c1e925a002cbbf4c6d2848a6e3073a909)) - Update websocket bad url handler - ([38eee48](https://github.com/0x676e67/wreq/commit/38eee48b0948c95cd1e3f24eb66284f787545ad0)) - Add `#[inline]` to `cookie_store_mut` - ([6fc11c5](https://github.com/0x676e67/wreq/commit/6fc11c5f4ad81ded8d37cff685e79476b603a888)) - Simplify template macro usage for platform-specific config ([#299](https://github.com/0x676e67/wreq/issues/299)) - ([675f198](https://github.com/0x676e67/wreq/commit/675f1985acf54eb27834393e80e3b0fa2c170aca)) ### Build - *(deps)* Update windows-registry requirement from 0.3.0 to 0.4.0 ([#295](https://github.com/0x676e67/wreq/issues/295)) - ([5a6fab4](https://github.com/0x676e67/wreq/commit/5a6fab4f3a50765afc155f1641cd2558af5c8693)) - *(deps)* Update env_logger requirement from 0.10.0 to 0.11.6 ([#294](https://github.com/0x676e67/wreq/issues/294)) - ([a483462](https://github.com/0x676e67/wreq/commit/a483462cd97e6ebf6a6df932b39c44578b48bfb8)) - Fix conditional compilation ([#307](https://github.com/0x676e67/wreq/issues/307)) - ([358a6ec](https://github.com/0x676e67/wreq/commit/358a6ecec2e59bb91ac962ffe7423041b1cb5ce4)) ## [1.3.6](https://github.com/0x676e67/wreq/compare/v1.3.5..v1.3.6) - 2025-01-08 ### Features - *(websocket)* Add `with_builder` method to modify request builder before sending ([#288](https://github.com/0x676e67/wreq/issues/288)) - ([ff9e9f2](https://github.com/0x676e67/wreq/commit/ff9e9f2cb5f1817c6b0187aaa6095a87e386a3d2)) - Support `Apple` devices to bind device interface ([#293](https://github.com/0x676e67/wreq/issues/293)) - ([a71a460](https://github.com/0x676e67/wreq/commit/a71a46065b4f96200decc47891333ce699631b3f)) ### Bug Fixes - *(test)* Resolve test failures due to invalid upstream certificate site - 
([1897e3a](https://github.com/0x676e67/wreq/commit/1897e3aa51b38f032bf246f57e04df3e3aa5f434)) ### Performance - *(pool)* Reduce `Dst` cloning overhead with `Arc` for `PoolKey` ([#289](https://github.com/0x676e67/wreq/issues/289)) - ([1946826](https://github.com/0x676e67/wreq/commit/194682691d448d1196cf37a34b3e89a3a4af76e9)) ### Testing - *(connector-layer)* Sync upstream connector layers tests ([#285](https://github.com/0x676e67/wreq/issues/285)) - ([9d772f0](https://github.com/0x676e67/wreq/commit/9d772f03cac1c9679afe134fb8e5926df1db199b)) ### Miscellaneous Tasks - Remove unused crate path prefix - ([d0ca971](https://github.com/0x676e67/wreq/commit/d0ca971ca58b93c3d1a1f90174a7abd633404eda)) - Sync upstream `From<http::Response<Body>> for Response` - ([954a807](https://github.com/0x676e67/wreq/commit/954a80789bc4fb69fefaa74a2db19767fe2f5bce)) - Fmt code - ([f3aeb61](https://github.com/0x676e67/wreq/commit/f3aeb61a72943abb33ce33bb1824d46545c3230b)) - Improved type convert ([#284](https://github.com/0x676e67/wreq/issues/284)) - ([7ab1f2f](https://github.com/0x676e67/wreq/commit/7ab1f2f25734b9af78607b66e0406d644c39fb49)) ### Revert - Remove `From<http::Response<Body>> for Response` ([#282](https://github.com/0x676e67/wreq/issues/282)) - ([1e69245](https://github.com/0x676e67/wreq/commit/1e69245677517daaa8ec10ca64d347457925cb38)) ## New Contributors ❤️ * @honeyspoon made their first contribution in [#282](https://github.com/0x676e67/wreq/pull/282) ## [1.3.5](https://github.com/0x676e67/wreq/compare/v1.3.3..v1.3.5) - 2025-01-06 ### Features - *(multipart)* Sync upstream file multipart ([#278](https://github.com/0x676e67/wreq/issues/278)) - ([49a3f06](https://github.com/0x676e67/wreq/commit/49a3f06c40942c8b0a600058e769c21dc9d7200a)) - *(request)* Insert header differentiates between append and overwrite ([#274](https://github.com/0x676e67/wreq/issues/274)) - ([c0026ca](https://github.com/0x676e67/wreq/commit/c0026caaa69ead0d42efba051308c87be21f4ab7)) - *(request)* Add general HTTP authentication method 
([#270](https://github.com/0x676e67/wreq/issues/270)) - ([5c3facb](https://github.com/0x676e67/wreq/commit/5c3facb9c575658b2171e154b8386d54921b0af6)) ### Bug Fixes - *(redirect)* Fix redirect test - ([9f4bd3f](https://github.com/0x676e67/wreq/commit/9f4bd3fc241aaec158b4cd4e7377fb959459f9c6)) - *(test)* Fix proxy test - ([475752e](https://github.com/0x676e67/wreq/commit/475752e49e438ab3100c9e54082ea9b18bfdb33a)) - *(timeout)* Fix timeout test - ([0bf0422](https://github.com/0x676e67/wreq/commit/0bf0422a6b950e9c72ad642927a1781531f17e03)) - Fix migration hyper1 missing `TokioTimer` ([#275](https://github.com/0x676e67/wreq/issues/275)) - ([a2e8b47](https://github.com/0x676e67/wreq/commit/a2e8b47a80a3272bc621a7d83fd7c8262be6a6d1)) ### Documentation - Update `http2`/`network` docs ([#273](https://github.com/0x676e67/wreq/issues/273)) - ([5edaa93](https://github.com/0x676e67/wreq/commit/5edaa9311c255ceb1204c7bb6c90d2f716f4628b)) ### Testing - *(timeout)* Ignore the test in Tunnel VPN environment ([#279](https://github.com/0x676e67/wreq/issues/279)) - ([156fd1b](https://github.com/0x676e67/wreq/commit/156fd1b6b4f2b8a495dc6b446bd612881bacf3a5)) - Ignore doc test ([#276](https://github.com/0x676e67/wreq/issues/276)) - ([5275c6b](https://github.com/0x676e67/wreq/commit/5275c6b1eee50108061682758d67524c7a40547f)) - Remove unused wasm test - ([25166c9](https://github.com/0x676e67/wreq/commit/25166c977aceb05e752d7b973af6ef3a72cbca4e)) ### Miscellaneous Tasks - *(cookie)* Use `RwLock` types that do not poison themselves ([#268](https://github.com/0x676e67/wreq/issues/268)) - ([dcbd79d](https://github.com/0x676e67/wreq/commit/dcbd79dd324483442ccb715ac277b7ec82be93d3)) - Add all features tests - ([138c43a](https://github.com/0x676e67/wreq/commit/138c43aacb7d753c1ebde15effa6a457a8260dd1)) - Sync upstream tests - ([b782282](https://github.com/0x676e67/wreq/commit/b78228289d86fb93c1e301bf5b367a0f698b15d8)) - Remove unused feature - 
([668009d](https://github.com/0x676e67/wreq/commit/668009d641294f8ad227083318447455f3995c00)) - Cargo clippy fix all-features - ([1e45f60](https://github.com/0x676e67/wreq/commit/1e45f60d23d8d03a0567ba2c9bb0b1e414714b4e)) - Remove unused code - ([aa427f5](https://github.com/0x676e67/wreq/commit/aa427f5ecf01762c5cd45ae1690f6654eb20dc46)) ### Build - Fix linux build ([#277](https://github.com/0x676e67/wreq/issues/277)) - ([014e026](https://github.com/0x676e67/wreq/commit/014e02647a4c1f2264f7151576c7350425e59cb7)) ### Deps - Replace `futures_core` with `futures_util` ([#269](https://github.com/0x676e67/wreq/issues/269)) - ([ce9ac8d](https://github.com/0x676e67/wreq/commit/ce9ac8d36ba901b3271ddb879dc34bc65e1dd723)) ## [1.3.3](https://github.com/0x676e67/wreq/compare/v1.3.2..v1.3.3) - 2025-01-05 ### Features - *(mimic)* Add Tor browser `Firefox 128` mimic ([#267](https://github.com/0x676e67/wreq/issues/267)) - ([f69f660](https://github.com/0x676e67/wreq/commit/f69f6605de49c13f44006355d31ad9abaac3e060)) - *(mimic)* Optional mimic http2 ([#262](https://github.com/0x676e67/wreq/issues/262)) - ([6e44e17](https://github.com/0x676e67/wreq/commit/6e44e17695f91336a19b69cd0ec12843d9a8ca7a)) ### Miscellaneous Tasks - Simplify http2 configuration - ([34700d1](https://github.com/0x676e67/wreq/commit/34700d1ccae4977f2a0a5b34cd4e9a10b68d6ecc)) ### Deps - *(pool)* Replace `futures_channel::mpsc` with `tokio::sync::mpsc` in Hyper ([#264](https://github.com/0x676e67/wreq/issues/264)) - ([f4895fb](https://github.com/0x676e67/wreq/commit/f4895fb8dbb47d7d10563259a500aae57fcf7bb6)) ## [1.3.2](https://github.com/0x676e67/wreq/compare/v1.3.0..v1.3.2) - 2025-01-04 ### Miscellaneous Tasks - Fix typo - ([0a095ce](https://github.com/0x676e67/wreq/commit/0a095cef2ff9443898c11531be32aa18984a10e2)) - Rename and update access scope - ([607da50](https://github.com/0x676e67/wreq/commit/607da5005d9e2020582d961e0f0906b90b658681)) ## [1.3.0](https://github.com/0x676e67/wreq/compare/v1.2.6..v1.3.0) - 
2025-01-04 ### Refactor - *(tls)* Refactor Application-layer protocol settings ([#260](https://github.com/0x676e67/wreq/issues/260)) - ([bc8b824](https://github.com/0x676e67/wreq/commit/bc8b8246779509209077506511ad2e8ccd580ba5)) - Rename `HttpVersionPref` to `AlpnProtos` ([#258](https://github.com/0x676e67/wreq/issues/258)) - ([e99ec7a](https://github.com/0x676e67/wreq/commit/e99ec7a8aaf8047a726293099cedf8919bf622ba)) ### Documentation - *(tls)* Update docs - ([db3ee6c](https://github.com/0x676e67/wreq/commit/db3ee6c8418afabc05659c76626f775931537369)) - *(tls)* Update docs - ([ad389e5](https://github.com/0x676e67/wreq/commit/ad389e5c92327e41eb4a3aa239c63d17bd51ec9d)) - *(tls)* Update docs ([#261](https://github.com/0x676e67/wreq/issues/261)) - ([309e62f](https://github.com/0x676e67/wreq/commit/309e62f47bdd68b5f89cb41bcfa8629517a00e79)) ### Miscellaneous Tasks - *(mimic)* Always inline settings module - ([630e28f](https://github.com/0x676e67/wreq/commit/630e28f529baa21a2d5bf780be2003c3dfac6618)) - *(tls)* Always inline alps proto len - ([5b33bc5](https://github.com/0x676e67/wreq/commit/5b33bc560cf394ef8022a14acd2602307a7f9535)) - *(tls)* Cleaner bind calls - ([3ddbb64](https://github.com/0x676e67/wreq/commit/3ddbb64d0f2c7492fc1a6a9a8ff81f23f4e152d1)) - *(tls)* Renaming cumbersome API names - ([1021cb1](https://github.com/0x676e67/wreq/commit/1021cb10eb0338685b313cb606a1576153ad07cf)) - Improve verbose certificate configuration ([#256](https://github.com/0x676e67/wreq/issues/256)) - ([67eb333](https://github.com/0x676e67/wreq/commit/67eb333f965724cf1fd40c6314c274aa1ab08c72)) ## [1.2.6](https://github.com/0x676e67/wreq/compare/v1.2.5..v1.2.6) - 2025-01-03 ### Miscellaneous Tasks - *(tls/ext)* Clearer naming - ([a0f5e64](https://github.com/0x676e67/wreq/commit/a0f5e643dc55379b193e3d644038c79ef81c7a7b)) - Inline suggestions - ([978198d](https://github.com/0x676e67/wreq/commit/978198d4154c80052f7d889d99fbc6de2435a07b)) - Simplify method signatures - 
([9bdc01d](https://github.com/0x676e67/wreq/commit/9bdc01d75cc8d767470cbacb09980792907d86f2)) - Internal request for redundant method boundary ([#253](https://github.com/0x676e67/wreq/issues/253)) - ([a252cd1](https://github.com/0x676e67/wreq/commit/a252cd1784c982b378da0afb32793684558326ac)) ### Performance - Build request failures return errors instead of panic ([#254](https://github.com/0x676e67/wreq/issues/254)) - ([1dbc67c](https://github.com/0x676e67/wreq/commit/1dbc67c1eed981da6c81f02f535df286f43c571a)) ## [1.2.5](https://github.com/0x676e67/wreq/compare/v1.2.1..v1.2.5) - 2025-01-02 ### Features - *(client)* Improved set cookie operation ([#252](https://github.com/0x676e67/wreq/issues/252)) - ([e94d742](https://github.com/0x676e67/wreq/commit/e94d74253a3f2b603c82db95343ceca3ec8ff812)) - *(tls)* Expose `CertCompressionAlgorithm` as public API ([#247](https://github.com/0x676e67/wreq/issues/247)) - ([0a6cbc6](https://github.com/0x676e67/wreq/commit/0a6cbc6660d3b3321d3df219bc5d807c2652c553)) - *(tls)* Expose `TlsExtension` as public API ([#246](https://github.com/0x676e67/wreq/issues/246)) - ([98a18b3](https://github.com/0x676e67/wreq/commit/98a18b347568ff20db485e78a577ac812c9be38f)) ### Bug Fixes - Align the cfg compilation with the socket2 ([#245](https://github.com/0x676e67/wreq/issues/245)) - ([3122a32](https://github.com/0x676e67/wreq/commit/3122a329f4bfc1acafd8b6b0ad323c6e23db29e5)) - Fix default TLS configuration hostname not set ([#244](https://github.com/0x676e67/wreq/issues/244)) - ([44b8216](https://github.com/0x676e67/wreq/commit/44b8216858fb1386ca1104b4d56234455e934e2d)) ### Refactor - Rename verbose identifiers for clarity - ([f1ebb79](https://github.com/0x676e67/wreq/commit/f1ebb7906f3f81e7047ad6bbc1387c12ccfe5ef5)) - Responsibility-based module division - ([c3129ca](https://github.com/0x676e67/wreq/commit/c3129cad6b7405b2c52d4750e337060e4c1175c3)) ### Documentation - Update docs ([#243](https://github.com/0x676e67/wreq/issues/243)) - 
([18d8934](https://github.com/0x676e67/wreq/commit/18d89342d4194ab37f5dfe00a3ba65509bc4ff7a)) ### Performance - Improve HTTP request in HTTPS connector ([#242](https://github.com/0x676e67/wreq/issues/242)) - ([2a99fd4](https://github.com/0x676e67/wreq/commit/2a99fd4ed667a77a8f9fba9607372750202a5c70)) ### Miscellaneous Tasks - *(client)* Avoid explicit type declarations - ([44d22ef](https://github.com/0x676e67/wreq/commit/44d22ef2de58cbd92720505c216e7490498be36b)) - *(tls)* Simplify certificate loading configuration ([#249](https://github.com/0x676e67/wreq/issues/249)) - ([87275fc](https://github.com/0x676e67/wreq/commit/87275fc96d0cb6f7dee38f4945377a43d95ba377)) - Add build all features - ([1148155](https://github.com/0x676e67/wreq/commit/114815563c007d56f343d4d55e92005ce487f309)) - Some insignificant update - ([ad20677](https://github.com/0x676e67/wreq/commit/ad20677e88a0d13cec44f5e2690d0e0c9df506fa)) - Rename to - ([a97be9f](https://github.com/0x676e67/wreq/commit/a97be9fdaaf708adb4fc165c1ec8ba5cb11f4a47)) - Fix closure capture ownership - ([e0c55f0](https://github.com/0x676e67/wreq/commit/e0c55f0bd11a1061dfe9f7f422fada7e87cc08d9)) ## New Contributors ❤️ * @sudorf0 made their first contribution ## [1.2.1](https://github.com/0x676e67/wreq/compare/v1.2.0..v1.2.1) - 2024-12-31 ### Miscellaneous Tasks - Using normal array storage - ([3ce9040](https://github.com/0x676e67/wreq/commit/3ce9040e791ab31ea9a8992e9219c771e56863ca)) ## New Contributors ❤️ * @coutureone made their first contribution * @8176917 made their first contribution ## [1.2.0](https://github.com/0x676e67/wreq/compare/v1.1.2..v1.2.0) - 2024-12-31 ### Features - *(client)* Add HTTP2 `Priority` frame configuration ([#238](https://github.com/0x676e67/wreq/issues/238)) - ([8c75d75](https://github.com/0x676e67/wreq/commit/8c75d7507a35e6dd7ad7d045c7e5ae1e772598dd)) - Add `Firefox 117` impersonate ([#239](https://github.com/0x676e67/wreq/issues/239)) - 
([cae2f6d](https://github.com/0x676e67/wreq/commit/cae2f6df217780ecaa4fd073ef12af597913e321)) ## [1.1.2](https://github.com/0x676e67/wreq/compare/v1.1.1..v1.1.2) - 2024-12-31 ### Features - Add verify hostname configuration ([#237](https://github.com/0x676e67/wreq/issues/237)) - ([3478e11](https://github.com/0x676e67/wreq/commit/3478e1110bc5d4819eec4d66bf2a09369199ca29)) ### Miscellaneous Tasks - Update comment - ([2252652](https://github.com/0x676e67/wreq/commit/22526524f0ccf36763fd2bd90a439c5e95efafd3)) ## [1.1.1](https://github.com/0x676e67/wreq/compare/v1.1.0..v1.1.1) - 2024-12-30 ### Bug Fixes - *(decoder)* Fix decoding extra empty frame ([#234](https://github.com/0x676e67/wreq/issues/234)) - ([d8118bc](https://github.com/0x676e67/wreq/commit/d8118bc3d141726d2f5e7a8232c8a07f5865efa2)) ### Performance - *(tls)* Use `Bytes` to optimize session key storage space ([#231](https://github.com/0x676e67/wreq/issues/231)) - ([1bd9db0](https://github.com/0x676e67/wreq/commit/1bd9db0d8aceb128a899ad5a0c0a651e10632b10)) - Improve unnecessary convert when setting cookies ([#233](https://github.com/0x676e67/wreq/issues/233)) - ([2720bc4](https://github.com/0x676e67/wreq/commit/2720bc4e231530825051faf945f67b4d6fe9bb06)) - `default_headers` will swap default headers ([#232](https://github.com/0x676e67/wreq/issues/232)) - ([3a737f0](https://github.com/0x676e67/wreq/commit/3a737f0eb5cdf40d178c72863a2148a2119b2cca)) ### Miscellaneous Tasks - Remove escape characters - ([0de340c](https://github.com/0x676e67/wreq/commit/0de340cbc495eacb733658e4f249797bda5f32b3)) - Remove unused import - ([ab0ea9c](https://github.com/0x676e67/wreq/commit/ab0ea9cffccaec71080898ed6fd8ad7432ad2dc3)) - Cargo clippy --fix - ([7c5369d](https://github.com/0x676e67/wreq/commit/7c5369dc4ee32cebaf7ecb77f946df541fa2eee9)) - Remove unused code - ([aa9c7d8](https://github.com/0x676e67/wreq/commit/aa9c7d872fff06e5abe3eb5ffbc98c80ca481930)) ## [1.1.0](https://github.com/0x676e67/wreq/compare/v1.0.1..v1.1.0) - 
2024-12-27 ### Features - *(request)* Insert when `json`/`form` does not have `CONTENT_TYPE` header ([#230](https://github.com/0x676e67/wreq/issues/230)) - ([80c338a](https://github.com/0x676e67/wreq/commit/80c338a835ed9b7015bc63415a44905aa64c61b2)) - Without compression enabled, no compression header is sent ([#229](https://github.com/0x676e67/wreq/issues/229)) - ([79355d7](https://github.com/0x676e67/wreq/commit/79355d752334955eb27994f8e2c2acef9e828d66)) ### Bug Fixes - Username in URL plus basic_auth() results in two Authorization headers ([#228](https://github.com/0x676e67/wreq/issues/228)) - ([8398835](https://github.com/0x676e67/wreq/commit/8398835855dfd07fe162a8747b703a82aef4ee84)) ## [1.0.1](https://github.com/0x676e67/wreq/compare/v1.0.0..v1.0.1) - 2024-12-27 ### Miscellaneous Tasks - Cargo clippy --fix - ([389e32a](https://github.com/0x676e67/wreq/commit/389e32a05f97f6dcdbecf8235049da5ce8e37914)) - Update alpn protocol order ([#226](https://github.com/0x676e67/wreq/issues/226)) - ([d920df3](https://github.com/0x676e67/wreq/commit/d920df3a9bbf02678664f90fab2b815f49c9c067)) ## [1.0.0](https://github.com/0x676e67/wreq/compare/v1.0.0-rc.3..v1.0.0) - 2024-12-25 ### Features - *(client)* Add `no-keepalive` for `Client` ([#221](https://github.com/0x676e67/wreq/issues/221)) - ([20ac5bf](https://github.com/0x676e67/wreq/commit/20ac5bfc17712dc703e479c6e88ac071ae760bdd)) - Request specific `address`/`interface` override ([#223](https://github.com/0x676e67/wreq/issues/223)) - ([7ea06e1](https://github.com/0x676e67/wreq/commit/7ea06e1ac1b0073311596c643f1d92dbafeffa2b)) ### Miscellaneous Tasks - Cargo clippy --fix - ([8d766f6](https://github.com/0x676e67/wreq/commit/8d766f6601503d7a2a2ad62e7d416c67ae6d46f8)) ## [1.0.0-rc.3](https://github.com/0x676e67/wreq/compare/v1.0.0-rc.2..v1.0.0-rc.3) - 2024-12-25 ### Features - Optional to enable impersonate customization ([#217](https://github.com/0x676e67/wreq/issues/217)) - 
([f68de0b](https://github.com/0x676e67/wreq/commit/f68de0b6d5048014b83d887005a5c838f5eb1d31)) ### Performance - Avoiding Unnecessary Copies ([#219](https://github.com/0x676e67/wreq/issues/219)) - ([6f6c660](https://github.com/0x676e67/wreq/commit/6f6c6609aaf78d508d5e7184fd92ce99d6d0f70e)) ### Miscellaneous Tasks - *(util/client)* Remove extra clones - ([72697ca](https://github.com/0x676e67/wreq/commit/72697ca2455487bf856ab256433b3b7779dea433)) - Fix clippy accidentally deleted code ([#220](https://github.com/0x676e67/wreq/issues/220)) - ([200e3f4](https://github.com/0x676e67/wreq/commit/200e3f4e487c8010a37c929c2ceefaf2dc61996d)) - Update macros ([#218](https://github.com/0x676e67/wreq/issues/218)) - ([2f977a1](https://github.com/0x676e67/wreq/commit/2f977a19196a67893b9dd4d74daf6b76632187fe)) - Remove unnecessary `Arc` wrapper from `redirect`/`base_url` ([#216](https://github.com/0x676e67/wreq/issues/216)) - ([3787346](https://github.com/0x676e67/wreq/commit/3787346539188082a8bf58536cf26baae32780e1)) ## [1.0.0-rc.2](https://github.com/0x676e67/wreq/compare/v1.0.0-rc.1..v1.0.0-rc.2) - 2024-12-24 ### Features - Allow pluggable tower layers in connector service stack ([#214](https://github.com/0x676e67/wreq/issues/214)) - ([4b07f13](https://github.com/0x676e67/wreq/commit/4b07f139570e3f072b68c654bfd5b29a5ea47341)) ### Bug Fixes - Propagate Body::size_hint when wrapping bodies ([#213](https://github.com/0x676e67/wreq/issues/213)) - ([e05a781](https://github.com/0x676e67/wreq/commit/e05a781a7b2be9a39cb6c9a8689c389e9a8f92ec)) ### Miscellaneous Tasks - Remove `clone` from `Dst` - ([9885d91](https://github.com/0x676e67/wreq/commit/9885d91c7cc199b4edfb1296581b27bda368b148)) - Remove `new` method for `InnerRequestBuilder` ([#212](https://github.com/0x676e67/wreq/issues/212)) - ([6b64a60](https://github.com/0x676e67/wreq/commit/6b64a6010c9ae3835427aace4c25ea25eaee4588)) - Cargo clippy --fix - 
([908b284](https://github.com/0x676e67/wreq/commit/908b2842c27b3179f8f9509715c8a0ee46f0cb77)) ## [1.0.0-rc.1](https://github.com/0x676e67/wreq/compare/v0.33.5..v1.0.0-rc.1) - 2024-12-24 ### Features - *(body)* Improve interop with hyper for `Body` type - ([ef73639](https://github.com/0x676e67/wreq/commit/ef7363920143efec31b5400d0ea408699f1053e7)) - *(client)* Request specific proxy override ([#211](https://github.com/0x676e67/wreq/issues/211)) - ([a547b0e](https://github.com/0x676e67/wreq/commit/a547b0e4c11bdd9ce990af891eaaed9d1c004ab1)) - *(client)* Add impl `Service>` for `Client` ([#202](https://github.com/0x676e67/wreq/issues/202)) - ([88dcf59](https://github.com/0x676e67/wreq/commit/88dcf59056c16d8b6fc6bec3a082d1be1c4e3df7)) - *(client)* Export `http1`/`http2` Builder as public API - ([2ce96f6](https://github.com/0x676e67/wreq/commit/2ce96f6f61daa5a08a055ebfc05d4cf231126323)) - *(client)* Export `http1`/`http2` Builder as public API ([#199](https://github.com/0x676e67/wreq/issues/199)) - ([fb3d72b](https://github.com/0x676e67/wreq/commit/fb3d72b78deca6f51e201ab803a7e3644c9286a7)) - *(client)* Add the maximum safe retry count for HTTP/2 connections ([#196](https://github.com/0x676e67/wreq/issues/196)) - ([2f8ff8c](https://github.com/0x676e67/wreq/commit/2f8ff8ca783f1ef88950f391b29034aa03636cff)) - Support request setting HTTP override ALPN ([#188](https://github.com/0x676e67/wreq/issues/188)) - ([f3af980](https://github.com/0x676e67/wreq/commit/f3af9801761915ac2f031314e9d46ff31538050e)) - Hyper v1 upgrade ([#187](https://github.com/0x676e67/wreq/issues/187)) - ([3441ee7](https://github.com/0x676e67/wreq/commit/3441ee76640b3d9273e7b3617972ef683655cc3a)) ### Bug Fixes - *(http2)* Fix http2 header frame initial `stream_id` settings ([#185](https://github.com/0x676e67/wreq/issues/185)) - ([2f773be](https://github.com/0x676e67/wreq/commit/2f773be0da6e963ca823ddbe0e2d9583a8b62aa7)) - Fix http protocol auto-negotiation 
([#189](https://github.com/0x676e67/wreq/issues/189)) - ([d144b63](https://github.com/0x676e67/wreq/commit/d144b6356a01b561d50f774243fa3555ab9d7b52)) ### Miscellaneous Tasks - *(pool)* Use `Mutex` types that do not poison themselves ([#192](https://github.com/0x676e67/wreq/issues/192)) - ([dec4d82](https://github.com/0x676e67/wreq/commit/dec4d8265356a065ff8a406344898ebc19895e71)) - *(tls)* Disable custom TLS builder ([#208](https://github.com/0x676e67/wreq/issues/208)) - ([bb12473](https://github.com/0x676e67/wreq/commit/bb12473723a73139226c6b4845acc85815b543c7)) - *(tls)* Compile-time calculation of extended permutation ([#207](https://github.com/0x676e67/wreq/issues/207)) - ([871ab3b](https://github.com/0x676e67/wreq/commit/871ab3bc4838842d60c300291e1c6c4f83d1b58c)) - Refactor connect network request extension ([#210](https://github.com/0x676e67/wreq/issues/210)) - ([f4e67ef](https://github.com/0x676e67/wreq/commit/f4e67ef76340c6b5d21944385339b723829c697a)) - By default, impersonate from a string is disabled ([#206](https://github.com/0x676e67/wreq/issues/206)) - ([35f7f11](https://github.com/0x676e67/wreq/commit/35f7f11c67638af54e79565d679d55068f162f7a)) - Removed TLS config examples to prevent misconfigurations by inexperienced users ([#205](https://github.com/0x676e67/wreq/issues/205)) - ([48d1f5b](https://github.com/0x676e67/wreq/commit/48d1f5b86a885a86a3be2af1694d6328b360f1f9)) - Disable the exposure of internal connect dst API ([#203](https://github.com/0x676e67/wreq/issues/203)) - ([35994c2](https://github.com/0x676e67/wreq/commit/35994c25ded24cfcb57877cf4e1b859e39b989f7)) - Remove unused code - ([663e346](https://github.com/0x676e67/wreq/commit/663e346bce7bfd0090374f99df1b6152ca7eb644)) - Remove unused code - ([0d4f06f](https://github.com/0x676e67/wreq/commit/0d4f06f7ab5769aa80828004f7da74df6a63afe9)) - Deleted permutation storage - ([39e1ef6](https://github.com/0x676e67/wreq/commit/39e1ef6ccd2382c8b9a00873341092df4876df7f)) - Use shorter feature name - 
([4246a0f](https://github.com/0x676e67/wreq/commit/4246a0fd6d72ee68d0441de57b3c84f2e9c5b879)) - Remove dead code - ([f516b0a](https://github.com/0x676e67/wreq/commit/f516b0a85e48edfd08848fa1be3af7451fd2a7fd)) - Refactor connect layer detail handle ([#198](https://github.com/0x676e67/wreq/issues/198)) - ([eff1fee](https://github.com/0x676e67/wreq/commit/eff1fee3489f01d47bd406f8632a303863dc1522)) - Refactor connect mod - ([7ecbd25](https://github.com/0x676e67/wreq/commit/7ecbd25f2611161a539bd57e8d6b4945f6ab433a)) - Remove unused code - ([4ef7db6](https://github.com/0x676e67/wreq/commit/4ef7db685884d91aad7221753a9280f6fd1e5891)) - Cleaned up some unnecessary code ([#194](https://github.com/0x676e67/wreq/issues/194)) - ([1304ec1](https://github.com/0x676e67/wreq/commit/1304ec14e003c96a3d9815a43502d0d886e0ca61)) - Simplified TLS TCP stream abstraction ([#193](https://github.com/0x676e67/wreq/issues/193)) - ([273ca6c](https://github.com/0x676e67/wreq/commit/273ca6cdc732419703162a178526fa899db9087c)) - Remove unused code ([#191](https://github.com/0x676e67/wreq/issues/191)) - ([d586d56](https://github.com/0x676e67/wreq/commit/d586d563add0343cd4172974afc035b563c1897a)) - Cargo fmt --all - ([6a114f9](https://github.com/0x676e67/wreq/commit/6a114f974593e95cb21b917f54716b779a4a41d3)) - Static calc extension permutation ([#184](https://github.com/0x676e67/wreq/issues/184)) - ([1da5d42](https://github.com/0x676e67/wreq/commit/1da5d42ebbcff2eaf304dacd58e90f9b6412023f)) - Macros simplify some debug implement ([#183](https://github.com/0x676e67/wreq/issues/183)) - ([5a92fa5](https://github.com/0x676e67/wreq/commit/5a92fa58714b635c4cbc53299b8b49b9b9d11155)) - Remove dead code ([#182](https://github.com/0x676e67/wreq/issues/182)) - ([65391fb](https://github.com/0x676e67/wreq/commit/65391fb83729bcfd39ef548ebd9d24218c86c4f3)) ### Deps - *(tokio-util)* V0.7.0 ([#190](https://github.com/0x676e67/wreq/issues/190)) - 
([303abf6](https://github.com/0x676e67/wreq/commit/303abf64952d97aabd21243b9824c9d345c25343)) ## New Contributors ❤️ * @invalid-email-address made their first contribution ## [0.33.5](https://github.com/0x676e67/wreq/compare/v0.33.3..v0.33.5) - 2024-12-19 ### Features - *(client)* Http1 sends lowercase request headers by default to improve performance ([#179](https://github.com/0x676e67/wreq/issues/179)) - ([b296e0e](https://github.com/0x676e67/wreq/commit/b296e0eab4b4213516830471cf1b42de2481049f)) - Add `firefox 133` impersonate ([#181](https://github.com/0x676e67/wreq/issues/181)) - ([6710421](https://github.com/0x676e67/wreq/commit/6710421bc53916f6762053e27f1103e7f54cdd06)) ## [0.33.3](https://github.com/0x676e67/wreq/compare/v0.33.1..v0.33.3) - 2024-12-16 ### Bug Fixes - *(proxy)* Fix `ws`/`wss` upgrade support for `http`/`https` proxy ([#176](https://github.com/0x676e67/wreq/issues/176)) - ([8c3881c](https://github.com/0x676e67/wreq/commit/8c3881c87a7cbfb91701f37eb697c04b2863649d)) ## [0.33.1](https://github.com/0x676e67/wreq/compare/v0.33.0..v0.33.1) - 2024-12-16 ### Miscellaneous Tasks - Avoiding setup bloat when customizing your DNS resolver ([#174](https://github.com/0x676e67/wreq/issues/174)) - ([bc870c5](https://github.com/0x676e67/wreq/commit/bc870c542710ec548c2292ba3440490357b76e33)) - Show clear errors when TLS connector build fails ([#173](https://github.com/0x676e67/wreq/issues/173)) - ([f722ce6](https://github.com/0x676e67/wreq/commit/f722ce6578d872008a4a7c64fbbba8ddddb14db4)) ## [0.33.0] - 2024-12-15 ### Features - *(async/client)* Add try get user agent - ([c72eed6](https://github.com/0x676e67/wreq/commit/c72eed679d380693e39155d63b63284f51bccc7a)) - *(client)* Request specific cookie store override ([#171](https://github.com/0x676e67/wreq/issues/171)) - ([1357a3c](https://github.com/0x676e67/wreq/commit/1357a3ccfd09b874c2937dde5c0988281a3747c9)) - *(client)* Add support for base URL parameter - 
([6101905](https://github.com/0x676e67/wreq/commit/610190586a67b54ea5feb88d2cdbbc215bc8b9fa)) - *(client)* Add support for base URL parameter ([#159](https://github.com/0x676e67/wreq/issues/159)) - ([30530ce](https://github.com/0x676e67/wreq/commit/30530ce80149abb2da1c00d6ef8f752aea963d06)) - *(client)* Request specific redirect policy override ([#147](https://github.com/0x676e67/wreq/issues/147)) - ([cfedb58](https://github.com/0x676e67/wreq/commit/cfedb583f0df0f28c12799b2cc0e93ab2d86b10c)) - *(client)* Set `content-length` in advance for header sorting ([#144](https://github.com/0x676e67/wreq/issues/144)) - ([755cabd](https://github.com/0x676e67/wreq/commit/755cabde8c4edf91c7822ef4c08e7ce95bc2f3fe)) - *(client)* Add proxy management APIs: set, append, and clear proxies ([#132](https://github.com/0x676e67/wreq/issues/132)) - ([966fb0f](https://github.com/0x676e67/wreq/commit/966fb0f05c514b5c11c8ad18b158444a5b882f2e)) - *(client)* Add address/interface level connection pool ([#123](https://github.com/0x676e67/wreq/issues/123)) - ([877c30f](https://github.com/0x676e67/wreq/commit/877c30fc6c308fc116062622ea48f5e2568d9c19)) - *(client)* Support proxy-level connection pool ([#122](https://github.com/0x676e67/wreq/issues/122)) - ([6e4aff1](https://github.com/0x676e67/wreq/commit/6e4aff11a5268d9c39f91bd42585b610fe3f51db)) - *(client)* Limit number of connections in pool ([#118](https://github.com/0x676e67/wreq/issues/118)) - ([326d415](https://github.com/0x676e67/wreq/commit/326d41536b07592b2ba0b591b57aa7cd77e5108f)) - *(client)* Greatly improve the speed of creating clients ([#108](https://github.com/0x676e67/wreq/issues/108)) - ([27e8a55](https://github.com/0x676e67/wreq/commit/27e8a55f698fda9d0e4c42964f1bc5d580bd539b)) - *(client)* Added async client creation to reduce blocking of async runtime ([#105](https://github.com/0x676e67/wreq/issues/105)) - ([b7f36dd](https://github.com/0x676e67/wreq/commit/b7f36dd1961304bf332780b4ec04330cb9fcb975)) - *(client)* Optional 
configuration of Client TLS extension ([#78](https://github.com/0x676e67/wreq/issues/78)) - ([bab6cb6](https://github.com/0x676e67/wreq/commit/bab6cb6b766806096e083832c837f1353a22b99b)) - *(client)* Default send header names as title case (only http1) ([#61](https://github.com/0x676e67/wreq/issues/61)) - ([bf91fff](https://github.com/0x676e67/wreq/commit/bf91fffcbd91f4d92a53c5ad5bb1c5acf48606ee)) - *(client)* Adaptively select and upgrade the websocket connector ([#48](https://github.com/0x676e67/wreq/issues/48)) - ([b76070c](https://github.com/0x676e67/wreq/commit/b76070c4c3d0f48909a0be8e686ef7bd95093341)) - *(client)* Add `impersonate_with_headers` allows optionally setting request headers ([#128](https://github.com/0x676e67/wreq/issues/128)) - ([eca7cd4](https://github.com/0x676e67/wreq/commit/eca7cd4abbf030da57b92e5eb2dfa0b35ad153ee)) - *(client)* Suggest `inline` to the compiler ([#122](https://github.com/0x676e67/wreq/issues/122)) - ([532ca84](https://github.com/0x676e67/wreq/commit/532ca84a96f085ad04fc7706c310198317ad5ed0)) - *(client)* Simplify client configuration ([#110](https://github.com/0x676e67/wreq/issues/110)) - ([c12dce6](https://github.com/0x676e67/wreq/commit/c12dce66658ba610d670090744d0397ff0068c07)) - *(client)* Simplify the header configuration process - ([4a3f544](https://github.com/0x676e67/wreq/commit/4a3f54414892313eeabc5f3e602e844d1978c8aa)) - *(client)* Allow binding interface ([#92](https://github.com/0x676e67/wreq/issues/92)) - ([3156086](https://github.com/0x676e67/wreq/commit/31560869cc1d02323bc8c330b3415fe3f02ad389)) - *(client)* Add custom header order support ([#83](https://github.com/0x676e67/wreq/issues/83)) - ([4680b8a](https://github.com/0x676e67/wreq/commit/4680b8a69c7d9a33d07b13d44ddfa92a2df28c2a)) - *(client)* Add ability to set proxies/address after client has been initialised ([#34](https://github.com/0x676e67/wreq/issues/34)) - ([837266d](https://github.com/0x676e67/wreq/commit/837266dcb80a0b8b5670675b851b580206ae78a1)) 
- *(client)* Support client proxy settings ([#32](https://github.com/0x676e67/wreq/issues/32)) - ([30c0e2a](https://github.com/0x676e67/wreq/commit/30c0e2a6e4bfd1327b0ac1ad6f9e9c35e69dc632)) - *(client)* Support impersonate webSocket - ([d3c6dbf](https://github.com/0x676e67/wreq/commit/d3c6dbf272e7b6778b37b10f13cd71df67c1e791)) - *(client)* Optional enable permute_extensions - ([1aa849f](https://github.com/0x676e67/wreq/commit/1aa849fd4ad77c30815a9a9cd71838a0274f628f)) - *(client)* Optional enable_ech_grease, only effective for Chrome - ([335e038](https://github.com/0x676e67/wreq/commit/335e03848228292cfc74d3dc90695bc68db8a7d4)) - *(client)* Support configured IPv4 or IPv6 address (depending on host's preferences) before connection - ([b1f6203](https://github.com/0x676e67/wreq/commit/b1f620332640b57cc71a5cfbe718b1e81f93a1e5)) - *(connect)* Reduce unnecessary connection overhead ([#62](https://github.com/0x676e67/wreq/issues/62)) - ([225ffb9](https://github.com/0x676e67/wreq/commit/225ffb9ef3834e78570f53b62e62e9c6df451d34)) - *(connect)* Add PSK extension ([#52](https://github.com/0x676e67/wreq/issues/52)) - ([04a95ab](https://github.com/0x676e67/wreq/commit/04a95ab8d3f2feac429df28cb2ad258edd8ad45e)) - *(connector)* Using session cache to delay initialization of connector ([#78](https://github.com/0x676e67/wreq/issues/78)) - ([8bdb826](https://github.com/0x676e67/wreq/commit/8bdb8264d1fe039d3366e78880005470c3fb98fb)) - *(connector)* Enable encrypted client hello - ([4a577a1](https://github.com/0x676e67/wreq/commit/4a577a18a06b2fb930e1c2b13cd92ec0c6b05e24)) - *(dns)* Export dns resolver `HickoryDnsResolver` ([#55](https://github.com/0x676e67/wreq/issues/55)) - ([6907f48](https://github.com/0x676e67/wreq/commit/6907f48ae16f538164c3550802a9a269eeeca2d1)) - *(dns)* Optional `LookupIpStrategy` for `hickory_dns` ([#33](https://github.com/0x676e67/wreq/issues/33)) - ([7e6847a](https://github.com/0x676e67/wreq/commit/7e6847af02f8c8fb38ac0b38e80ca233b9b0d243)) - *(dns)* 
Enable happy eyeballs when using hickory-dns ([#115](https://github.com/0x676e67/wreq/issues/115)) - ([e300a2d](https://github.com/0x676e67/wreq/commit/e300a2d314364a8cf4a269891c065f01a9f2b99b)) - *(extension)* Set application protocol (ALPN) for http1 ([#104](https://github.com/0x676e67/wreq/issues/104)) - ([9ba260f](https://github.com/0x676e67/wreq/commit/9ba260f5dd0e818f9ec1acc176606ff4bd527d10)) - *(feature)* Optional enable websocket - ([28270bf](https://github.com/0x676e67/wreq/commit/28270bf02cb26513c36f927497ff5ef898d373a9)) - *(http2)* Exposing Http2Settings fields ([#75](https://github.com/0x676e67/wreq/issues/75)) - ([15ead8e](https://github.com/0x676e67/wreq/commit/15ead8ec5bd32e1bf47844bd6c87c463ace103db)) - *(http2)* Add `http2_max_frame_size` settings ([#73](https://github.com/0x676e67/wreq/issues/73)) - ([9a69087](https://github.com/0x676e67/wreq/commit/9a6908756613fdd00b65895958998bbb1e73e493)) - *(http2)* Add headers frame default priority ([#106](https://github.com/0x676e67/wreq/issues/106)) - ([e1927dc](https://github.com/0x676e67/wreq/commit/e1927dcb05af5db69221cf60b6f6156c25e5e97d)) - *(http2)* Optimize http2 frame order settings ([#80](https://github.com/0x676e67/wreq/issues/80)) - ([e381f66](https://github.com/0x676e67/wreq/commit/e381f66b4e4289a867d1dd9ce1b7981b32a07f21)) - *(impersonate)* Add Chrome 130 impersonate ([#65](https://github.com/0x676e67/wreq/issues/65)) - ([ebeba7d](https://github.com/0x676e67/wreq/commit/ebeba7de534dc1da6c772bafca3af0f208fc9c42)) - *(impersonate)* Add `Safari iPad 18` impersonate ([#10](https://github.com/0x676e67/wreq/issues/10)) - ([304b1bd](https://github.com/0x676e67/wreq/commit/304b1bd5f1d9561b190f283be89a7f15ef587f53)) - *(impersonate)* Add Safari 18 impersonate - ([acbcbf8](https://github.com/0x676e67/wreq/commit/acbcbf8c578fdb8aff077036ade0b12f403df2df)) - *(impersonate)* Add Chrome 128 impersonate ([#130](https://github.com/0x676e67/wreq/issues/130)) - 
([c787890](https://github.com/0x676e67/wreq/commit/c78789056b64e7f383f3a73b6913398b3d9857c4)) - *(impersonate)* Add `Safari17_0` impersonate ([#71](https://github.com/0x676e67/wreq/issues/71)) - ([62f998e](https://github.com/0x676e67/wreq/commit/62f998e89766714def861e732308096dba8da1a4)) - *(impersonate)* Reuse Safari cipher list in groups ([#65](https://github.com/0x676e67/wreq/issues/65)) - ([06efa36](https://github.com/0x676e67/wreq/commit/06efa366832a579bc389378d5af955ab0f226eed)) - *(impersonate)* Export the Impersonate custom extension configuration ([#64](https://github.com/0x676e67/wreq/issues/64)) - ([9233546](https://github.com/0x676e67/wreq/commit/9233546c429ffa590e7e6143e07c7769cef45ef3)) - *(impersonate)* Optimize reuse of impersonate configuration ([#61](https://github.com/0x676e67/wreq/issues/61)) - ([f369748](https://github.com/0x676e67/wreq/commit/f3697488aa0896bb68a8da496dc52242f9a98aa5)) - *(impersonate)* Add Edge_127 impersonate ([#59](https://github.com/0x676e67/wreq/issues/59)) - ([c9f8861](https://github.com/0x676e67/wreq/commit/c9f8861d1e46e7526c6d8fac22126e74ed5987f0)) - *(impersonate)* Optimize TLS connector context handle ([#37](https://github.com/0x676e67/wreq/issues/37)) - ([dc3aadc](https://github.com/0x676e67/wreq/commit/dc3aadc2b897404569f2d2b3c34312788834acb2)) - *(impersonate)* Add Safari_17_5 impersonate - ([bb44019](https://github.com/0x676e67/wreq/commit/bb44019174143d9277c1743668f1a194d32e022e)) - *(impersonate)* Add Safari_17_5 impersonate ([#28](https://github.com/0x676e67/wreq/issues/28)) - ([aa975df](https://github.com/0x676e67/wreq/commit/aa975df80a7515d629471dea3da9c1b50bfe9448)) - *(impersonate)* Add Safari_IOS_17_4_1 impersonate - ([8be0f37](https://github.com/0x676e67/wreq/commit/8be0f37945360ef0e835afb351502a3385e03d39)) - *(impersonate)* Add Safari_IOS_16_5 impersonate - ([ebfb961](https://github.com/0x676e67/wreq/commit/ebfb9616b7b3f0e9d89b5e320f6997414853f383)) - *(impersonate)* Specification version number match - 
([0c23082](https://github.com/0x676e67/wreq/commit/0c23082929fadf77dcc0dab6b668a541655c4994)) - *(impersonate)* Add Chrome124 impersonate - ([f63d081](https://github.com/0x676e67/wreq/commit/f63d081b24b6820e13e63b867f3306387780e181)) - *(impersonate)* Add Safari_17_4_1 impersonate - ([bd9f4c1](https://github.com/0x676e67/wreq/commit/bd9f4c129c24088261aff358943f74db1c27067a)) - *(impersonate)* Add Safari_IOS_17_2 impersonate - ([e84fb19](https://github.com/0x676e67/wreq/commit/e84fb1970565701d6b838c3e80b0e9288a98122c)) - *(impersonate)* Add Chrome123 impersonate - ([eb6744b](https://github.com/0x676e67/wreq/commit/eb6744b785424609cd1079d06164badf583199c8)) - *(impersonate)* Improve fingerprint OkHttp fingerprint UserAgent - ([4ce6850](https://github.com/0x676e67/wreq/commit/4ce68504b73b3c57388e2e818cc81fcd3525c06a)) - *(impersonate)* Optimize the overhead of parsing request headers at runtime - ([b0af7fa](https://github.com/0x676e67/wreq/commit/b0af7fa875310144a783298e39ad6c08a844efd2)) - *(impersonate)* Add Edge122 impersonate - ([2e73827](https://github.com/0x676e67/wreq/commit/2e73827ac1c935e423741f620874f1c997c2cf97)) - *(impersonate)* Optimize the overhead of parsing request headers at runtime - ([63b4dbf](https://github.com/0x676e67/wreq/commit/63b4dbf1b2db96476ab003077572c75321f01a40)) - *(impersonate)* Add Safari17_2_1 impersonate - ([44f5933](https://github.com/0x676e67/wreq/commit/44f593391b3097e07ef8c64382f33451a07e201d)) - *(impersonate)* Add Edge101 impersonate - ([5e66c0d](https://github.com/0x676e67/wreq/commit/5e66c0da426f21f73d42e2fbf79113bdbc039a8f)) - *(impersonate)* Add Edge99 impersonate - ([ea51acf](https://github.com/0x676e67/wreq/commit/ea51acf5cee796f50fce1c39f9d0b3d52fc197c5)) - *(impersonate)* Add Safari16_5 impersonate - ([9a919ff](https://github.com/0x676e67/wreq/commit/9a919ff72b6baf750949a35cf10e0eab961dee6b)) - *(impersonate)* Add Chrome117 impersonate - 
([0d0ee83](https://github.com/0x676e67/wreq/commit/0d0ee83421269bb8d5948984e9a02cc9d5f7cb44)) - *(impersonate)* Improve safari fingerprint impersonate - ([0b62959](https://github.com/0x676e67/wreq/commit/0b62959fbf6ffd35d91d710e6ce8f3846bc6026d)) - *(impersonate)* Add Chrome101 impersonate - ([02a0a17](https://github.com/0x676e67/wreq/commit/02a0a1704e3e015c8884d70b8c0404c19858c42f)) - *(impersonate)* Add Chrome100 impersonate - ([2c1549b](https://github.com/0x676e67/wreq/commit/2c1549b1a5e6647fd9732c01ff4325616a6be941)) - *(impersonate)* Add Chrome120 impersonate - ([fe63a86](https://github.com/0x676e67/wreq/commit/fe63a86290e0d7b397e1789de805eb89dc91e2d0)) - *(impersonate)* Add Safari16 impersonate - ([4e4701f](https://github.com/0x676e67/wreq/commit/4e4701f3309fc34da40b1fbd65e9b4f944ee2a9f)) - *(impersonate)* Add Safari15_6_1 impersonate - ([86e17a0](https://github.com/0x676e67/wreq/commit/86e17a05097cdd82dbaa90c1c53d7c82a7042a5a)) - *(impersonate)* Add Safari 15_3/15_5 Impersonate - ([0af1670](https://github.com/0x676e67/wreq/commit/0af1670952a94b7fbd63222b89656e8ec1889e97)) - *(impersonate)* Add Chrome v116 Impersonate - ([13971bd](https://github.com/0x676e67/wreq/commit/13971bdaf3d9c0c5c6c6e7455c0bd51a82cbcffd)) - *(impersonate)* Add Chrome v119 Impersonate - ([1ce01d7](https://github.com/0x676e67/wreq/commit/1ce01d77263b478992a67aeb05245949386029fd)) - *(impersonate)* Use the default locations of trusted certificates for verification. 
*(proxy)* Use `windows-registry` instead of `winreg` for reading proxy settings on Windows ([#116](https://github.com/0x676e67/wreq/issues/116))
*(tls)* Expose more custom TLS settings ([#76](https://github.com/0x676e67/wreq/issues/76))
([d0de915](https://github.com/0x676e67/wreq/commit/d0de91513332e7ff64c4ef4347d701ee5bda0576)) - *(tls)* Avoid repeated loading of native root CA ([#37](https://github.com/0x676e67/wreq/issues/37)) - ([2ad61c7](https://github.com/0x676e67/wreq/commit/2ad61c7619064b863e184f3bf18eb207ade1c1e7)) - *(tls)* Optional built-in root certificates feature ([#36](https://github.com/0x676e67/wreq/issues/36)) - ([016bb5d](https://github.com/0x676e67/wreq/commit/016bb5d20e95d27e25022cfc5396ebf4484f0d2f)) - *(tls)* Some `Chrome`/`Edge` versions have `ECH` enabled by default ([#9](https://github.com/0x676e67/wreq/issues/9)) - ([fecd878](https://github.com/0x676e67/wreq/commit/fecd87820d8014af9abad29befcb405a3ac8593f)) - *(tls)* Some `Chrome`/`Edge` versions have `ECH` enabled by default ([#8](https://github.com/0x676e67/wreq/issues/8)) - ([a68fa56](https://github.com/0x676e67/wreq/commit/a68fa56c75a2c28efcfca324488c1340889b6674)) - *(tls)* Enable permute extensions for `Chrome`/`Edge` 106 and above ([#6](https://github.com/0x676e67/wreq/issues/6)) - ([20e61f0](https://github.com/0x676e67/wreq/commit/20e61f081bbd8b6da9113714c7cec8aaf11aec22)) - *(tls)* Add preconfigured TLS settings ([#118](https://github.com/0x676e67/wreq/issues/118)) - ([440bbdf](https://github.com/0x676e67/wreq/commit/440bbdf2eed0f47ad781715d4c41d11c8d782e6d)) - *(tls)* Add option to configure TLS server name indication (SNI) ([#117](https://github.com/0x676e67/wreq/issues/117)) - ([9847c41](https://github.com/0x676e67/wreq/commit/9847c41e91a4d8cc229eba65df9fe83d98800d94)) - *(tls)* Optimize tls configuration process ([#113](https://github.com/0x676e67/wreq/issues/113)) - ([87219ca](https://github.com/0x676e67/wreq/commit/87219ca951cb620e10cf1a61bdb41d573dd3b285)) - *(tls)* Add `CA Certificate` settings ([#112](https://github.com/0x676e67/wreq/issues/112)) - ([0b39bb0](https://github.com/0x676e67/wreq/commit/0b39bb0c91ab403ab60ee32bd47c8b263c00cd17)) - *(tls)* Reuse https connector layer 
([#107](https://github.com/0x676e67/wreq/issues/107)) - ([5c32b6d](https://github.com/0x676e67/wreq/commit/5c32b6d24bdecace26e07e1e6e45ed17ea3dcd1b)) - *(tls)* Add zstd support for chrome models and derivatives ([#93](https://github.com/0x676e67/wreq/issues/93)) - ([0204bb4](https://github.com/0x676e67/wreq/commit/0204bb4a25b3b56b6ef4f4b56a06e837873b4339)) - *(websocket)* Add websocket handshake with a specified websocket key ([#50](https://github.com/0x676e67/wreq/issues/50)) - ([cf46944](https://github.com/0x676e67/wreq/commit/cf469447eebab3ab112c965f722e9b20314b8d0e)) - *(websocket)* Improve websocket API usage ([#49](https://github.com/0x676e67/wreq/issues/49)) - ([72070aa](https://github.com/0x676e67/wreq/commit/72070aa29529d718ea19625fc8e43909dee1c5b7)) - *(websocket)* Improve websocket upgrade ([#73](https://github.com/0x676e67/wreq/issues/73)) - ([348f04c](https://github.com/0x676e67/wreq/commit/348f04cd634b1b17267c2f0ff75851768590b6a4)) - *(websocket)* Add upgrade with custom handshake key - ([b02396b](https://github.com/0x676e67/wreq/commit/b02396b64187cd770166c68a7556e56a2513ba06)) - *(websocket)* Export header method - ([4ab0b0a](https://github.com/0x676e67/wreq/commit/4ab0b0a1664fb7969e9089a7f658ef36b01cad0c)) - *(websocket)* Export header method - ([290d163](https://github.com/0x676e67/wreq/commit/290d16395fd3c9b1f9509bbec0e978655cb20b9f)) - *(websocket)* Export `UpgradedRequestBuilder` - ([fac7251](https://github.com/0x676e67/wreq/commit/fac7251e922e802042bc6984928fa7d3c798e685)) - *(websocket)* Support configuration websocket - ([319dd6a](https://github.com/0x676e67/wreq/commit/319dd6a9fc6f6f18295e276bcd21d6ed63c0c9ee)) - Add loading of dynamic root certificate store ([#170](https://github.com/0x676e67/wreq/issues/170)) - ([44a5784](https://github.com/0x676e67/wreq/commit/44a578440a23f2c4bebabe137564c009f62b9049)) - Add `Edge 131` impersonate ([#158](https://github.com/0x676e67/wreq/issues/158)) - 
([9dd73ab](https://github.com/0x676e67/wreq/commit/9dd73ab6c9d9839f9ad1a6381f5f78d7ef400108)) - Add `Safari 18.1.1` impersonate ([#157](https://github.com/0x676e67/wreq/issues/157)) - ([2c23ab0](https://github.com/0x676e67/wreq/commit/2c23ab002466f93c4dfcebaa2c4c7658ff18a7e1)) - Add `Safari 18.2` impersonate ([#151](https://github.com/0x676e67/wreq/issues/151)) - ([638864c](https://github.com/0x676e67/wreq/commit/638864c78cdeff1c5d107ca12933a255f35cbedb)) - Impl `IntoUrl` for `&Url` ([#146](https://github.com/0x676e67/wreq/issues/146)) - ([a1c2343](https://github.com/0x676e67/wreq/commit/a1c2343c76c811c55f6e54a81e7bbea8884c0e0e)) - Implement IntoUrl for Cow<'a, str> ([#145](https://github.com/0x676e67/wreq/issues/145)) - ([6c0b14c](https://github.com/0x676e67/wreq/commit/6c0b14ca224c42ed3d57bfe1acf21017dfbb3acf)) - Support changing cookie provider after initialization ([#114](https://github.com/0x676e67/wreq/issues/114)) - ([f1c5a07](https://github.com/0x676e67/wreq/commit/f1c5a07f2943ef0c4fc418d2e73ff558eafb7df1)) - Support changing interface after initialization - ([61ed45a](https://github.com/0x676e67/wreq/commit/61ed45a8acfaf1a2a47b09937b79b45364c1d0b1)) - Support changing interface after initialization ([#103](https://github.com/0x676e67/wreq/issues/103)) - ([81d79da](https://github.com/0x676e67/wreq/commit/81d79da1ef340386c5c10811a07b42b68af79d52)) - Support changing redirect policy after initialization ([#102](https://github.com/0x676e67/wreq/issues/102)) - ([1c4bc66](https://github.com/0x676e67/wreq/commit/1c4bc6634e5a9ff12a6e6dc4a240c5e056882f29)) - Support changing header order after initialization ([#101](https://github.com/0x676e67/wreq/issues/101)) - ([d5dd02b](https://github.com/0x676e67/wreq/commit/d5dd02bf96707cc83874cd25271ac94df9adfbf1)) - Support changing impersonate fingerprint after initialization ([#100](https://github.com/0x676e67/wreq/issues/100)) - ([50393ee](https://github.com/0x676e67/wreq/commit/50393ee3051af81f971a0215ce841498bef6ff29)) 
Optional BoringSSL PQ experimental feature ([#84](https://github.com/0x676e67/wreq/issues/84))
([2fbc201](https://github.com/0x676e67/wreq/commit/2fbc20167d6656850069c6496c73969c78b0a8d2)) - Set default headers - ([f4437ea](https://github.com/0x676e67/wreq/commit/f4437ea7b1c2a208fe07d17184d473b32b176ce4)) ### Bug Fixes - *(client)* Return an error instead of panic when parsing invalid URL ([#164](https://github.com/0x676e67/wreq/issues/164)) - ([0daacd1](https://github.com/0x676e67/wreq/commit/0daacd1d7c6fcd1e44aee84dfbdbf4d384acc948)) - *(client)* Fix retry request via connection pool extension ([#138](https://github.com/0x676e67/wreq/issues/138)) - ([2971538](https://github.com/0x676e67/wreq/commit/2971538ebaaf0005ebc4b9d336d8243e7a613b23)) - *(client)* Fix redirect via connection pool extension ([#137](https://github.com/0x676e67/wreq/issues/137)) - ([6c3a0cb](https://github.com/0x676e67/wreq/commit/6c3a0cbd45a539ebc17b38c0841d25be3ef00307)) - *(client)* Fix redirect header sorting ([#135](https://github.com/0x676e67/wreq/issues/135)) - ([275baf6](https://github.com/0x676e67/wreq/commit/275baf63cecf609701bebd1d08c51cb1a27510cb)) - *(client)* Fix http redirect via proxy ([#134](https://github.com/0x676e67/wreq/issues/134)) - ([c71dd91](https://github.com/0x676e67/wreq/commit/c71dd915511b2b354d3f795f2c29779aec8e237d)) - *(client)* Fix `ClientBuilder` not `Send` + `Sync` ([#51](https://github.com/0x676e67/wreq/issues/51)) - ([c6312fc](https://github.com/0x676e67/wreq/commit/c6312fc6c8cbe6a11a67399e73d203b4f7091f8b)) - *(client)* Optional setting of default accept ([#133](https://github.com/0x676e67/wreq/issues/133)) - ([fc4df7c](https://github.com/0x676e67/wreq/commit/fc4df7ced3d564d1f4b1475cfc9a68e808be342a)) - *(client)* Fix the header sending order, set accept before request ([#131](https://github.com/0x676e67/wreq/issues/131)) - ([2beae56](https://github.com/0x676e67/wreq/commit/2beae56c0a0e9119e270864ca4efbbc0d557a917)) - *(client)* Fix http version setting order ([#120](https://github.com/0x676e67/wreq/issues/120)) - 
*(http)* Compatible with some CDN servers, Http1 retains case by default when sending headers ([#56](https://github.com/0x676e67/wreq/issues/56))
([1ce9a61](https://github.com/0x676e67/wreq/commit/1ce9a610df3afd35a235fde333aed0ded34dabb9)) - *(impersonate)* Fix v116 impersonate - ([427f6a2](https://github.com/0x676e67/wreq/commit/427f6a22025934ae0e759840b5d7c16b4015d2fe)) - *(proxy)* Make HTTP(S)_PROXY variables take precedence over ALL_PROXY ([#87](https://github.com/0x676e67/wreq/issues/87)) - ([e28b30a](https://github.com/0x676e67/wreq/commit/e28b30a3da8e4fcb075c07da6e677ffbb80ed681)) - *(response)* `copy_to()` and `text()` return `reqwest::Result` - ([2c60511](https://github.com/0x676e67/wreq/commit/2c60511bcee3c633467b6be46f3d1e27af5f0905)) - *(tls)* Fix SNI verification ([#87](https://github.com/0x676e67/wreq/issues/87)) - ([0cfb181](https://github.com/0x676e67/wreq/commit/0cfb181a895bbd32f8ad48b1eeb376172a077232)) - *(tls)* Fix unsafe code block warnings ([#52](https://github.com/0x676e67/wreq/issues/52)) - ([127a1a9](https://github.com/0x676e67/wreq/commit/127a1a923b2203e31de41d171acd37e14aa5fb9f)) - *(tls)* Fix CA certificate conditional compilation ([#41](https://github.com/0x676e67/wreq/issues/41)) - ([27b4119](https://github.com/0x676e67/wreq/commit/27b411915be3314338427186fac5760a615c4f11)) - *(tls)* Fix default tls configuration to use websocket ([#30](https://github.com/0x676e67/wreq/issues/30)) - ([889867c](https://github.com/0x676e67/wreq/commit/889867c6194a7fb812d1a3ec957e30f0757bfcc1)) - *(tls)* Fix default TLS SNI context configuration conflict ([#13](https://github.com/0x676e67/wreq/issues/13)) - ([94db0fc](https://github.com/0x676e67/wreq/commit/94db0fca006ca65d0d13f04eb23237512113937b)) - *(tls)* Fix setting config TLS version - ([6544c11](https://github.com/0x676e67/wreq/commit/6544c111048bcf0513cd7a6ba8ba148f65502ac9)) - *(tls)* Fix optional config TLS size version - ([bb16145](https://github.com/0x676e67/wreq/commit/bb16145fa799f3b078ed50a695cbd27a02f0457e)) - *(websocket)* Fix websocket upgrade builder ([#134](https://github.com/0x676e67/wreq/issues/134)) - 
([111d928](https://github.com/0x676e67/wreq/commit/111d92877982dded4dd2b5c63318dff43631c967)) - Improve TLS connector creation, fix client creation taking too long ([#107](https://github.com/0x676e67/wreq/issues/107)) - ([26f254c](https://github.com/0x676e67/wreq/commit/26f254c5b805ddaf6cf423b55aad5e74760796da)) - Fix decompressing deflate with zlib specific wrapper fails ([#99](https://github.com/0x676e67/wreq/issues/99)) - ([c865b9c](https://github.com/0x676e67/wreq/commit/c865b9cf5dad766c9da35e85757a0a26e2f3efbf)) - Update Chrome version from 129 to 130 ([#68](https://github.com/0x676e67/wreq/issues/68)) - ([f27704a](https://github.com/0x676e67/wreq/commit/f27704a876dd28b929a534e212b53218141a789e)) - Fix incorrect Accept-Encoding header combinations in Accepts::as_str ([#89](https://github.com/0x676e67/wreq/issues/89)) - ([1373a01](https://github.com/0x676e67/wreq/commit/1373a018b3c374a28e37aed8a3da9fd563a8f665)) - Set nodelay correctly to handle when a tls feature is enabled but connection is to an http server ([#2062](https://github.com/0x676e67/wreq/issues/2062)) - ([1485ce6](https://github.com/0x676e67/wreq/commit/1485ce6f754413a81a9673252349f953c1d86e82)) - Split connect timeout for multiple IPs ([#1940](https://github.com/0x676e67/wreq/issues/1940)) - ([2a881fb](https://github.com/0x676e67/wreq/commit/2a881fb50489b21aa6c879eea0cb339755240fb5)) - Strip BOM in `Response::text_with_charset` ([#1898](https://github.com/0x676e67/wreq/issues/1898)) - ([3abcc7c](https://github.com/0x676e67/wreq/commit/3abcc7c4f537c16ad9937f8cc60fb23cb506ac85)) - Strip BOM in Response::text_with_charset - ([d820ad2](https://github.com/0x676e67/wreq/commit/d820ad237feade4527743067c8f6fc3e19972c7b)) - Wasm client: pass response header to builder by reference ([#1350](https://github.com/0x676e67/wreq/issues/1350)) - ([c9217d8](https://github.com/0x676e67/wreq/commit/c9217d8d1bc6c65605ad4909cb45a1cb72b778a0)) - Respect https_only option when redirecting 
([#1313](https://github.com/0x676e67/wreq/issues/1313)) - ([bdc57be](https://github.com/0x676e67/wreq/commit/bdc57beabbf3fe77c2196d17ef3f7640d37b81cf)) - Upgrade to http2 if the server reports that it supports it ([#1166](https://github.com/0x676e67/wreq/issues/1166)) - ([2940740](https://github.com/0x676e67/wreq/commit/2940740493ce55e8baee44a47fd759d9e3aa3187)) - Tests::support::server - ([07d6bca](https://github.com/0x676e67/wreq/commit/07d6bca08f0ef8deb752eb17e87ecca1e2c441ae)) ### Refactor - *(client)* Removed confusing way to enable `hickory-dns` ([#34](https://github.com/0x676e67/wreq/issues/34)) - ([769d797](https://github.com/0x676e67/wreq/commit/769d7979f583ac435d808a8831c806638e009c7a)) - *(client)* Turn off default redirect ([#4](https://github.com/0x676e67/wreq/issues/4)) - ([2b80121](https://github.com/0x676e67/wreq/commit/2b80121e69cb15f74885516429406df457eb1c56)) - *(client)* Simplify Headers Frame priority settings ([#126](https://github.com/0x676e67/wreq/issues/126)) - ([3449c2f](https://github.com/0x676e67/wreq/commit/3449c2f54ed4fcc9d94bfc484b2b739dd892e474)) - *(client)* Set_proxies accepts an slice of references ([#119](https://github.com/0x676e67/wreq/issues/119)) - ([a25ada0](https://github.com/0x676e67/wreq/commit/a25ada0a0cf297ab43b48fd7915d3c24f740028d)) - *(hickory-dns)* Async `new_resolver` ([#84](https://github.com/0x676e67/wreq/issues/84)) - ([73ff128](https://github.com/0x676e67/wreq/commit/73ff1286ac383372f84f5a37e653c237032c2192)) - *(impersonate)* Simplify Impersonate enum parsing with macro ([#71](https://github.com/0x676e67/wreq/issues/71)) - ([b3efecf](https://github.com/0x676e67/wreq/commit/b3efecf6221510b6ac9d55a0b651f321d0557635)) - *(impersonate)* Reuse code - ([dbc6d66](https://github.com/0x676e67/wreq/commit/dbc6d662b2feb33231c1e37b780c6645761d23bb)) - *(impersonate)* Refactor unnecessary settings - ([716a190](https://github.com/0x676e67/wreq/commit/716a190617dbe73b6fd771e05748179221cdaac6)) - *(impersonate)* Revert to 
*(tls)* Simplified TLS version mapper ([#70](https://github.com/0x676e67/wreq/issues/70))
([#103](https://github.com/0x676e67/wreq/issues/103)) - ([322d030](https://github.com/0x676e67/wreq/commit/322d030968a0106220be5c0e6c4641680ddba3cd)) - *(tls)* Major module changes ([#91](https://github.com/0x676e67/wreq/issues/91)) - ([76114b0](https://github.com/0x676e67/wreq/commit/76114b0a6674b0afd2d8cb5927fe2d6f58705458)) - *(websocket)* Major changes, abstract WebSocket message structure ([#94](https://github.com/0x676e67/wreq/issues/94)) - ([266f0cb](https://github.com/0x676e67/wreq/commit/266f0cbf72c40262912be32c0a144a185fcac50e)) - Unified naming API ([#150](https://github.com/0x676e67/wreq/issues/150)) - ([da5e052](https://github.com/0x676e67/wreq/commit/da5e052c9f31fb908c30c21953ee01c6344b68fe)) - Do not create default request headers unless necessary ([#120](https://github.com/0x676e67/wreq/issues/120)) - ([1d40d7e](https://github.com/0x676e67/wreq/commit/1d40d7e576eb796ce9d74815ab9937ca1cb17640)) - Reduce `unsafe` scope for improved safety and readability ([#115](https://github.com/0x676e67/wreq/issues/115)) - ([79e6cb8](https://github.com/0x676e67/wreq/commit/79e6cb8b055d71b35d630ef11908b3fb8707e2e7)) - Delete unnecessary clone ([#98](https://github.com/0x676e67/wreq/issues/98)) - ([c5c6004](https://github.com/0x676e67/wreq/commit/c5c6004785c1c14721c6643af67fcdc728757f68)) - Integrate tls/http2 unified configuration module ([#77](https://github.com/0x676e67/wreq/issues/77)) - ([cef5650](https://github.com/0x676e67/wreq/commit/cef5650fa3fe208a97fddf8fd27715893770a020)) - Normalize DNS module exports ([#64](https://github.com/0x676e67/wreq/issues/64)) - ([b0a1ba6](https://github.com/0x676e67/wreq/commit/b0a1ba6f6de1964c31145a3a23ec8175cf195925)) - Refactor custom root CA certificate loading source ([#38](https://github.com/0x676e67/wreq/issues/38)) - ([cfd3603](https://github.com/0x676e67/wreq/commit/cfd36030927c617c38d0bfd0fd6e09c4112d4a45)) - Rename the `client` module to `http` - 
([5568b31](https://github.com/0x676e67/wreq/commit/5568b31cb3df741bb1f8f507f2b7858b00395263)) - Enabled `accept-encoding` will be determined by the `feature` ([#95](https://github.com/0x676e67/wreq/issues/95)) - ([85de77b](https://github.com/0x676e67/wreq/commit/85de77b1eca6272dfba13d61f8392563b561c835)) - Enabling `accept-encoding` will be determined by the feature - ([4bf9465](https://github.com/0x676e67/wreq/commit/4bf94652db2b776a0df366d9f2e3c8d44daf7c52)) - Blocking feature doesn't need multi-threaded tokio runtime ([#90](https://github.com/0x676e67/wreq/issues/90)) - ([7ab0c67](https://github.com/0x676e67/wreq/commit/7ab0c678d7ffc6f23b4b039db702e380492f4df8)) - Change Debug of Error to output url as str ([#88](https://github.com/0x676e67/wreq/issues/88)) - ([b9b684b](https://github.com/0x676e67/wreq/commit/b9b684b2212878ef84a5c18da3f5122bcd74ecab)) - Remove unused crates - ([9fb269e](https://github.com/0x676e67/wreq/commit/9fb269e5f38a0e200000db2ac0a3786d859575f2)) - Remove unused crates ([#54](https://github.com/0x676e67/wreq/issues/54)) - ([c0c273d](https://github.com/0x676e67/wreq/commit/c0c273d4e648a0441ab9efee63927ff263e9f27a)) - Migrate trust-dns to hickory-dns - ([ae7d775](https://github.com/0x676e67/wreq/commit/ae7d7753f005120182e9a00486beb7f196b8c5fd)) - Migrate trust-dns to hickory-dns - ([712600a](https://github.com/0x676e67/wreq/commit/712600a2e11cf21e850183391d1e77caedc297bd)) - Disable ssl verify - ([5680bb0](https://github.com/0x676e67/wreq/commit/5680bb0a290d6556ba2f358293dca31824c68af8)) ### Documentation - Improve `TLS`/`HTTP2` custom configuration documentation ([#67](https://github.com/0x676e67/wreq/issues/67)) - ([8a72439](https://github.com/0x676e67/wreq/commit/8a72439a3c9aa2c8c06492d8928330bac518d6e3)) - Update docs ([#54](https://github.com/0x676e67/wreq/issues/54)) - ([a010145](https://github.com/0x676e67/wreq/commit/a01014519b499621fec2fb03a7e9d3c333c1855d)) - Update docs ([#82](https://github.com/0x676e67/wreq/issues/82)) - 
([41816f8](https://github.com/0x676e67/wreq/commit/41816f8b26e42be0166c8df9cb6492c71be77056)) - Fix docs build ([#81](https://github.com/0x676e67/wreq/issues/81)) - ([2045cea](https://github.com/0x676e67/wreq/commit/2045cea5e05abcfeb7c91d94a1e0497eb22bfe19)) - Add cfg notes about http3 builder methods ([#2070](https://github.com/0x676e67/wreq/issues/2070)) - ([c65dd7f](https://github.com/0x676e67/wreq/commit/c65dd7f783d8aae8ee47e751353d1befeb9dea20)) - Remove redundant link targets ([#2019](https://github.com/0x676e67/wreq/issues/2019)) - ([50dbaf3](https://github.com/0x676e67/wreq/commit/50dbaf391087cfa951accc765126b4f5d017d8a3)) - Fix building on docs.rs ([#1789](https://github.com/0x676e67/wreq/issues/1789)) - ([7fdd014](https://github.com/0x676e67/wreq/commit/7fdd014d46d9bf07555a2321166f3029e9a25ac8)) - Fix wording on main docs page ([#1765](https://github.com/0x676e67/wreq/issues/1765)) - ([673449a](https://github.com/0x676e67/wreq/commit/673449aa823394d224815b8cc168e059e4c4ebe1)) - Fix some typos ([#1562](https://github.com/0x676e67/wreq/issues/1562)) - ([81fc85a](https://github.com/0x676e67/wreq/commit/81fc85a68949bd0ff73cfd9f292393b5c5ed42ed)) - Fix broken doc comment example. 
([#1584](https://github.com/0x676e67/wreq/issues/1584)) - ([e9ba0a9](https://github.com/0x676e67/wreq/commit/e9ba0a9dc79f63c3655f334df23b50b9a841e326)) - Fix some typos ([#1531](https://github.com/0x676e67/wreq/issues/1531)) - ([6ca5f3e](https://github.com/0x676e67/wreq/commit/6ca5f3e50c979909b786a4f1e2c73611164254c7)) - Provide basic auth example ([#1362](https://github.com/0x676e67/wreq/issues/1362)) - ([be8ab7b](https://github.com/0x676e67/wreq/commit/be8ab7b951610cbc85764198943ab053e8608454)) - Fix some typos ([#1346](https://github.com/0x676e67/wreq/issues/1346)) - ([597833d](https://github.com/0x676e67/wreq/commit/597833d906f2453a6976e6ed6ed71af91c534382)) - Adds amplifying note about private key formats ([#1335](https://github.com/0x676e67/wreq/issues/1335)) - ([eb9e343](https://github.com/0x676e67/wreq/commit/eb9e343142b7fe7392408141dab7145cb4a30ba2)) - Build wasm32-unknown-unknown docs ([#998](https://github.com/0x676e67/wreq/issues/998)) - ([cff487f](https://github.com/0x676e67/wreq/commit/cff487ff58630cf0ac59f3e46cbf20cf50a28b3f)) - Make encoding_rs link clickable ([#674](https://github.com/0x676e67/wreq/issues/674)) - ([a9dd94a](https://github.com/0x676e67/wreq/commit/a9dd94a99fdb30a77992ea0afa552f266efbd8a3)) ### Styling - *(connect)* Replace all non-refutable if let patterns with let statements ([#44](https://github.com/0x676e67/wreq/issues/44)) - ([ec598d8](https://github.com/0x676e67/wreq/commit/ec598d8b9262680b570ac15fff1623a0e050edb8)) - *(impersonate)* Remove dead code ([#51](https://github.com/0x676e67/wreq/issues/51)) - ([61c6055](https://github.com/0x676e67/wreq/commit/61c605531881215c8ab95f8eda557969c7d6d6fb)) - *(tls)* Remove unused closure - ([a39ba21](https://github.com/0x676e67/wreq/commit/a39ba2198e5a7144b60567f9cb815c1fc7d85d2e)) ### Testing - Fix test_badssl_no_built_in_roots - ([427ff74](https://github.com/0x676e67/wreq/commit/427ff74adf2266413413b2ab4da6c5669efadf33)) - Add more badssl tests for rustls - 
([8027a28](https://github.com/0x676e67/wreq/commit/8027a2894af496ce25c7f2a035e265cc8bf9bf59)) - Response::text() - ([33c7ce4](https://github.com/0x676e67/wreq/commit/33c7ce4ce2f65587ea60c011151a5605887e97f3)) - Add tests for setting default headers - ([2bd558d](https://github.com/0x676e67/wreq/commit/2bd558d8c74a03622dbb02d194440aa13c0a9048)) - Use verbose output - ([f5b4dd4](https://github.com/0x676e67/wreq/commit/f5b4dd4123f4f2098895be3833e81cdf9b5a8460)) - Fixed up issue with reading a Body and finished RequestBuilder tests - ([59ba7cf](https://github.com/0x676e67/wreq/commit/59ba7cf23b48c94c7223cf0f2047e9e7b1e0a275)) - Added some trivial tests for the RequestBuilder - ([980488f](https://github.com/0x676e67/wreq/commit/980488f918a70f24a859f3776f4b4dd947c3758e)) ### Miscellaneous Tasks - *(client)* Client `set_redirect_policy` rename to `set_redirect` ([#149](https://github.com/0x676e67/wreq/issues/149)) - ([0ed4a76](https://github.com/0x676e67/wreq/commit/0ed4a76067b87568a33a110be6d742b946875ede)) - *(client)* Accept request header is appended by default ([#125](https://github.com/0x676e67/wreq/issues/125)) - ([06ccdc7](https://github.com/0x676e67/wreq/commit/06ccdc70c685ef5a8817fcbef177566ec7be50b4)) - *(client)* Impersonate does not clone request headers unless necessary - ([2043388](https://github.com/0x676e67/wreq/commit/204338837c20ac0bffd585b4f7238b5b58650254)) - *(docs)* Fix missing link for 'blocking' - ([4574019](https://github.com/0x676e67/wreq/commit/457401904596260c712c0b9f4f27e6d47b4a2141)) - *(request)* Avoid panic when adding host header - ([80e4871](https://github.com/0x676e67/wreq/commit/80e48718e634dd6696688d415e858c46acffbc81)) - *(request)* Delete WASM legacy API ([#141](https://github.com/0x676e67/wreq/issues/141)) - ([ddcd980](https://github.com/0x676e67/wreq/commit/ddcd9806d49dbcf47e55389bf5dc97871d566377)) - *(tls)* Rename `http_version_pref` to `alpn_protos` ([#131](https://github.com/0x676e67/wreq/issues/131)) - 
([4b7edba](https://github.com/0x676e67/wreq/commit/4b7edba4a792504382567d18451074a249b0a2bc)) - *(tls)* Export extension as public API - ([05a6a6f](https://github.com/0x676e67/wreq/commit/05a6a6fec7390736d71d818c1b8aa20f96d3e95f)) - *(tls)* Remove redundant settings ([#109](https://github.com/0x676e67/wreq/issues/109)) - ([ecda80c](https://github.com/0x676e67/wreq/commit/ecda80cf576de73e854ed7e5efca3843fdb6d062)) - Move `ImpersonateSettings` to implement location - ([99ea68b](https://github.com/0x676e67/wreq/commit/99ea68b161ed7ac8e3b384464cb270034b831bce)) - Simplify root certificate load ([#169](https://github.com/0x676e67/wreq/issues/169)) - ([68e9f26](https://github.com/0x676e67/wreq/commit/68e9f26a946c781bd1c06fd67dbfb3c13894350d)) - Simplify root certificate load - ([566f2fb](https://github.com/0x676e67/wreq/commit/566f2fb7a4a0e5cb7d1899db5257e509d5d9f142)) - To avoid ambiguity, `ca_cert_store` is renamed to `root_certs_store` ([#162](https://github.com/0x676e67/wreq/issues/162)) - ([b76ef15](https://github.com/0x676e67/wreq/commit/b76ef15e2fdc206cd949fd44e7a147ee52e91ac3)) - Update macro export scope - ([3115132](https://github.com/0x676e67/wreq/commit/3115132eee19a7e303adaadce87c8740a222f167)) - Update impersonate template - ([82d7b93](https://github.com/0x676e67/wreq/commit/82d7b9331ddc24d546115a54ac594f84dc49f137)) - Macro static creation of impersonate template ([#156](https://github.com/0x676e67/wreq/issues/156)) - ([7383d66](https://github.com/0x676e67/wreq/commit/7383d6630a20dd104825bdb6a9fed80482ee3450)) - Do not pre-append `content-length` in non-header sorting state ([#152](https://github.com/0x676e67/wreq/issues/152)) - ([075f973](https://github.com/0x676e67/wreq/commit/075f97304ffb8f3889dee5a22c4220818afecbb4)) - Simplify the impersonate template - ([92f52d1](https://github.com/0x676e67/wreq/commit/92f52d1e596d69f6b8690704ab74ac2def7740b3)) - Fix typo - ([650256c](https://github.com/0x676e67/wreq/commit/650256c42aa6cf9582e83e8d750bb1b50ca5d134)) 
- Introduce macro for conditional header initialization ([#127](https://github.com/0x676e67/wreq/issues/127)) - ([b8a2e48](https://github.com/0x676e67/wreq/commit/b8a2e488796c509901f90f32c4549c78c3bcdc49)) - Refactor struct fields to use Cow<'static, T> for better efficiency ([#124](https://github.com/0x676e67/wreq/issues/124)) - ([8b79c5b](https://github.com/0x676e67/wreq/commit/8b79c5b4182e6e4e861b37b6db76f3a9c4a4a81b)) - Cache template request headers ([#121](https://github.com/0x676e67/wreq/issues/121)) - ([3b65d8f](https://github.com/0x676e67/wreq/commit/3b65d8faca44fc6d241e59140db92238c6eef49b)) - Update - ([7d1bbbc](https://github.com/0x676e67/wreq/commit/7d1bbbc8c97247be5d43957ed68438465f311388)) - Simplify impersonate template - ([871a7af](https://github.com/0x676e67/wreq/commit/871a7af7074b7dbe1bfffa93445d98da3a3fc08e)) - Simplify pre-configured TLS settings - ([2ca512e](https://github.com/0x676e67/wreq/commit/2ca512ee0b793ffcce22927c2e3fbb91e36ec05a)) - Remove tunnel proxy user agent setting ([#116](https://github.com/0x676e67/wreq/issues/116)) - ([04fa9fa](https://github.com/0x676e67/wreq/commit/04fa9fafb5b6bc6401fe738109e58f7e0473fc11)) - Reuse redirect policies whenever possible - ([49bb717](https://github.com/0x676e67/wreq/commit/49bb7174a2b84d88855805d1dcea5966e6133cdb)) - Inline some hot code - ([a07cf10](https://github.com/0x676e67/wreq/commit/a07cf105fb84a97264d4af71fd7f5962790b6f48)) - Use custom connector builder - ([6c51bd1](https://github.com/0x676e67/wreq/commit/6c51bd1d4b8592181a2fa59164d054b96fbe41d6)) - Disable dynamic distribution loading of connector builder ([#113](https://github.com/0x676e67/wreq/issues/113)) - ([6814489](https://github.com/0x676e67/wreq/commit/6814489773f67c84cc83f316e98ab6da38913b5b)) - Disable dynamic distribution loading of certificates ([#112](https://github.com/0x676e67/wreq/issues/112)) - ([75095ba](https://github.com/0x676e67/wreq/commit/75095ba8d3085bfd52bb92e581ec76ec7b923bb2)) - Undo the dynamic 
distribution configuration headers ([#111](https://github.com/0x676e67/wreq/issues/111)) - ([a7c9376](https://github.com/0x676e67/wreq/commit/a7c937603966bae1b811d3cb9b67f3958279e579)) - Cargo clippy --fix ([#106](https://github.com/0x676e67/wreq/issues/106)) - ([065f294](https://github.com/0x676e67/wreq/commit/065f294a1b67ac9bb979966f955500e4f93a4098)) - Remove unnecessary tls feature - ([7f70c48](https://github.com/0x676e67/wreq/commit/7f70c48f63d27409b509dc620b4451e061548ef2)) - 1.80 as MSRV ([#74](https://github.com/0x676e67/wreq/issues/74)) - ([9814951](https://github.com/0x676e67/wreq/commit/98149512c90cc51d51d14cf3e0cfe8d26899b49d)) - 1.70 as MSRV - ([34bc71d](https://github.com/0x676e67/wreq/commit/34bc71d13ccab181869ae377ff0d3c8ae0779f64)) - 1.70 as MSRV ([#53](https://github.com/0x676e67/wreq/issues/53)) - ([29adc92](https://github.com/0x676e67/wreq/commit/29adc923bd197f8d92cf03d964d689c7b01e27de)) - A few simple cleanups/lints ([#1849](https://github.com/0x676e67/wreq/issues/1849)) - ([280af15](https://github.com/0x676e67/wreq/commit/280af156459845a6b4535aa9045979861b67c310)) - Update changelog for 0.11.15 - ([bf7ff55](https://github.com/0x676e67/wreq/commit/bf7ff556494bc5e35164c325faad49e1cdd3c8e9)) - Fix appveyor build for backtrace-sys dependency ([#526](https://github.com/0x676e67/wreq/issues/526)) - ([2a64140](https://github.com/0x676e67/wreq/commit/2a64140de82d93ca2b3a804c07f16e7a5bf66fa1)) - Update gitignore - ([3bc907f](https://github.com/0x676e67/wreq/commit/3bc907f7deaeff0a9f9e02c7c3f9e4c4495aeafe)) ### Revert - *(client)* Remove use of unused TLS Server Name Indication - ([a935f99](https://github.com/0x676e67/wreq/commit/a935f992194542b3dd4b6204963eeb3b53d5f8d0)) - *(impersonate)* Revert Edge122 configure new curves ([#66](https://github.com/0x676e67/wreq/issues/66)) - ([ba5cd48](https://github.com/0x676e67/wreq/commit/ba5cd48a3982b370924c06c82bf26e93191a146b)) - *(impersonate)* Remove chrome99 impersonate 
([#38](https://github.com/0x676e67/wreq/issues/38)) - ([8f9ebdd](https://github.com/0x676e67/wreq/commit/8f9ebdd608ac4f8a21bcc59fce6c8710dd03d757)) - *(tls)* Revert tls_built_in_root_certs option ([#105](https://github.com/0x676e67/wreq/issues/105)) - ([d0cda0b](https://github.com/0x676e67/wreq/commit/d0cda0be402797c265e209a7b9fee55db89a2faa)) - Remove `proxies_maybe_http_auth` state - ([52791a6](https://github.com/0x676e67/wreq/commit/52791a69dba7d61620257c0736c809683e1b3626)) ### Body - Don't call poll_ready on tx when 0 bytes remaining. ([#479](https://github.com/0x676e67/wreq/issues/479)) - ([d62f8c2](https://github.com/0x676e67/wreq/commit/d62f8c2bbd39d6cf5562c2f3c0aad32bad81d331)) ### CI - Enable dependabot for GitHub Action Workflow ([#1831](https://github.com/0x676e67/wreq/issues/1831)) - ([eca2a2f](https://github.com/0x676e67/wreq/commit/eca2a2f23f97409e6828e171b13d0eb3bc34465c)) - Make a single final job that depends on all others ([#1291](https://github.com/0x676e67/wreq/issues/1291)) - ([b9cf2db](https://github.com/0x676e67/wreq/commit/b9cf2db69756cde5e3091cc6a06cff1deb2e3764)) - Check documentation ([#1246](https://github.com/0x676e67/wreq/issues/1246)) - ([9293cd2](https://github.com/0x676e67/wreq/commit/9293cd206143d48bb68033b7de835ca2c6cdeea3)) ### Doc - `stream` feature is needed for `wrap_stream` and `From` for `Body` ([#1456](https://github.com/0x676e67/wreq/issues/1456)) - ([9339c54](https://github.com/0x676e67/wreq/commit/9339c543235ca09664e388284811746020350b4b)) ### Error - Add functions to check more error types. 
([#945](https://github.com/0x676e67/wreq/issues/945)) - ([668e89b](https://github.com/0x676e67/wreq/commit/668e89b78ae1e7a0e88fb7f99649b7c907d2f0da)) ### Examples - Allow passing URL via CLI - ([7388b67](https://github.com/0x676e67/wreq/commit/7388b676df8431b63edc337ce8dc3032953fe07e)) ### Feature - Auto detect MacOS proxy settings ([#1955](https://github.com/0x676e67/wreq/issues/1955)) - ([70d100c](https://github.com/0x676e67/wreq/commit/70d100c1b81dc8856e7cfb7b31b682c2028ca877)) ### From for Response ([#360](https://github.com/0x676e67/wreq/issues/360)) - ([4857a59](https://github.com/0x676e67/wreq/commit/4857a5917dd5445a3f5ed04edcff01b95eda7823)) ### Impersonate - Bugfix `chrome_123`, `chrome_124` headers - ([429bb1d](https://github.com/0x676e67/wreq/commit/429bb1d763d5a4c37a0104efe7c03ecdc6434071)) ### Lint - Fix unused `Identity` if only using `default-tls` ([#1164](https://github.com/0x676e67/wreq/issues/1164)) - ([287a6d1](https://github.com/0x676e67/wreq/commit/287a6d18528418381dbb28e7bd6728b1ac24b5d3)) ### Response.copy_to - Fix docs markup - ([4aa34bb](https://github.com/0x676e67/wreq/commit/4aa34bb5916a70e8216e5198cea278d42967d74b)) ### WASM - Add `try_clone` implementations to `Request` and `RequestBuilder` ([#1286](https://github.com/0x676e67/wreq/issues/1286)) - ([c4388fc](https://github.com/0x676e67/wreq/commit/c4388fcff9401d23169c6731901457e89039bf53)) - Set RequestCredentials to None by default ([#1249](https://github.com/0x676e67/wreq/issues/1249)) - ([42b3160](https://github.com/0x676e67/wreq/commit/42b31600c30609cb8df90c799fbfbd0c305e422d)) ### [#1095] - Implement `basic_auth` for WASM - ([28840af](https://github.com/0x676e67/wreq/commit/28840afd46fe3b81b7c77dde4537ad702826c7f7)) ### Actions - Remove --all flag from rustfmt ([#795](https://github.com/0x676e67/wreq/issues/795)) - ([b3d5f78](https://github.com/0x676e67/wreq/commit/b3d5f78b8f3ddd36a4fc6568e8a091f947dd0ff5)) ### Async - Add conversions from static slices to Body - 
([87f03e1](https://github.com/0x676e67/wreq/commit/87f03e167c0deba25f1ca40376a5b69d598cb88f)) ### Async/client - Return a impl Future on execute() - ([4fba983](https://github.com/0x676e67/wreq/commit/4fba983e5e6722a457a10988e20e5277faf01e4c)) ### Async/reponse - Return a impl Future on json() - ([5e38b41](https://github.com/0x676e67/wreq/commit/5e38b419f00d6526e67078b8dd52054859a5ede5)) ### Async/request - Add methods to split and reassemble a RequestBuilder ([#1770](https://github.com/0x676e67/wreq/issues/1770)) - ([119366e](https://github.com/0x676e67/wreq/commit/119366e95720aa1b35e5bf79cd91255d6050e360)) - Add a basic example for send() - ([0c84e6b](https://github.com/0x676e67/wreq/commit/0c84e6b9e9a7f48edc3b591bf7e28caa4f246ecd)) - Return a impl Future on send() - ([8b62f47](https://github.com/0x676e67/wreq/commit/8b62f47ac3f5de43fbbe0445d0958eb8710f9174)) ### Blocking - Add tcp_keepalive option ([#1100](https://github.com/0x676e67/wreq/issues/1100)) - ([a2133ae](https://github.com/0x676e67/wreq/commit/a2133aec3b313bb370c0cf88173de33ce7cba465)) - Opt-out CPUs auto-detection in debug mode ([#807](https://github.com/0x676e67/wreq/issues/807)) - ([7622c75](https://github.com/0x676e67/wreq/commit/7622c750648fe5453e83f7fa57e73732eb699638)) ### Boring - Upgrade latest version - ([ec7f212](https://github.com/0x676e67/wreq/commit/ec7f212a554044c0a407e779f1db7343e6be392a)) ### Boringssl - Add SSL_set_permute_extensions - ([29538bc](https://github.com/0x676e67/wreq/commit/29538bc02e88866e5b8016539bbce1e41b4c6883)) ### Bug - Fix custom content-type overidden by json method ([#1833](https://github.com/0x676e67/wreq/issues/1833)) - ([b13ca4b](https://github.com/0x676e67/wreq/commit/b13ca4b3399b42e7bbdafc374a129ea09bf33b17)) - Fix custom content-type overidden by json method - ([2364364](https://github.com/0x676e67/wreq/commit/23643640ac72e26061314b15c1f6372df4117413)) ### Build - *(deps)* Bump actions/checkout from 3 to 4 ([#35](https://github.com/0x676e67/wreq/issues/35)) 
- ([07e700d](https://github.com/0x676e67/wreq/commit/07e700d41482eeb7b3e571608439241b43f96bec)) - *(deps)* Bump softprops/action-gh-release from 1 to 2 ([#36](https://github.com/0x676e67/wreq/issues/36)) - ([ff76de9](https://github.com/0x676e67/wreq/commit/ff76de993a07df45b4b8be690ce725fc2e344e89)) - Fix `android`/`fuchsia`/`linux` --no-default-features build ([#110](https://github.com/0x676e67/wreq/issues/110)) - ([40e2b8a](https://github.com/0x676e67/wreq/commit/40e2b8a10748b3b32ea9076c4ca69d14d9596324)) - Fix `--no-default-features` build - ([0d0fef0](https://github.com/0x676e67/wreq/commit/0d0fef05250bdfc915671e9cf86cd229621964be)) ### Cargo - Update to rustls 0.16 - ([3033f11](https://github.com/0x676e67/wreq/commit/3033f11639c2ef0eab86286083b40586079d2662)) ### Client - Add convenience method for DELETE - ([a3983f3](https://github.com/0x676e67/wreq/commit/a3983f3122b2d1495ea36bb5a8fd019a7605ae56)) ### Dep - Upgrade trust-dns-resolver from v0.22 to v0.23 ([#1965](https://github.com/0x676e67/wreq/issues/1965)) - ([0292486](https://github.com/0x676e67/wreq/commit/0292486abab25914c046b71ab6d6da24206614d3)) ### Dependencies - Upgrade base64 to latest version ([#692](https://github.com/0x676e67/wreq/issues/692)) - ([3090a68](https://github.com/0x676e67/wreq/commit/3090a68d5383c572deba077d37d44e1c0424ac11)) ### Deps - *(async-tungstenite)* Downgrade `async-tungstenite` to `0.27.0` ([#161](https://github.com/0x676e67/wreq/issues/161)) - ([f26f8c4](https://github.com/0x676e67/wreq/commit/f26f8c4eccde38c91cb0ee9e55825b26429680a4)) - *(async-tungstenite)* 0.28.0 ([#24](https://github.com/0x676e67/wreq/issues/24)) - ([a924df3](https://github.com/0x676e67/wreq/commit/a924df32110b68ec020e04d20a21f3c032bd087a)) - *(base64)* Bump version to v0.22.x ([#46](https://github.com/0x676e67/wreq/issues/46)) - ([65e5b6d](https://github.com/0x676e67/wreq/commit/65e5b6d775c6cf252a96b06febd82317067057e1)) - *(boring)* V4.x ([#76](https://github.com/0x676e67/wreq/issues/76)) - 
([8eb0bf4](https://github.com/0x676e67/wreq/commit/8eb0bf45f9a7333f79d882dca935cbbc3c52e8dc)) - *(boring-sys)* Bump version to v2.0.6 - ([1f4fcc6](https://github.com/0x676e67/wreq/commit/1f4fcc6dd7fe4a35616f7c7f6a9480c1a9411a9f)) - *(boring-sys)* Bump version to v2.0.5 - ([e62c99d](https://github.com/0x676e67/wreq/commit/e62c99df8b33174d7b2616406786b341cc7e8add)) - *(boring-sys)* Bump version to v2.0.4 - ([fa9b28c](https://github.com/0x676e67/wreq/commit/fa9b28c1679c02f0cfcffadb7ace9bdb753a623f)) - *(boring-sys)* Bump version to v2.0.3 - ([1a79070](https://github.com/0x676e67/wreq/commit/1a7907054e33cda15bd89cccf49aa06938525f98)) - *(boring/hyper/h2)* Migration patch crate name ([#109](https://github.com/0x676e67/wreq/issues/109)) - ([676d7b3](https://github.com/0x676e67/wreq/commit/676d7b3038cc12499b5dac4befaf5c1448ca6684)) - *(brotli)* 7.0.0 ([#22](https://github.com/0x676e67/wreq/issues/22)) - ([94e2fdd](https://github.com/0x676e67/wreq/commit/94e2fdd605f969a185bc104d62e8e3e7b6f44b78)) - *(chore)* Update to the latest rustls ([#969](https://github.com/0x676e67/wreq/issues/969)) - ([1a2c102](https://github.com/0x676e67/wreq/commit/1a2c10256a924ff8753f683c4200b8b4d05a2cdd)) - *(cookie_store)* Bump version to v0.21.x ([#47](https://github.com/0x676e67/wreq/issues/47)) - ([fbf0bdc](https://github.com/0x676e67/wreq/commit/fbf0bdcee4b9a58d565b1083fb7c61fc29ef64c7)) - *(h2)* Use h2 dependencies export by hyper ([#63](https://github.com/0x676e67/wreq/issues/63)) - ([6effc9d](https://github.com/0x676e67/wreq/commit/6effc9d2445fdeefb63d271441b65b163a6f4ee1)) - *(hyper)* Bump version to v0.14.60 ([#74](https://github.com/0x676e67/wreq/issues/74)) - ([6842220](https://github.com/0x676e67/wreq/commit/6842220dc1bf28eeee2834b3952c48a8a2bbc1d8)) - *(hyper)* Bump version to v0.14.50 ([#45](https://github.com/0x676e67/wreq/issues/45)) - ([c0cbf29](https://github.com/0x676e67/wreq/commit/c0cbf294ec1c86d63b13b8592b3ef32e121dc1e6)) - *(hyper)* Bump version to v0.14.33 - 
([b7fa5f3](https://github.com/0x676e67/wreq/commit/b7fa5f344b0b8b9957b197df7ad79309e3acc593)) - *(hyper)* Bump version to v0.14.28 - ([bdcbe40](https://github.com/0x676e67/wreq/commit/bdcbe40a74357630cf96398af1994d950acb2bc6)) - *(hyper_imp)* Bump version to v0.14.30 - ([4ba5b00](https://github.com/0x676e67/wreq/commit/4ba5b0059956761b6774f55e181a05b806425b26)) - *(ipnet)* 2.10.0 ([#15](https://github.com/0x676e67/wreq/issues/15)) - ([f708a86](https://github.com/0x676e67/wreq/commit/f708a86a4ece4598a1788750a5c6a3a3fa6ab1e5)) - *(ipnet)* V2.9.0 ([#56](https://github.com/0x676e67/wreq/issues/56)) - ([b14d428](https://github.com/0x676e67/wreq/commit/b14d4284028b0ee551716d2000a6a305c3d59a95)) - *(mime)* V0.3.17 ([#57](https://github.com/0x676e67/wreq/issues/57)) - ([1f76f27](https://github.com/0x676e67/wreq/commit/1f76f2788d8779a7e29baca4acf4b3a124b1b25d)) - *(percent-encoding)* V2.3 ([#75](https://github.com/0x676e67/wreq/issues/75)) - ([31ce45c](https://github.com/0x676e67/wreq/commit/31ce45cfb7691ff0e0684a92eef78dad6feda652)) - *(system-configuration)* V0.6.0 - ([8f68af5](https://github.com/0x676e67/wreq/commit/8f68af567683dc449df4b014bc6d7771f3065727)) - *(tokio-socks)* 0.5.2 ([#23](https://github.com/0x676e67/wreq/issues/23)) - ([d05a3f5](https://github.com/0x676e67/wreq/commit/d05a3f552b2ded4eeaa7f65d8b96f8ec96e570c7)) - *(tungstenite)* Backport dependencies - ([1c9da5b](https://github.com/0x676e67/wreq/commit/1c9da5be63e837284ba49870c160a9e8dcccad59)) - *(url)* V2.5 ([#58](https://github.com/0x676e67/wreq/issues/58)) - ([5d71c95](https://github.com/0x676e67/wreq/commit/5d71c95816ef018fd113280e6626dbd408d0d2d2)) - *(windows-registry)* 0.3.0 ([#25](https://github.com/0x676e67/wreq/issues/25)) - ([cb9cf99](https://github.com/0x676e67/wreq/commit/cb9cf99ed1cc2d7904be6455e178cb6ef8f618ef)) - *(winreg)* V0.52.0 - ([74144c2](https://github.com/0x676e67/wreq/commit/74144c25e220b85b51e4f635a4a25fd7c086fc2f)) - Remove unnecessary libc dependencies 
([#53](https://github.com/0x676e67/wreq/issues/53)) - ([6a24c13](https://github.com/0x676e67/wreq/commit/6a24c13ab7ee0d1e448654993daa9ddb36e4c87a)) - Update winrege 0.10 -> 0.50 ([#1869](https://github.com/0x676e67/wreq/issues/1869)) - ([e02df1f](https://github.com/0x676e67/wreq/commit/e02df1f448d845fe01e6ea82c76ec89a59e5d568)) - Update rustls v0.20.1 -> v0.21.0 ([#1791](https://github.com/0x676e67/wreq/issues/1791)) - ([a0b5ea5](https://github.com/0x676e67/wreq/commit/a0b5ea5d7179778ce3e02117863b23b452b84d48)) - Update async-compression v0.3.13 => v0.4.0 ([#1828](https://github.com/0x676e67/wreq/issues/1828)) - ([7e7b116](https://github.com/0x676e67/wreq/commit/7e7b116a134cc0d6d646ab316dd83976369d5298)) ### Dpes - *(typed-builder)* V0.20.0 ([#16](https://github.com/0x676e67/wreq/issues/16)) - ([ea70d90](https://github.com/0x676e67/wreq/commit/ea70d902c68bf785c45c255c61ed48276f005e14)) ### Example - Update usage doc for blocking example ([#1112](https://github.com/0x676e67/wreq/issues/1112)) - ([1f425a0](https://github.com/0x676e67/wreq/commit/1f425a0244bcd7b4565dceb9076450d951f2ec03)) ### Fmt - Wasm body ([#1359](https://github.com/0x676e67/wreq/issues/1359)) - ([bd4e0c6](https://github.com/0x676e67/wreq/commit/bd4e0c663c243b584dca114c1d376f67b1967f64)) ### Http3 - Upgrade dependencies ([#2028](https://github.com/0x676e67/wreq/issues/2028)) - ([52190df](https://github.com/0x676e67/wreq/commit/52190df64fb56edbfb9cb7c054662b1cfedad476)) - Enable `runtime-tokio` for `quinn` ([#1846](https://github.com/0x676e67/wreq/issues/1846)) - ([06c8e5b](https://github.com/0x676e67/wreq/commit/06c8e5b0b008afee8114fb979b85cd8b73415391)) - Don't force `webpki` when experiemental `http3` is enabled ([#1845](https://github.com/0x676e67/wreq/issues/1845)) - ([c9f0c28](https://github.com/0x676e67/wreq/commit/c9f0c28e4c6e2b9d09544df832c41deef3847505)) ### Impersonate - Add `chrome_126` - ([808e23a](https://github.com/0x676e67/wreq/commit/808e23a935439ac8a0d41c9aa6ab1661070761d7)) - 
Chrome_123, chrome_125 - add `zstd` to Accept-Encoding header - ([f17d07e](https://github.com/0x676e67/wreq/commit/f17d07e1d0c3aa8036dcbd785508a43f25bf21cd)) ### Msrv - Bump to 1.63 ([#1947](https://github.com/0x676e67/wreq/issues/1947)) - ([4aa8516](https://github.com/0x676e67/wreq/commit/4aa8516770eb96c66e753621660275e65e269213)) ### Multipart - Force a CRLF at the end of request - ([a525209](https://github.com/0x676e67/wreq/commit/a52520941f518ade756a73797e875722d1ba344b)) ### Native-tls - Add Identiy::from_pkcs8_pem ([#1655](https://github.com/0x676e67/wreq/issues/1655)) - ([231b18f](https://github.com/0x676e67/wreq/commit/231b18f83572836c674404b33cb1ca8b35ca3e36)) ### Proxy - Add support for proxy authentication with user-specified header values ([#2053](https://github.com/0x676e67/wreq/issues/2053)) - ([c09c5e6](https://github.com/0x676e67/wreq/commit/c09c5e6bbcf79b3984cd4c2cf2f2f5d9e2a4a6af)) - Refactor a collapsible_match ([#1214](https://github.com/0x676e67/wreq/issues/1214)) - ([544282a](https://github.com/0x676e67/wreq/commit/544282a0b49d6ba2ac78b844c23415c0bf62a304)) ### Refractor - *(tls/settings)* Generate configuration using builder mode ([#121](https://github.com/0x676e67/wreq/issues/121)) - ([a370f18](https://github.com/0x676e67/wreq/commit/a370f18774eced8c2c62ed2d4d9f9db72639eaba)) ### Remove - *(client)* Remove blocking client support ([#123](https://github.com/0x676e67/wreq/issues/123)) ([#124](https://github.com/0x676e67/wreq/issues/124)) ([#125](https://github.com/0x676e67/wreq/issues/125)) - ([5091f9a](https://github.com/0x676e67/wreq/commit/5091f9ae4f8394ec5e5a6dbf138c598c8d5b2295)) ### Request - Test adding duplicate headers to the request ([#519](https://github.com/0x676e67/wreq/issues/519)) - ([1bdc3fa](https://github.com/0x676e67/wreq/commit/1bdc3fa3c8dd3c4038efc566b7ccdbc86e38cfa3)) ### Tmp - Use upstream git repo for hyper-native-tls - ([d12d604](https://github.com/0x676e67/wreq/commit/d12d604e380b8f1ee8cc9e22fd218ce3d283aa4e)) ### 
Wasm - Add method `user_agent` to `ClientBuilder`. ([#2018](https://github.com/0x676e67/wreq/issues/2018)) - ([a9b960f](https://github.com/0x676e67/wreq/commit/a9b960fc24455c3c5c7e35b54dbcc6512cc86d2b)) - Blob url support ([#1797](https://github.com/0x676e67/wreq/issues/1797)) - ([2fa69ad](https://github.com/0x676e67/wreq/commit/2fa69ad384ceb9a0f718ceb45b092341a5285dd4)) - Fix premature abort for streaming bodies ([#1782](https://github.com/0x676e67/wreq/issues/1782)) - ([df2b3ba](https://github.com/0x676e67/wreq/commit/df2b3baadc1eade54b1c22415792b778442673a4)) - Fix standalone/multipart body conversion to JsValue ([#1364](https://github.com/0x676e67/wreq/issues/1364)) - ([0ef1a2e](https://github.com/0x676e67/wreq/commit/0ef1a2ea78eaa5aeb280fd1dbbbabb83abc45c30)) - Don't send request body as plain uint8 array ([#1358](https://github.com/0x676e67/wreq/issues/1358)) - ([bb3d102](https://github.com/0x676e67/wreq/commit/bb3d102108493da9adf9081b4d0badbff4a2bd91)) - Add missing `as_bytes` method to `Body` implementation ([#1270](https://github.com/0x676e67/wreq/issues/1270)) - ([d40276c](https://github.com/0x676e67/wreq/commit/d40276c0f081c2cc1ebc8b63ad6075daf0f6dff0)) - Avoid dependency on serde-serialize feature ([#1337](https://github.com/0x676e67/wreq/issues/1337)) - ([cfa301c](https://github.com/0x676e67/wreq/commit/cfa301c7fa0f83330f57b312f4e762a3e47ff2cb)) - Omit request body if it's empty ([#1012](https://github.com/0x676e67/wreq/issues/1012)) - ([d42385e](https://github.com/0x676e67/wreq/commit/d42385e7f2cc364efa5e16a7154e7e0cebdd1b57)) - Impl TryFrom> for Request ([#997](https://github.com/0x676e67/wreq/issues/997)) - ([dd8441f](https://github.com/0x676e67/wreq/commit/dd8441fd23dae6ffb79b4cea2862e5bca0c59743)) - Add error_for_status to wasm response ([#779](https://github.com/0x676e67/wreq/issues/779)) - ([1478313](https://github.com/0x676e67/wreq/commit/147831375613a5e508487b2d85a99104ae1505af)) - Add url function to wasm response 
([#777](https://github.com/0x676e67/wreq/issues/777)) - ([fd88e0c](https://github.com/0x676e67/wreq/commit/fd88e0c648e6632f3f92ed119b1a93aefd66ed64)) - Add request body in the form of Bytes ([#696](https://github.com/0x676e67/wreq/issues/696)) - ([f6f81f9](https://github.com/0x676e67/wreq/commit/f6f81f9cc1ab84a007fe4203822de08d72c07f57)) - Add bytes method to wasm response ([#694](https://github.com/0x676e67/wreq/issues/694)) - ([b24b0be](https://github.com/0x676e67/wreq/commit/b24b0be461ed39a96335e40561d07a35f2c3eb36)) - Translate over response headers ([#689](https://github.com/0x676e67/wreq/issues/689)) - ([dd65fc7](https://github.com/0x676e67/wreq/commit/dd65fc7c3ad037e6674e8bac8c46f4bdeca6c4ca)) ## New Contributors ❤️ * @0x676e67 made their first contribution * @dairoot made their first contribution in [#68](https://github.com/0x676e67/wreq/pull/68) * @AliaSabur made their first contribution in [#31](https://github.com/0x676e67/wreq/pull/31) * @deedy5 made their first contribution * @dependabot[bot] made their first contribution * @seanmonstar made their first contribution * @jan-auer made their first contribution * @lorepozo made their first contribution * @abls made their first contribution * @Noah-Kennedy made their first contribution * @tshepang made their first contribution * @bitfl0wer made their first contribution * @FirelightFlagboy made their first contribution * @tnull made their first contribution * @conradludgate made their first contribution * @droe made their first contribution * @NobodyXu made their first contribution * @jefflloyd made their first contribution * @brian030128 made their first contribution * @eric-seppanen made their first contribution * @T-Sujeeban made their first contribution * @cipherbrain made their first contribution * @bouzuya made their first contribution * @VivekPanyam made their first contribution * @paolobarbolini made their first contribution * @ollyswanson made their first contribution * @daxpedda made their first 
contribution * @attila-lin made their first contribution * @smndtrl made their first contribution * @nyurik made their first contribution * @complexspaces made their first contribution * @cpu made their first contribution * @hulin32 made their first contribution * @skyf0l made their first contribution * @nickelc made their first contribution * @jneem made their first contribution * @kckeiks made their first contribution * @lucab made their first contribution * @j7nw4r made their first contribution * @TurnOfACard made their first contribution * @anhcuky made their first contribution * @lstrojny made their first contribution * @dmeijboom made their first contribution * @4JX made their first contribution * @link2xt made their first contribution * @beeb made their first contribution * @Khoulaiz made their first contribution * @BlackDex made their first contribution * @Austaras made their first contribution * @kianmeng made their first contribution * @Alvenix made their first contribution * @irrelevelephant made their first contribution * @mirecl made their first contribution * @lpraneis made their first contribution * @luqmana made their first contribution * @vidhanio made their first contribution * @futursolo made their first contribution * @neoeinstein made their first contribution * @ctron made their first contribution * @ made their first contribution * @cuishuang made their first contribution * @Mathspy made their first contribution * @eyalsatori made their first contribution * @flavio made their first contribution * @MisileLab made their first contribution * @jqnatividad made their first contribution * @ducaale made their first contribution * @biluohc made their first contribution * @nihaals made their first contribution * @ViddeM made their first contribution * @edmorley made their first contribution * @sugar700 made their first contribution * @kraktus made their first contribution * @TjeuKayim made their first contribution * @ecclarke42 made their first 
contribution * @nikstur made their first contribution * @vsaase made their first contribution * @BiagioFesta made their first contribution * @niuhuan made their first contribution * @nwolber made their first contribution * @fredr made their first contribution * @jeschkies made their first contribution * @pfernie made their first contribution * @crapStone made their first contribution * @6543 made their first contribution * @striezel made their first contribution * @victoryaskevich made their first contribution * @abatkin made their first contribution * @skystar-p made their first contribution * @silvioprog made their first contribution * @jmgilman made their first contribution * @Dr-Emann made their first contribution * @jplatte made their first contribution * @blyxxyz made their first contribution * @dlesl made their first contribution * @Saruniks made their first contribution * @campbellC made their first contribution * @kjvalencik made their first contribution * @mlodato517 made their first contribution * @bensadiku made their first contribution * @marcoieni made their first contribution * @ctjhoa made their first contribution * @jonhoo made their first contribution * @Septias made their first contribution * @kotborealis made their first contribution * @bishtpawan made their first contribution * @Gottox made their first contribution * @CfirTsabari made their first contribution * @ibraheemdev made their first contribution * @svenstaro made their first contribution * @kornelski made their first contribution * @meldron made their first contribution * @webern made their first contribution * @rakshith-ravi made their first contribution * @Marwes made their first contribution * @glyphpoch made their first contribution * @markhildreth made their first contribution * @wchargin made their first contribution * @amousset made their first contribution * @baoyachi made their first contribution * @messense made their first contribution * @ranile made their first contribution 
* @varoonp123 made their first contribution * @Martichou made their first contribution * @frewsxcv made their first contribution * @zicklag made their first contribution * @thomastaylor312 made their first contribution * @fiag made their first contribution * @est31 made their first contribution * @stevelr made their first contribution * @taiki-e made their first contribution * @federico-terzi made their first contribution * @XyLyXyRR made their first contribution * @pluehne made their first contribution * @sdroege made their first contribution * @Snarpix made their first contribution * @fabricedesre made their first contribution * @shuoli84 made their first contribution * @JOE1994 made their first contribution * @Jasonoro made their first contribution * @zacps made their first contribution * @fuyumatsuri made their first contribution * @707090 made their first contribution * @snejugal made their first contribution * @TaKO8Ki made their first contribution * @vorner made their first contribution * @alex made their first contribution * @LionsAd made their first contribution * @davidpdrsn made their first contribution * @alianse777 made their first contribution * @tasn made their first contribution * @jsha made their first contribution * @bryanburgers made their first contribution * @dcuenot made their first contribution * @slonopotamus made their first contribution * @hecrj made their first contribution * @x1957 made their first contribution * @cuviper made their first contribution * @x448 made their first contribution * @Luro02 made their first contribution * @eugene-babichenko made their first contribution * @kentfredric made their first contribution * @Diggsey made their first contribution * @nicklan made their first contribution * @tesuji made their first contribution * @metajack made their first contribution * @manyuanrong made their first contribution * @WindSoilder made their first contribution * @r-arias made their first contribution * @rhysd made their first 
contribution * @kodieg made their first contribution * @rodoufu made their first contribution * @Lucretiel made their first contribution * @mbrobbel made their first contribution * @tobdub made their first contribution * @jgall made their first contribution * @cbourjau made their first contribution * @gathuku made their first contribution * @vorot93 made their first contribution * @khuey made their first contribution * @SOF3 made their first contribution * @benesch made their first contribution * @danieleades made their first contribution * @basdebue made their first contribution * @vigneshsarma made their first contribution * @travier-anssi made their first contribution * @ancwrd1 made their first contribution * @nirasan made their first contribution * @prfss made their first contribution * @repi made their first contribution * @mathstuf made their first contribution * @GuillaumeGomez made their first contribution * @bluejekyll made their first contribution * @Liby99 made their first contribution * @quininer made their first contribution * @aaneto made their first contribution * @chenl made their first contribution * @jeromegn made their first contribution * @theduke made their first contribution * @arnodb made their first contribution * @CJP10 made their first contribution * @fbenkstein made their first contribution * @ismith made their first contribution * @antoinecarton made their first contribution * @mavax made their first contribution * @gbonnema made their first contribution * @emschwartz made their first contribution * @puffybsd made their first contribution * @sudo-ben made their first contribution * @shouya made their first contribution * @martin-t made their first contribution * @kevinwilson541 made their first contribution * @polyfloyd made their first contribution * @Eijebong made their first contribution * @illicitonion made their first contribution * @dbrgn made their first contribution * @davidwilemski made their first contribution * @KNnut made 
their first contribution * @MarkDDR made their first contribution * @yageek made their first contribution * @JoshMcguigan made their first contribution * @frol made their first contribution * @spk made their first contribution * @rukai made their first contribution * @jcaesar made their first contribution * @andy128k made their first contribution * @bhansconnect made their first contribution * @scottschroeder made their first contribution * @DoumanAsh made their first contribution * @kennytm made their first contribution * @cakey made their first contribution * @mattias-p made their first contribution * @Siilwyn made their first contribution * @Sh4pe made their first contribution * @Dylan-DPC made their first contribution * @csirkeee made their first contribution * @is made their first contribution * @oli-obk made their first contribution * @sbstp made their first contribution * @shepmaster made their first contribution * @tafia made their first contribution * @knight42 made their first contribution * @Henning-K made their first contribution * @osa1 made their first contribution * @marmistrz made their first contribution * @kamalmarhubi made their first contribution * @chrisvittal made their first contribution * @e00E made their first contribution * @KodrAus made their first contribution * @Roguelazer made their first contribution * @bhendo made their first contribution * @tomprince made their first contribution * @AndyGauge made their first contribution * @jaemk made their first contribution * @budziq made their first contribution * @steverob made their first contribution * @rap2hpoutre made their first contribution * @TedDriggs made their first contribution * @imp made their first contribution * @gsquire made their first contribution * @rylio made their first contribution * @emk made their first contribution * @Keruspe made their first contribution * @quodlibetor made their first contribution * @sfackler made their first contribution * @sebasgarcep made their 
first contribution
* @saghm made their first contribution
* @nelsonjchen made their first contribution
* @badboy made their first contribution
* @brycefisher made their first contribution
* @aidanhs made their first contribution
* @Michael-F-Bryan made their first contribution



================================================
FILE: bench/http1.rs
================================================
//! HTTP/1.1 benchmark

mod support;

use std::time::Duration;

use criterion::{Criterion, criterion_group, criterion_main};
use support::{HttpVersion, Tls, bench};

/// Number of requests each benchmark iteration sends to the local server.
const NUM_REQUESTS_TO_SEND: usize = 500;

#[inline]
fn bench(c: &mut Criterion) {
    // Plain-text HTTP/1.1 (no TLS).
    bench::bench(c, Tls::Disabled, HttpVersion::Http1, NUM_REQUESTS_TO_SEND)
        .expect("Failed to run HTTP/1 benchmark server")
}

criterion_group!(
    name = benches;
    config = Criterion::default()
        .sample_size(10)
        .warm_up_time(Duration::from_secs(3));
    targets = bench
);
criterion_main!(benches);



================================================
FILE: bench/http1_over_tls.rs
================================================
//! HTTP/1.1 over TLS benchmark

mod support;

use std::time::Duration;

use criterion::{Criterion, criterion_group, criterion_main};
use support::{HttpVersion, Tls, bench};

/// Number of requests each benchmark iteration sends to the local server.
const NUM_REQUESTS_TO_SEND: usize = 500;

#[inline]
fn bench(c: &mut Criterion) {
    // HTTP/1.1 with TLS enabled.
    bench::bench(c, Tls::Enabled, HttpVersion::Http1, NUM_REQUESTS_TO_SEND)
        .expect("Failed to run HTTP/1 over TLS benchmark server")
}

criterion_group!(
    name = benches;
    config = Criterion::default()
        .sample_size(10)
        .warm_up_time(Duration::from_secs(3));
    targets = bench
);
criterion_main!(benches);



================================================
FILE: bench/http2.rs
================================================
//! HTTP/2 benchmark

mod support;

use std::time::Duration;

use criterion::{Criterion, criterion_group, criterion_main};
use support::{HttpVersion, Tls, bench};

/// Number of requests each benchmark iteration sends to the local server.
const NUM_REQUESTS_TO_SEND: usize = 500;

#[inline]
fn bench(c: &mut Criterion) {
    // Plain-text HTTP/2 (no TLS).
    bench::bench(c, Tls::Disabled, HttpVersion::Http2, NUM_REQUESTS_TO_SEND)
        .expect("Failed to run HTTP/2 benchmark server")
}

criterion_group!(
    name = benches;
    config = Criterion::default()
        .sample_size(10)
        .warm_up_time(Duration::from_secs(3));
    targets = bench
);
criterion_main!(benches);



================================================
FILE: bench/http2_over_tls.rs
================================================
//! HTTP/2 over TLS benchmark

mod support;

use std::time::Duration;

use criterion::{Criterion, criterion_group, criterion_main};
use support::{HttpVersion, Tls, bench};

/// Number of requests each benchmark iteration sends to the local server.
const NUM_REQUESTS_TO_SEND: usize = 500;

#[inline]
fn bench(c: &mut Criterion) {
    // HTTP/2 with TLS enabled.
    bench::bench(c, Tls::Enabled, HttpVersion::Http2, NUM_REQUESTS_TO_SEND)
        .expect("Failed to run HTTP/2 over TLS benchmark server")
}

criterion_group!(
    name = benches;
    config = Criterion::default()
        .sample_size(10)
        .warm_up_time(Duration::from_secs(3));
    targets = bench
);
criterion_main!(benches);



================================================
FILE: bench/support/bench.rs
================================================
use criterion::Criterion;

use crate::support::{
    BoxError, HttpVersion, Tls, client::bench_clients, current_thread_runtime,
    multi_thread_runtime, server::with_server,
};

pub const CURRENT_THREAD_LABEL: &str = "current_thread";
pub const MULTI_THREAD_LABEL: &str = "multi_thread";
pub const CONCURRENT_CASES: &[usize] = &[10, 50, 100, 150];

/// Recommended chunk sizes for real-world network scenarios:
/// - 16 KB: Matches standard TCP buffers, ideal for HTTP/2 frames.
/// - 32 KB: For large HTTP payloads, fits modern socket buffers.
/// - 64 KB: Default Linux buffer size, optimized for large uploads.
/// - 128 KB: For high-throughput, large-scale transfers.
/// - 256 KB: Bulk data, maximum throughput on fast networks. /// /// For benchmarking latency-sensitive and high-throughput transfers. pub const BODY_CASES: [(&[u8], usize); 7] = [ (&[b'a'; 1024], 1024), // 1 KB, chunk 1 KB (&[b'a'; 10 * 1024], 10 * 1024), // 10 KB, chunk 10 KB (&[b'a'; 64 * 1024], 16 * 1024), // 64 KB, chunk 16 KB (&[b'a'; 128 * 1024], 32 * 1024), // 128 KB, chunk 32 KB (&[b'a'; 1024 * 1024], 64 * 1024), // 1 MB, chunk 64 KB (&[b'a'; 2 * 1024 * 1024], 128 * 1024), // 2 MB, chunk 128 KB (&[b'a'; 4 * 1024 * 1024], 256 * 1024), // 4 MB, chunk 256 KB ]; pub fn bench( c: &mut Criterion, tls: Tls, http_version: HttpVersion, num_requests: usize, ) -> Result<(), BoxError> { const OS: &str = std::env::consts::OS; const ARCH: &str = std::env::consts::ARCH; let system = sysinfo::System::new_all(); let cpu_model = system .cpus() .first() .map_or("n/a", |cpu| cpu.brand().trim_start().trim_end()); for &concurrent_limit in CONCURRENT_CASES { for body in BODY_CASES { with_server(tls, |addr| { // single-threaded client let mut group = c.benchmark_group(format!( "{cpu_model}/{OS}_{ARCH}/{CURRENT_THREAD_LABEL}/{tls}/{http_version}/{concurrent_limit}/{}KB", body.0.len() / 1024, )); bench_clients( &mut group, current_thread_runtime, addr, tls, http_version, num_requests, concurrent_limit, body, )?; group.finish(); Ok(()) })?; with_server(tls, |addr| { // multi-threaded client let mut group = c.benchmark_group(format!( "{cpu_model}/{OS}_{ARCH}/{MULTI_THREAD_LABEL}/{tls}/{http_version}/{concurrent_limit}/{}KB", body.0.len() / 1024, )); bench_clients( &mut group, multi_thread_runtime, addr, tls, http_version, num_requests, concurrent_limit, body, )?; group.finish(); Ok(()) })?; } } Ok(()) } ================================================ FILE: bench/support/client.rs ================================================ use std::{convert::Infallible, net::SocketAddr, sync::Arc}; use bytes::Bytes; use criterion::{BenchmarkGroup, measurement::WallTime}; use 
http_body_util::BodyExt;
use tokio::{runtime::Runtime, sync::Semaphore};

use super::{BoxError, HttpVersion, Tls};

/// Builds a `wreq` client pinned to the given HTTP version.
///
/// NOTE(review): the return type's generic parameters were missing in the
/// extracted source (`-> Result {`); restored as
/// `Result<wreq::Client, BoxError>` — confirm against the repository.
fn create_wreq_client(tls: Tls, http_version: HttpVersion) -> Result<wreq::Client, BoxError> {
    let builder = wreq::Client::builder()
        .no_proxy()
        .redirect(wreq::redirect::Policy::none())
        // The bench server uses a self-signed certificate, so verification
        // must be disabled when TLS is enabled.
        .tls_cert_verification(!matches!(tls, Tls::Enabled));
    let builder = match http_version {
        HttpVersion::Http1 => builder.http1_only(),
        HttpVersion::Http2 => builder.http2_only(),
    };
    Ok(builder.build()?)
}

/// Builds a `reqwest` client pinned to the given HTTP version.
fn create_reqwest_client(
    tls: Tls,
    http_version: HttpVersion,
) -> Result<reqwest::Client, BoxError> {
    let builder = reqwest::Client::builder()
        .no_proxy()
        .redirect(reqwest::redirect::Policy::none())
        .danger_accept_invalid_certs(matches!(tls, Tls::Enabled));
    let builder = match http_version {
        HttpVersion::Http1 => builder.http1_only(),
        HttpVersion::Http2 => builder.http2_prior_knowledge(),
    };
    Ok(builder.build()?)
}

/// Drains a `wreq` response frame-by-frame and asserts the total data size.
async fn wreq_body_assert(mut response: wreq::Response, expected_body_size: usize) {
    let mut body_size = 0;
    while let Some(Ok(chunk)) = response.frame().await {
        if let Ok(chunk) = chunk.into_data() {
            body_size += chunk.len();
        }
    }
    assert!(
        body_size == expected_body_size,
        "Unexpected response body: got {body_size} bytes, expected {expected_body_size} bytes"
    );
}

/// Drains a `reqwest` response chunk-by-chunk and asserts the total data size.
async fn reqwest_body_assert(mut response: reqwest::Response, expected_body_size: usize) {
    let mut body_size = 0;
    while let Ok(Some(chunk)) = response.chunk().await {
        body_size += chunk.len();
    }
    assert!(
        body_size == expected_body_size,
        "Unexpected response body: got {body_size} bytes, expected {expected_body_size} bytes"
    );
}

/// Turns a static byte slice into a chunked, infallible byte stream.
///
/// NOTE(review): the `TryStream` associated types and the `Ok` turbofish were
/// stripped in the extracted source; restored as `Bytes`/`Infallible`, which
/// matches `Bytes::from_static` and the imports above — confirm.
fn stream_from_bytes(
    body: &'static [u8],
    chunk_size: usize,
) -> impl futures_util::stream::TryStream<Ok = Bytes, Error = Infallible> + Send + 'static {
    futures_util::stream::unfold((body, 0), move |(body, offset)| async move {
        if offset >= body.len() {
            None
        } else {
            let end = (offset + chunk_size).min(body.len());
            // Zero-copy: the slice is 'static, so no allocation per chunk.
            let chunk = Bytes::from_static(&body[offset..end]);
            Some((Ok::<Bytes, Infallible>(chunk), (body, end)))
        }
    })
}

/// Builds a `wreq` request body, either streamed in chunks or sent whole.
#[inline]
fn wreq_body(stream: bool, (body, chunk_size): (&'static [u8], usize)) -> wreq::Body {
    if stream {
        let stream = stream_from_bytes(body, chunk_size);
        wreq::Body::wrap_stream(stream)
    } else {
        wreq::Body::from(body)
    }
}

/// Builds a `reqwest` request body, either streamed in chunks or sent whole.
#[inline]
fn reqwest_body(stream: bool, (body, chunk_size): (&'static [u8], usize)) -> reqwest::Body {
    if stream {
        let stream = stream_from_bytes(body, chunk_size);
        reqwest::Body::wrap_stream(stream)
    } else {
        reqwest::Body::from(body)
    }
}

/// Fires `num_requests` POSTs with `wreq`, bounded by a semaphore of
/// `concurrent_limit` permits, and asserts each echoed body size.
async fn wreq_requests_concurrent(
    client: &wreq::Client,
    url: &str,
    num_requests: usize,
    concurrent_limit: usize,
    body: (&'static [u8], usize),
    stream: bool,
) {
    let semaphore = Arc::new(Semaphore::new(concurrent_limit));
    let mut handles = Vec::with_capacity(num_requests);
    for _ in 0..num_requests {
        let client = client.clone();
        let url = url.to_string();
        let semaphore = semaphore.clone();
        let fut = async move {
            let _permit = semaphore
                .acquire()
                .await
                .expect("Semaphore should be acquirable");
            let response = client
                .post(url)
                .body(wreq_body(stream, body))
                .send()
                .await
                .expect("Unexpected request failure");
            wreq_body_assert(response, body.0.len()).await;
        };
        handles.push(tokio::spawn(fut));
    }
    futures_util::future::join_all(handles).await;
}

/// Same as [`wreq_requests_concurrent`] but driving the `reqwest` client.
async fn reqwest_requests_concurrent(
    client: &reqwest::Client,
    url: &str,
    num_requests: usize,
    concurrent_limit: usize,
    body: (&'static [u8], usize),
    stream: bool,
) {
    let semaphore = Arc::new(Semaphore::new(concurrent_limit));
    let mut handles = Vec::with_capacity(num_requests);
    for _ in 0..num_requests {
        let client = client.clone();
        let url = url.to_string();
        let semaphore = semaphore.clone();
        let fut = async move {
            let _permit = semaphore
                .acquire()
                .await
                .expect("Semaphore should be acquirable");
            let response = client
                .post(url)
                .body(reqwest_body(stream, body))
                .send()
                .await
                .expect("Unexpected request failure");
            reqwest_body_assert(response, body.0.len()).await;
        };
        handles.push(tokio::spawn(fut));
    }
    futures_util::future::join_all(handles).await;
}

#[allow(clippy::too_many_arguments)]
pub fn bench_clients(
    group: &mut
BenchmarkGroup<'_, WallTime>,
    rt: fn() -> Runtime,
    addr: SocketAddr,
    tls: Tls,
    http_version: HttpVersion,
    num_requests: usize,
    concurrent_limit: usize,
    body: (&'static [u8], usize),
) -> Result<(), BoxError> {
    let url = format!("{tls}://{addr}");

    // NOTE(review): the generic parameter `<C>` and its turbofish call sites
    // were stripped in the extracted source; restored so the label carries the
    // client crate's name via `type_name` — confirm against the repository.
    fn make_benchmark_label<C>(stream: bool) -> String {
        // `type_name` yields e.g. "wreq::client::Client"; keep the crate segment.
        let client = std::any::type_name::<C>()
            .split("::")
            .next()
            .expect("Type name should contain at least one segment");
        let body_type = if stream { "stream" } else { "full" };
        format!("{body_type}/{client}")
    }

    for stream in [false, true] {
        let client = create_wreq_client(tls, http_version)?;
        group.bench_function(make_benchmark_label::<wreq::Client>(stream), |b| {
            b.to_async(rt()).iter(|| {
                wreq_requests_concurrent(
                    &client,
                    &url,
                    num_requests,
                    concurrent_limit,
                    body,
                    stream,
                )
            })
        });
        // Drop eagerly so the previous client's pooled connections are closed
        // before the competing client is benchmarked.
        drop(client);

        let client = create_reqwest_client(tls, http_version)?;
        group.bench_function(make_benchmark_label::<reqwest::Client>(stream), |b| {
            b.to_async(rt()).iter(|| {
                reqwest_requests_concurrent(
                    &client,
                    &url,
                    num_requests,
                    concurrent_limit,
                    body,
                    stream,
                )
            })
        });
        drop(client);
    }
    Ok(())
}



================================================
FILE: bench/support/server.rs
================================================
use std::{convert::Infallible, io, net::SocketAddr, pin::Pin, sync::Arc, time::Duration};

use btls::{
    pkey::PKey,
    ssl::{Ssl, SslAcceptor, SslMethod},
    x509::X509,
};
use bytes::Bytes;
use http_body_util::{BodyExt, Collected, Full};
use hyper::{body::Incoming, service::service_fn};
use hyper_util::{
    rt::{TokioExecutor, TokioIo, TokioTimer},
    server::conn::auto::Builder,
};
use tokio::{
    io::{AsyncRead, AsyncWrite},
    net::{TcpListener, TcpStream},
    sync::oneshot,
    task::JoinSet,
};
use tokio_btls::SslStream;

use super::{BoxError, Tls, multi_thread_runtime};

/// Minimal HTTP echo server used as the benchmark target.
pub struct Server {
    listener: std::net::TcpListener,
    // NOTE(review): generic parameters stripped in the extracted source;
    // restored as `Option<Arc<SslAcceptor>>` (matches `Some(Arc::new(builder.build()))`
    // below) and `Builder<TokioExecutor>` (matches `Builder::new(TokioExecutor::new())`).
    tls_acceptor: Option<Arc<SslAcceptor>>,
    builder: Builder<TokioExecutor>,
}

impl Server {
    /// Binds an ephemeral loopback port; when `tls` is enabled, loads the
    /// self-signed test certificate/key from `tests/support`.
    pub fn new(tls: Tls) -> Result<Server, BoxError> {
        let tls_acceptor = match tls {
            Tls::Enabled => {
                let mut builder = SslAcceptor::mozilla_intermediate_v5(SslMethod::tls())?;
                let cert = X509::from_der(include_bytes!("../../tests/support/server.cert"))?;
                let key =
                    PKey::private_key_from_der(include_bytes!("../../tests/support/server.key"))?;
                builder.set_certificate(&cert)?;
                builder.set_private_key(&key)?;
                builder.check_private_key()?;
                Some(Arc::new(builder.build()))
            }
            Tls::Disabled => None,
        };

        let mut builder = Builder::new(TokioExecutor::new());
        builder.http1().timer(TokioTimer::new()).keep_alive(true);
        builder
            .http2()
            .timer(TokioTimer::new())
            .keep_alive_interval(Duration::from_secs(30));

        // Port 0 lets the OS pick a free port; non-blocking is required for
        // the later `TcpListener::from_std` conversion.
        let listener = std::net::TcpListener::bind("127.0.0.1:0")?;
        listener.set_nonblocking(true)?;

        Ok(Server {
            listener,
            tls_acceptor,
            builder,
        })
    }

    fn local_addr(&self) -> io::Result<SocketAddr> {
        self.listener.local_addr()
    }

    /// Accepts connections until the shutdown signal fires, then waits for all
    /// in-flight connection tasks to finish.
    async fn run(self, mut shutdown: oneshot::Receiver<()>) -> Result<(), BoxError> {
        let mut join_set = JoinSet::new();
        let listener = TcpListener::from_std(self.listener)?;
        loop {
            tokio::select! {
                _ = &mut shutdown => {
                    break;
                }
                accept = listener.accept() => {
                    if let Ok((socket, _peer_addr)) = accept {
                        let tls_acceptor = self.tls_acceptor.clone();
                        let builder = self.builder.clone();
                        join_set.spawn(async move {
                            handle_connection(socket, tls_acceptor, builder).await;
                        });
                    }
                }
            }
        }
        while let Some(result) = join_set.join_next().await {
            if let Err(e) = result {
                eprintln!("connection task failed: {e}");
            }
        }
        // Tokio internally accepts TCP connections while the TCPListener is active;
        // drop the listener to immediately refuse connections rather than letting
        // them hang.
::std::mem::drop(listener); Ok(()) } } pub struct Handle { shutdown: oneshot::Sender<()>, join: std::thread::JoinHandle<()>, } impl Handle { pub fn shutdown(self) { let _ = self.shutdown.send(()); let _ = self.join.join(); } } pub fn with_server(tls: Tls, f: F) -> Result<(), BoxError> where F: FnOnce(SocketAddr) -> Result<(), BoxError>, { let server = Server::new(tls)?; let addr = server.local_addr()?; let (shutdown_tx, shutdown_rx) = oneshot::channel(); let join = std::thread::spawn(move || { let rt = multi_thread_runtime(); rt.block_on(server.run(shutdown_rx)) .expect("Failed to run server with shutdown"); }); std::thread::sleep(Duration::from_millis(100)); let handle = Handle { shutdown: shutdown_tx, join, }; f(addr)?; handle.shutdown(); std::thread::sleep(Duration::from_millis(100)); Ok(()) } async fn serve(builder: Builder, stream: S) where S: AsyncRead + AsyncWrite + Unpin + Send + 'static, { let _ = builder .serve_connection( TokioIo::new(stream), service_fn(|req: http::Request| async { let bytes = req .into_body() .collect() .await .map(Collected::::to_bytes); let bytes = bytes.unwrap_or_else(|_| Bytes::new()); Ok::<_, Infallible>(http::Response::new(Full::new(bytes))) }), ) .await; } async fn handle_connection( socket: TcpStream, tls_acceptor: Option>, builder: Builder, ) { if let Some(acceptor) = tls_acceptor { let ssl = Ssl::new(acceptor.context()).expect("failed to create Ssl"); let mut stream = SslStream::new(ssl, socket).expect("failed to create SslStream"); // The client (or its connection pool) may proactively close the connection, // especially during benchmarks or when cleaning up idle connections. // This can cause TLS handshake failures (e.g., ConnectionReset, ConnectionAborted). // Such errors are expected and should be handled gracefully to avoid panicking // and to ensure the server remains robust under load. 
if Pin::new(&mut stream).accept().await.is_err() { return; } serve(builder, stream).await; } else { serve(builder, socket).await; } } ================================================ FILE: bench/support.rs ================================================ pub mod bench; pub mod client; pub mod server; use std::fmt; pub type BoxError = Box; #[allow(unused)] #[derive(Clone, Copy, Debug)] pub enum HttpVersion { Http1, Http2, } impl fmt::Display for HttpVersion { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let value = match self { HttpVersion::Http1 => "h1", HttpVersion::Http2 => "h2", }; f.write_str(value) } } #[allow(unused)] #[derive(Clone, Copy, Debug)] pub enum Tls { Enabled, Disabled, } impl fmt::Display for Tls { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let value = match self { Tls::Enabled => "https", Tls::Disabled => "http", }; f.write_str(value) } } pub fn current_thread_runtime() -> tokio::runtime::Runtime { tokio::runtime::Builder::new_current_thread() .enable_all() .build() .expect("Failed to build current-thread runtime") } pub fn multi_thread_runtime() -> tokio::runtime::Runtime { tokio::runtime::Builder::new_multi_thread() .worker_threads(4) .enable_all() .build() .expect("Failed to build multi-thread runtime") } ================================================ FILE: cliff.toml ================================================ # git-cliff ~ configuration file # https://git-cliff.org/docs/configuration [remote.github] owner = "0x676e67" repo = "wreq" [changelog] # A Tera template to be rendered for each release in the changelog. 
# See https://keats.github.io/tera/docs/#introduction body = """ {%- macro remote_url() -%} https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }} {%- endmacro -%} {% macro print_commit(commit) -%} - {% if commit.scope %}*({{ commit.scope }})* {% endif %}\ {% if commit.breaking %}[**breaking**] {% endif %}\ {{ commit.message | upper_first }} - \ ([{{ commit.id | truncate(length=7, end="") }}]({{ self::remote_url() }}/commit/{{ commit.id }}))\ {% endmacro -%} {% if version %}\ {% if previous.version %}\ ## [{{ version | trim_start_matches(pat="v") }}]\ ({{ self::remote_url() }}/compare/{{ previous.version }}..{{ version }}) - {{ timestamp | date(format="%Y-%m-%d") }} {% else %}\ ## [{{ version | trim_start_matches(pat="v") }}] - {{ timestamp | date(format="%Y-%m-%d") }} {% endif %}\ {% else %}\ ## [unreleased] {% endif %}\ {% for group, commits in commits | group_by(attribute="group") %} ### {{ group | striptags | trim | upper_first }} {% for commit in commits | filter(attribute="scope") | sort(attribute="scope") %} {{ self::print_commit(commit=commit) }} {%- endfor %} {% for commit in commits %} {%- if not commit.scope -%} {{ self::print_commit(commit=commit) }} {% endif -%} {% endfor -%} {% endfor -%} {%- if github -%} {% if github.contributors | filter(attribute="is_first_time", value=true) | length != 0 %} ## New Contributors ❤️ {% endif %}\ {% for contributor in github.contributors | filter(attribute="is_first_time", value=true) %} * @{{ contributor.username }} made their first contribution {%- if contributor.pr_number %} in \ [#{{ contributor.pr_number }}]({{ self::remote_url() }}/pull/{{ contributor.pr_number }}) \ {%- endif %} {%- endfor -%} {%- endif %} """ # A Tera template to be rendered as the changelog's footer. # See https://keats.github.io/tera/docs/#introduction footer = """ """ # Remove leading and trailing whitespaces from the changelog's body. trim = true # An array of regex based postprocessors to modify the changelog. 
postprocessors = [ # Replace the placeholder `` with a URL. { pattern = '', replace = "https://github.com/0x676e67/wreq" }, # replace repository URL ] [git] # Parse commits according to the conventional commits specification. # See https://www.conventionalcommits.org conventional_commits = true # Exclude commits that do not match the conventional commits specification. filter_unconventional = true # Split commits on newlines, treating each line as an individual commit. split_commits = false # An array of regex based parsers to modify commit messages prior to further processing. commit_preprocessors = [ # Replace issue numbers with link templates to be updated in `changelog.postprocessors`. { pattern = '\((\w+\s)?#([0-9]+)\)', replace = "([#${2}](/issues/${2}))" }, ] # An array of regex based parsers for extracting data from the commit message. # Assigns commits to groups. # Optionally sets the commit's scope and can decide to exclude commits from further processing. commit_parsers = [ { message = "^feat", group = "Features" }, { message = "^fix", group = "Bug Fixes" }, { message = "^doc", group = "Documentation" }, { message = "^perf", group = "Performance" }, { message = "^refactor\\(clippy\\)", skip = true }, { message = "^refactor", group = "Refactor" }, { message = "^style", group = "Styling" }, { message = "^test", group = "Testing" }, { message = "^chore\\(release\\): prepare for", skip = true }, { message = "^chore\\(deps.*\\)", skip = true }, { message = "^chore\\(pr\\)", skip = true }, { message = "^chore\\(pull\\)", skip = true }, { message = "^chore\\(npm\\).*yarn\\.lock", skip = true }, { message = "^chore|^ci", group = "Miscellaneous Tasks" }, { body = ".*security", group = "Security" }, { message = "^revert", group = "Revert" }, ] # Prevent commits that are breaking from being excluded by commit parsers. protect_breaking_commits = false # Exclude commits that are not matched by any commit parser. 
filter_commits = false # Regex to select git tags that represent releases. tag_pattern = "v[0-9].*" # Regex to select git tags that do not represent proper releases. # Takes precedence over `tag_pattern`. # Changes belonging to these releases will be included in the next release. skip_tags = "" # Regex to exclude git tags after applying the tag_pattern. ignore_tags = "v2.1.0|v2.1.1" # Order releases topologically instead of chronologically. topo_order = false # Order of commits in each group/release within the changelog. # Allowed values: newest, oldest sort_commits = "newest" ================================================ FILE: examples/cert_store.rs ================================================ use std::time::Duration; use wreq::{ Client, tls::{TlsInfo, trust::CertStore}, }; /// Certificate Store Example /// /// In most cases, you don't need to manually configure certificate stores. wreq automatically /// uses appropriate default certificates: /// - With `webpki-roots` feature enabled: Uses Mozilla's maintained root certificate collection /// - Without this feature: Uses system default certificate store paths /// /// Manual certificate store configuration is only needed in the following special cases: /// /// ## Scenarios requiring custom certificate store: /// /// ### 1. Self-signed Certificates /// - Connect to internal services using self-signed certificates /// - Test servers in development environments /// /// ### 2. Enterprise Internal CA /// - Add root certificates from enterprise internal certificate authorities /// - Access HTTPS services on corporate intranets /// /// ### 3. Certificate Updates and Management /// - Dynamically update certificates in the certificate store /// - Remove revoked or expired certificates /// /// ### 4. Compliance Requirements /// - Special compliance requirements for certain industries or regions /// - Need to use specific certificate collections /// /// ### 5. 
Performance Optimization /// - Reduce certificate store size to improve TLS handshake performance /// - Include only necessary root certificates #[tokio::main] async fn main() -> wreq::Result<()> { // Create a client with a custom certificate store using webpki-roots let client = Client::builder() .tls_cert_store(CertStore::from_der_certs( webpki_root_certs::TLS_SERVER_ROOT_CERTS, )?) .build()?; // Use the API you're already familiar with client.get("https://www.google.com").send().await?; // Self-signed certificate Client // Skip certificate verification for self-signed certificates let client = Client::builder() .tls_info(true) .tls_cert_verification(false) .build()?; // Use the API you're already familiar with let resp = client.get("https://self-signed.badssl.com/").send().await?; if let Some(tls_info) = resp.extensions().get::() { if let Some(peer_cert_der) = tls_info.peer_certificate() { // Create self-signed certificate Store let self_signed_store = CertStore::from_der_certs(&[peer_cert_der])?; // Create a client with self-signed certificate store let client = Client::builder() .tls_cert_store(self_signed_store) .connect_timeout(Duration::from_secs(10)) .build()?; // Use the API you're already familiar with let resp = client.get("https://self-signed.badssl.com/").send().await?; println!("{}", resp.text().await?); } } Ok(()) } ================================================ FILE: examples/connect_via_lower_priority_tokio_runtime.rs ================================================ // This example demonstrates how to delegate the connect calls, which contain TLS handshakes, // to a secondary tokio runtime of lower OS thread priority using a custom tower layer. // This helps to ensure that long-running futures during handshake crypto operations don't block // other I/O futures. 
// // This does introduce overhead of additional threads, channels, extra vtables, etc, // so it is best suited to services with large numbers of incoming connections or that // are otherwise very sensitive to any blocking futures. Or, you might want fewer threads // and/or to use the current_thread runtime. // // This is using the `tokio` runtime and certain other dependencies: // // `tokio = { version = "1", features = ["full"] }` // `libc = "0"` // `pin-project-lite = "0.2"` // `tower = { version = "0.5", default-features = false}` #[tokio::main] async fn main() -> wreq::Result<()> { tracing_subscriber::fmt() .with_max_level(tracing::Level::TRACE) .init(); background_threadpool::init_background_runtime(); tokio::time::sleep(std::time::Duration::from_millis(10)).await; let client = wreq::Client::builder() .connector_layer(background_threadpool::BackgroundProcessorLayer::new()) .build() .expect("should be able to build wreq client"); let url = if let Some(url) = std::env::args().nth(1) { url } else { println!("No CLI URL provided, using default."); "https://hyper.rs".into() }; eprintln!("Fetching {url:?}..."); let res = client.get(url).send().await?; eprintln!("Response: {:?} {}", res.version(), res.status()); eprintln!("Headers: {:#?}\n", res.headers()); let body = res.text().await?; println!("{body}"); Ok(()) } // separating out for convenience to avoid a million mod background_threadpool { use std::{ future::Future, pin::Pin, sync::OnceLock, task::{Context, Poll}, }; use futures_util::TryFutureExt; use pin_project_lite::pin_project; use tokio::{runtime::Handle, select, sync::mpsc::error::TrySendError}; use tower::{BoxError, Layer, Service}; static CPU_HEAVY_THREAD_POOL: OnceLock< tokio::sync::mpsc::Sender + Send + 'static>>>, > = OnceLock::new(); pub(crate) fn init_background_runtime() { std::thread::Builder::new() .name("cpu-heavy-background-threadpool".to_string()) .spawn(move || { let rt = tokio::runtime::Builder::new_multi_thread() 
.thread_name("cpu-heavy-background-pool-thread") .worker_threads(std::thread::available_parallelism().unwrap().get()) // ref: https://github.com/tokio-rs/tokio/issues/4941 // consider uncommenting if seeing heavy task contention // .disable_lifo_slot() .on_thread_start(move || { #[cfg(target_os = "linux")] unsafe { // Increase thread pool thread niceness, so they are lower priority // than the foreground executor and don't interfere with I/O tasks { *libc::__errno_location() = 0; if libc::nice(10) == -1 && *libc::__errno_location() != 0 { let error = std::io::Error::last_os_error(); tracing::error!("failed to set threadpool niceness: {error}"); } } } }) .enable_all() .build() .unwrap_or_else(|e| panic!("cpu heavy runtime failed_to_initialize: {e}")); rt.block_on(async { tracing::debug!("starting background cpu-heavy work"); process_cpu_work().await; }); }) .unwrap_or_else(|e| panic!("cpu heavy thread failed_to_initialize: {e}")); } async fn process_cpu_work() { // we only use this channel for routing work, it should move pretty quick, it can be small let (tx, mut rx) = tokio::sync::mpsc::channel(10); // share the handle to the background channel globally CPU_HEAVY_THREAD_POOL.set(tx).unwrap(); while let Some(work) = rx.recv().await { tokio::task::spawn(work); } } // retrieve the sender to the background channel, and send the future over to it for execution fn send_to_background_runtime(future: impl Future + Send + 'static) { let tx = CPU_HEAVY_THREAD_POOL.get().expect( "start up the secondary tokio runtime before sending to `CPU_HEAVY_THREAD_POOL`", ); match tx.try_send(Box::pin(future)) { Ok(_) => (), Err(TrySendError::Closed(_)) => { panic!("background cpu heavy runtime channel is closed") } Err(TrySendError::Full(msg)) => { tracing::warn!( "background cpu heavy runtime channel is full, task spawning loop delayed" ); let tx = tx.clone(); Handle::current().spawn(async move { tx.send(msg) .await .expect("background cpu heavy runtime channel is closed") }); } } } // 
This tower layer injects futures with a oneshot channel, and then sends them to the // background runtime for processing. We don't use the Buffer service because that is // intended to process sequentially on a single task, whereas we want to spawn a new task // per call. #[derive(Copy, Clone)] pub struct BackgroundProcessorLayer {} impl BackgroundProcessorLayer { pub fn new() -> Self { Self {} } } impl Layer for BackgroundProcessorLayer { type Service = BackgroundProcessor; fn layer(&self, service: S) -> Self::Service { BackgroundProcessor::new(service) } } impl std::fmt::Debug for BackgroundProcessorLayer { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { f.debug_struct("BackgroundProcessorLayer").finish() } } // This tower service injects futures with a oneshot channel, and then sends them to the // background runtime for processing. #[derive(Debug, Clone)] pub struct BackgroundProcessor { inner: S, } impl BackgroundProcessor { pub fn new(inner: S) -> Self { BackgroundProcessor { inner } } } impl Service for BackgroundProcessor where S: Service, S::Response: Send + 'static, S::Error: Into + Send, S::Future: Send + 'static, { type Response = S::Response; type Error = BoxError; type Future = BackgroundResponseFuture; fn poll_ready( &mut self, cx: &mut std::task::Context<'_>, ) -> std::task::Poll> { match self.inner.poll_ready(cx) { Poll::Pending => Poll::Pending, Poll::Ready(r) => Poll::Ready(r.map_err(Into::into)), } } fn call(&mut self, req: Request) -> Self::Future { let response = self.inner.call(req); // wrap our inner service's future with a future that writes to this oneshot channel let (mut tx, rx) = tokio::sync::oneshot::channel(); let future = async move { select!( _ = tx.closed() => { // receiver already dropped, don't need to do anything } result = response.map_err(Into::::into) => { // if this fails, the receiver already dropped, so we don't need to do anything let _ = tx.send(result); } ) }; // send the wrapped future to the 
background send_to_background_runtime(future); BackgroundResponseFuture::new(rx) } } // `BackgroundProcessor` response future pin_project! { #[derive(Debug)] pub struct BackgroundResponseFuture { #[pin] rx: tokio::sync::oneshot::Receiver>, } } impl BackgroundResponseFuture { pub(crate) fn new(rx: tokio::sync::oneshot::Receiver>) -> Self { BackgroundResponseFuture { rx } } } impl Future for BackgroundResponseFuture where S: Send + 'static, { type Output = Result; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.project(); // now poll on the receiver end of the oneshot to get the result match this.rx.poll(cx) { Poll::Ready(v) => match v { Ok(v) => Poll::Ready(v), Err(err) => Poll::Ready(Err(Box::new(err) as BoxError)), }, Poll::Pending => Poll::Pending, } } } } ================================================ FILE: examples/emulate.rs ================================================ use wreq::{ Client, Emulation, header::{self, HeaderMap, HeaderValue, OrigHeaderMap}, http2::{Http2Options, PseudoId, PseudoOrder}, tls::{AlpnProtocol, TlsOptions, TlsVersion}, }; macro_rules! 
join { ($sep:expr, $first:expr $(, $rest:expr)*) => { concat!($first $(, $sep, $rest)*) }; } #[tokio::main] async fn main() -> wreq::Result<()> { tracing_subscriber::fmt() .with_max_level(tracing::Level::TRACE) .init(); // TLS options config let tls = TlsOptions::builder() .enable_ocsp_stapling(true) .curves_list(join!(":", "X25519", "P-256", "P-384")) .cipher_list(join!( ":", "TLS_AES_128_GCM_SHA256", "TLS_AES_256_GCM_SHA384", "TLS_CHACHA20_POLY1305_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256" )) .sigalgs_list(join!( ":", "ecdsa_secp256r1_sha256", "rsa_pss_rsae_sha256", "rsa_pkcs1_sha256", "ecdsa_secp384r1_sha384", "rsa_pss_rsae_sha384", "rsa_pkcs1_sha384", "rsa_pss_rsae_sha512", "rsa_pkcs1_sha512", "rsa_pkcs1_sha1" )) .alpn_protocols([AlpnProtocol::HTTP2, AlpnProtocol::HTTP1]) .min_tls_version(TlsVersion::TLS_1_2) .max_tls_version(TlsVersion::TLS_1_3) .build(); // HTTP/2 options config let http2 = Http2Options::builder() .initial_stream_id(3) .initial_window_size(16777216) .initial_connection_window_size(16711681 + 65535) .headers_pseudo_order( PseudoOrder::builder() .extend([ PseudoId::Method, PseudoId::Path, PseudoId::Authority, PseudoId::Scheme, ]) .build(), ) .build(); // Default headers let headers = { let mut headers = HeaderMap::new(); headers.insert(header::USER_AGENT, HeaderValue::from_static("TwitterAndroid/10.89.0-release.0 (310890000-r-0) G011A/9 (google;G011A;google;G011A;0;;1;2016)")); headers.insert(header::ACCEPT_LANGUAGE, HeaderValue::from_static("en-US")); headers.insert( header::ACCEPT_ENCODING, HeaderValue::from_static("br, gzip, deflate"), ); headers.insert(header::ACCEPT, HeaderValue::from_static("application/json")); headers.insert(header::CACHE_CONTROL, HeaderValue::from_static("no-store")); headers.insert( 
header::COOKIE, HeaderValue::from_static("ct0=YOUR_CT0_VALUE;"), ); headers }; // The headers keep the original case and order let orig_headers = { let mut orig_headers = OrigHeaderMap::new(); orig_headers.insert("cookie"); orig_headers.insert("content-length"); orig_headers.insert("User-Agent"); orig_headers.insert("Accept-Language"); orig_headers.insert("Accept-Encoding"); orig_headers }; // This provider encapsulates TLS, HTTP/1, HTTP/2, default headers, and original headers let emulation = Emulation::builder() .tls_options(tls) .http2_options(http2) .headers(headers) .orig_headers(orig_headers) .build(Default::default()); // Build a client with emulation config let client = Client::builder() .emulation(emulation) .tls_cert_verification(false) .build()?; // Use the API you're already familiar with let resp = client.get("https://tls.browserleaks.com/").send().await?; println!("{}", resp.text().await?); Ok(()) } ================================================ FILE: examples/form.rs ================================================ // Short example of a POST request with form data. // // This is using the `tokio` runtime. 
You'll need the following dependency: // // `tokio = { version = "1", features = ["full"] }` #[tokio::main] async fn main() { let response = wreq::post("http://www.baidu.com") .form(&[("one", "1")]) .send() .await .expect("send"); println!("Response status {}", response.status()); } ================================================ FILE: examples/http1_websocket.rs ================================================ use futures_util::{SinkExt, StreamExt, TryStreamExt}; use wreq::{header, ws::message::Message}; #[tokio::main] async fn main() -> wreq::Result<()> { // Use the API you're already familiar with let resp = wreq::websocket("wss://echo.websocket.org") .header(header::USER_AGENT, env!("CARGO_PKG_NAME")) .read_buffer_size(1024 * 1024) .send() .await?; assert_eq!(resp.version(), http::Version::HTTP_11); let websocket = resp.into_websocket().await?; if let Some(protocol) = websocket.protocol() { println!("WebSocket subprotocol: {:?}", protocol); } let (mut tx, mut rx) = websocket.split(); tokio::spawn(async move { for i in 1..11 { if let Err(err) = tx.send(Message::text(format!("Hello, World! {i}"))).await { eprintln!("failed to send message: {err}"); } } }); while let Some(message) = rx.try_next().await? { if let Message::Text(text) = message { println!("received: {text}"); } } Ok(()) } ================================================ FILE: examples/http2_websocket.rs ================================================ //! Run websocket server //! //! ```not_rust //! git clone https://github.com/tokio-rs/axum && cd axum //! cargo run -p example-websockets-http2 //! 
``` use futures_util::{SinkExt, StreamExt, TryStreamExt}; use wreq::{Version, header, ws::message::Message}; #[tokio::main] async fn main() -> wreq::Result<()> { // Use the API you're already familiar with let resp = wreq::websocket("wss://127.0.0.1:3000/ws") .version(Version::HTTP_2) .header(header::USER_AGENT, env!("CARGO_PKG_NAME")) .read_buffer_size(1024 * 1024) .send() .await?; assert_eq!(resp.version(), http::Version::HTTP_2); let websocket = resp.into_websocket().await?; if let Some(protocol) = websocket.protocol() { println!("WebSocket subprotocol: {:?}", protocol); } let (mut tx, mut rx) = websocket.split(); tokio::spawn(async move { for i in 1..11 { if let Err(err) = tx.send(Message::text(format!("Hello, World! #{i}"))).await { eprintln!("failed to send message: {err}"); } } }); while let Some(message) = rx.try_next().await? { if let Message::Text(text) = message { println!("received: {text}"); } } Ok(()) } ================================================ FILE: examples/json_dynamic.rs ================================================ //! This example illustrates the way to send and receive arbitrary JSON. //! //! This is useful for some ad-hoc experiments and situations when you don't //! really care about the structure of the JSON and just need to display it or //! process it at runtime. // This is using the `tokio` runtime. You'll need the following dependency: // // `tokio = { version = "1", features = ["full"] }` #[tokio::main] async fn main() -> wreq::Result<()> { let echo_json: serde_json::Value = wreq::post("https://jsonplaceholder.typicode.com/posts") .json(&serde_json::json!({ "title": "wreq.rs", "body": "https://docs.rs/wreq", "userId": 1 })) .send() .await? 
.json() .await?; println!("{echo_json:#?}"); // Object( // { // "body": String( // "https://docs.rs/wreq" // ), // "id": Number( // 101 // ), // "title": String( // "wreq.rs" // ), // "userId": Number( // 1 // ) // } // ) Ok(()) } ================================================ FILE: examples/json_typed.rs ================================================ //! This example illustrates the way to send and receive statically typed JSON. //! //! In contrast to the arbitrary JSON example, this brings up the full power of //! Rust compile-time type system guaranties though it requires a little bit //! more code. // These require the `serde` dependency. use serde::{Deserialize, Serialize}; #[derive(Debug, Serialize, Deserialize)] struct Post { id: Option, title: String, body: String, #[serde(rename = "userId")] user_id: i32, } // This is using the `tokio` runtime. You'll need the following dependency: // // `tokio = { version = "1", features = ["full"] }` #[tokio::main] async fn main() -> wreq::Result<()> { let new_post = Post { id: None, title: "wreq.rs".into(), body: "https://docs.rs/wreq".into(), user_id: 1, }; let new_post: Post = wreq::post("https://jsonplaceholder.typicode.com/posts") .json(&new_post) .send() .await? 
.json() .await?; println!("{new_post:#?}"); // Post { // id: Some( // 101 // ), // title: "wreq.rs", // body: "https://docs.rs/wreq", // user_id: 1 // } Ok(()) } ================================================ FILE: examples/keylog.rs ================================================ use wreq::tls::keylog::KeyLog; #[tokio::main] async fn main() -> wreq::Result<()> { // Build a client let client = wreq::Client::builder() .tls_keylog(KeyLog::from_file("keylog.txt")) .tls_cert_verification(false) .build()?; // Use the API you're already familiar with let resp = client.get("https://yande.re/post.json").send().await?; println!("{}", resp.text().await?); Ok(()) } ================================================ FILE: examples/request_with_emulate.rs ================================================ use wreq::{ Emulation, header::{self, HeaderMap, HeaderValue, OrigHeaderMap}, http2::{Http2Options, PseudoId, PseudoOrder}, tls::{AlpnProtocol, TlsOptions, TlsVersion}, }; macro_rules! join { ($sep:expr, $first:expr $(, $rest:expr)*) => { concat!($first $(, $sep, $rest)*) }; } #[tokio::main] async fn main() -> wreq::Result<()> { tracing_subscriber::fmt() .with_max_level(tracing::Level::TRACE) .init(); // TLS options config let tls = TlsOptions::builder() .enable_ocsp_stapling(true) .curves_list(join!(":", "X25519", "P-256", "P-384")) .cipher_list(join!( ":", "TLS_AES_128_GCM_SHA256", "TLS_AES_256_GCM_SHA384", "TLS_CHACHA20_POLY1305_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256" )) .sigalgs_list(join!( ":", "ecdsa_secp256r1_sha256", "rsa_pss_rsae_sha256", "rsa_pkcs1_sha256", "ecdsa_secp384r1_sha384", "rsa_pss_rsae_sha384", "rsa_pkcs1_sha384", "rsa_pss_rsae_sha512", "rsa_pkcs1_sha512", "rsa_pkcs1_sha1" )) .alpn_protocols([AlpnProtocol::HTTP2, 
AlpnProtocol::HTTP1]) .min_tls_version(TlsVersion::TLS_1_2) .max_tls_version(TlsVersion::TLS_1_3) .build(); // HTTP/2 options config let http2 = Http2Options::builder() .initial_stream_id(3) .initial_window_size(16777216) .initial_connection_window_size(16711681 + 65535) .headers_pseudo_order( PseudoOrder::builder() .extend([ PseudoId::Method, PseudoId::Path, PseudoId::Authority, PseudoId::Scheme, ]) .build(), ) .build(); // Default headers let headers = { let mut headers = HeaderMap::new(); headers.insert(header::USER_AGENT, HeaderValue::from_static("TwitterAndroid/10.89.0-release.0 (310890000-r-0) G011A/9 (google;G011A;google;G011A;0;;1;2016)")); headers.insert(header::ACCEPT_LANGUAGE, HeaderValue::from_static("en-US")); headers.insert( header::ACCEPT_ENCODING, HeaderValue::from_static("br, gzip, deflate"), ); headers.insert(header::ACCEPT, HeaderValue::from_static("application/json")); headers.insert(header::CACHE_CONTROL, HeaderValue::from_static("no-store")); headers.insert( header::COOKIE, HeaderValue::from_static("ct0=YOUR_CT0_VALUE;"), ); headers }; // The headers keep the original case and order let orig_headers = { let mut orig_headers = OrigHeaderMap::new(); orig_headers.insert("cookie"); orig_headers.insert("content-length"); orig_headers.insert("User-Agent"); orig_headers.insert("Accept-Language"); orig_headers.insert("Accept-Encoding"); orig_headers }; // This provider encapsulates TLS, HTTP/1, HTTP/2, default headers, and original headers let emulation = Emulation::builder() .tls_options(tls) .http2_options(http2) .orig_headers(orig_headers) .headers(headers) .build(Default::default()); // Use the API you're already familiar with let resp = wreq::get("https://tls.peet.ws/api/all") .emulation(emulation) .send() .await?; println!("{}", resp.text().await?); Ok(()) } ================================================ FILE: examples/request_with_interface.rs ================================================ #[cfg(any( target_os = "android", target_os = 
"fuchsia", target_os = "illumos", target_os = "ios", target_os = "linux", target_os = "macos", target_os = "solaris", target_os = "tvos", target_os = "visionos", target_os = "watchos", ))] #[tokio::main] async fn main() -> wreq::Result<()> { // Use the API you're already familiar with let resp = wreq::get("https://api.ip.sb/ip") .interface("utun4") .send() .await?; println!("{}", resp.text().await?); Ok(()) } #[cfg(not(any( target_os = "android", target_os = "fuchsia", target_os = "illumos", target_os = "ios", target_os = "linux", target_os = "macos", target_os = "solaris", target_os = "tvos", target_os = "visionos", target_os = "watchos", )))] fn main() {} ================================================ FILE: examples/request_with_local_address.rs ================================================ use std::net::IpAddr; use wreq::redirect::Policy; #[tokio::main] async fn main() -> wreq::Result<()> { // Use the API you're already familiar with let resp = wreq::get("http://www.baidu.com") .redirect(Policy::default()) .local_address(IpAddr::from([192, 168, 1, 226])) .send() .await?; println!("{}", resp.text().await?); Ok(()) } ================================================ FILE: examples/request_with_proxy.rs ================================================ use wreq::Proxy; #[tokio::main] async fn main() -> wreq::Result<()> { // Use the API you're already familiar with let resp = wreq::get("https://api.ip.sb/ip") .proxy(Proxy::all("socks5h://localhost:6153")?) 
.send() .await?; println!("{}", resp.text().await?); Ok(()) } ================================================ FILE: examples/request_with_redirect.rs ================================================ use wreq::redirect::Policy; #[tokio::main] async fn main() -> wreq::Result<()> { // Use the API you're already familiar with let resp = wreq::get("https://google.com/") .redirect(Policy::custom(|attempt| { // we can inspect the redirect attempt println!( "Redirecting (status: {}) to {:?} and headers: {:#?}", attempt.status, attempt.uri, attempt.headers ); // we can follow redirects as normal attempt.follow() })) .send() .await?; println!("{}", resp.text().await?); Ok(()) } ================================================ FILE: examples/request_with_version.rs ================================================ use http::Version; #[tokio::main] async fn main() -> wreq::Result<()> { // Use the API you're already familiar with let resp = wreq::get("https://www.google.com") .version(Version::HTTP_11) .send() .await?; assert_eq!(resp.version(), Version::HTTP_11); println!("{}", resp.text().await?); Ok(()) } ================================================ FILE: examples/tor_socks.rs ================================================ #![deny(warnings)] // This is using the `tokio` runtime. You'll need the following dependency: // // `tokio = { version = "1", features = ["full"] }` #[tokio::main] async fn main() -> wreq::Result<()> { // Make sure you are running tor and this is your socks port let proxy = wreq::Proxy::all("socks5h://127.0.0.1:9050").expect("tor proxy should be there"); let client = wreq::Client::builder() .proxy(proxy) .build() .expect("should be able to build wreq client"); let res = client.get("https://check.torproject.org").send().await?; println!("Status: {}", res.status()); let text = res.text().await?; let is_tor = text.contains("Congratulations. 
This emulation is configured to use Tor."); println!("Is Tor: {is_tor}"); assert!(is_tor); Ok(()) } ================================================ FILE: examples/unix_socket.rs ================================================ #[cfg(unix)] #[tokio::main] async fn main() -> wreq::Result<()> { // Create a Unix socket proxy let proxy = wreq::Proxy::unix("/var/run/docker.sock")?; // Build a client let client = wreq::Client::builder() // Specify the Unix socket path .proxy(proxy.clone()) .timeout(std::time::Duration::from_secs(10)) .build()?; // Use the API you're already familiar with let resp = client .get("http://localhost/v1.41/containers/json") .send() .await?; println!("{}", resp.text().await?); // Or specify the Unix socket directly in the request let resp = client .get("http://localhost/v1.41/containers/json") .proxy(proxy) .send() .await?; println!("{}", resp.text().await?); Ok(()) } #[cfg(not(unix))] fn main() {} ================================================ FILE: rustfmt.toml ================================================ group_imports = "StdExternalCrate" imports_granularity = "Crate" reorder_imports = true wrap_comments = true comment_width = 100 ================================================ FILE: src/client/body.rs ================================================ use std::{ pin::Pin, task::{Context, Poll, ready}, }; use bytes::Bytes; use http_body::{Body as HttpBody, SizeHint}; use http_body_util::{BodyExt, Either, Full, combinators::BoxBody}; use pin_project_lite::pin_project; #[cfg(feature = "stream")] use {tokio::fs::File, tokio_util::io::ReaderStream}; use crate::error::{BoxError, Error}; /// An request body. #[derive(Debug)] pub struct Body(Either, BoxBody>); pin_project! { /// We can't use `map_frame()` because that loses the hint data (for good reason). /// But we aren't transforming the data. struct IntoBytesBody { #[pin] inner: B, } } // ===== impl Body ===== impl Body { /// Wrap a [`HttpBody`] in a box inside `Body`. 
/// /// # Example /// /// ``` /// # use wreq::Body; /// # use futures_util; /// # fn main() { /// let content = "hello,world!".to_string(); /// /// let body = Body::wrap(content); /// # } /// ``` pub fn wrap(inner: B) -> Body where B: HttpBody + Send + Sync + 'static, B::Data: Into, B::Error: Into, { Body(Either::Right( IntoBytesBody { inner }.map_err(Into::into).boxed(), )) } /// Wrap a futures `Stream` in a box inside `Body`. /// /// # Example /// /// ``` /// # use wreq::Body; /// # use futures_util; /// # fn main() { /// let chunks: Vec> = vec![Ok("hello"), Ok(" "), Ok("world")]; /// /// let stream = futures_util::stream::iter(chunks); /// /// let body = Body::wrap_stream(stream); /// # } /// ``` /// /// # Optional /// /// This requires the `stream` feature to be enabled. #[cfg(feature = "stream")] #[cfg_attr(docsrs, doc(cfg(feature = "stream")))] pub fn wrap_stream(stream: S) -> Body where S: futures_util::stream::TryStream + Send + 'static, S::Error: Into, Bytes: From, { Body::stream(stream) } #[cfg(any(feature = "stream", feature = "multipart"))] pub(crate) fn stream(stream: S) -> Body where S: futures_util::stream::TryStream + Send + 'static, S::Error: Into, Bytes: From, { use futures_util::TryStreamExt; use http_body::Frame; use http_body_util::StreamBody; use sync_wrapper::SyncStream; let body = StreamBody::new(SyncStream::new( stream .map_ok(Bytes::from) .map_ok(Frame::data) .map_err(Into::into), )); Body(Either::Right(body.boxed())) } #[inline] pub(crate) fn empty() -> Body { Body::reusable(Bytes::new()) } #[inline] pub(crate) fn reusable(chunk: Bytes) -> Body { Body(Either::Left(Full::new(chunk))) } #[inline] #[cfg(feature = "multipart")] pub(crate) fn content_length(&self) -> Option { self.0.size_hint().exact() } #[inline] pub(crate) fn try_clone(&self) -> Option { match self.0 { Either::Left(ref chunk) => Some(Body(Either::Left(chunk.clone()))), Either::Right { .. 
} => None, } } } impl Default for Body { #[inline] fn default() -> Body { Body::empty() } } impl From> for Body { #[inline] fn from(body: BoxBody) -> Self { Self(Either::Right(body)) } } impl From for Body { #[inline] fn from(bytes: Bytes) -> Body { Body::reusable(bytes) } } impl From> for Body { #[inline] fn from(vec: Vec) -> Body { Body::reusable(vec.into()) } } impl From<&'static [u8]> for Body { #[inline] fn from(s: &'static [u8]) -> Body { Body::reusable(Bytes::from_static(s)) } } impl From for Body { #[inline] fn from(s: String) -> Body { Body::reusable(s.into()) } } impl From<&'static str> for Body { #[inline] fn from(s: &'static str) -> Body { s.as_bytes().into() } } #[cfg(feature = "stream")] impl From for Body { #[inline] fn from(file: File) -> Body { Body::wrap_stream(ReaderStream::new(file)) } } impl HttpBody for Body { type Data = Bytes; type Error = Error; #[inline] fn poll_frame( mut self: Pin<&mut Self>, cx: &mut Context, ) -> Poll, Self::Error>>> { Pin::new(&mut self.0).poll_frame(cx).map_err(|err| { err.downcast::() .map_or_else(Error::request, |err| *err) }) } #[inline] fn size_hint(&self) -> SizeHint { self.0.size_hint() } #[inline] fn is_end_stream(&self) -> bool { self.0.is_end_stream() } } // ===== impl IntoBytesBody ===== impl HttpBody for IntoBytesBody where B: HttpBody, B::Data: Into, { type Data = Bytes; type Error = B::Error; fn poll_frame( self: Pin<&mut Self>, cx: &mut Context, ) -> Poll, Self::Error>>> { match ready!(self.project().inner.poll_frame(cx)) { Some(Ok(f)) => Poll::Ready(Some(Ok(f.map_data(Into::into)))), Some(Err(e)) => Poll::Ready(Some(Err(e))), None => Poll::Ready(None), } } #[inline] fn size_hint(&self) -> SizeHint { self.inner.size_hint() } #[inline] fn is_end_stream(&self) -> bool { self.inner.is_end_stream() } } #[cfg(test)] mod tests { use http_body::Body as _; use super::Body; #[test] fn body_exact_length() { let empty_body = Body::empty(); assert!(empty_body.is_end_stream()); 
assert_eq!(empty_body.size_hint().exact(), Some(0)); let bytes_body = Body::reusable("abc".into()); assert!(!bytes_body.is_end_stream()); assert_eq!(bytes_body.size_hint().exact(), Some(3)); // can delegate even when wrapped let stream_body = Body::wrap(empty_body); assert!(stream_body.is_end_stream()); assert_eq!(stream_body.size_hint().exact(), Some(0)); } } ================================================ FILE: src/client/conn/connector.rs ================================================ use std::{ borrow::Cow, future::Future, pin::Pin, sync::Arc, task::{Context, Poll}, time::Duration, }; use tokio_btls::SslStream; use tower::{ BoxError, Service, ServiceBuilder, ServiceExt, timeout::TimeoutLayer, util::{BoxCloneSyncService, MapRequestLayer}, }; #[cfg(unix)] use super::uds::UnixConnector; use super::{ AsyncConnWithInfo, BoxedConnectorLayer, BoxedConnectorService, Conn, Connection, HttpConnector, TlsConn, TlsInfoFactory, Unnameable, http::HttpTransport, proxy, verbose::Verbose, }; use crate::{ client::conn::{TokioTcpConnector, descriptor::ConnectionDescriptor}, dns::DynResolver, error::{ProxyConnect, TimedOut, map_timeout_to_connector_error}, ext::UriExt, proxy::{Intercepted, Matcher as ProxyMatcher, matcher::Intercept}, tls::{ TlsOptions, conn::{ EstablishedConn, HttpsConnector, MaybeHttpsStream, TlsConnector, TlsConnectorBuilder, }, }, }; type Connecting = Pin> + Send>>; /// Configuration for the connector service. #[derive(Clone)] struct Config { proxies: Arc>, verbose: Verbose, nodelay: bool, tls_info: bool, /// When there is a single timeout layer and no other layers, /// we embed it directly inside our base Service::call(). /// This lets us avoid an extra `Box::pin` indirection layer /// since `tokio::time::Timeout` is `Unpin` timeout: Option, } /// Builder for `Connector`. 
pub struct ConnectorBuilder { config: Config, #[cfg(feature = "socks")] resolver: DynResolver, http: HttpConnector, builder: TlsConnectorBuilder, } /// Connector service that establishes connections. #[derive(Clone)] pub enum Connector { Simple(ConnectorService), WithLayers(BoxedConnectorService), } /// Service that establishes connections to HTTP servers. #[derive(Clone)] pub struct ConnectorService { config: Config, #[cfg(feature = "socks")] resolver: DynResolver, tls: TlsConnector, http: HttpConnector, builder: Arc, } // ===== impl ConnectorBuilder ===== impl ConnectorBuilder { /// Set the HTTP connector to use. #[inline] pub fn with_http(mut self, call: F) -> ConnectorBuilder where F: FnOnce(&mut HttpConnector), { call(&mut self.http); self } /// Set the TLS connector builder to use. #[inline] pub fn with_tls(mut self, call: F) -> ConnectorBuilder where F: FnOnce(TlsConnectorBuilder) -> TlsConnectorBuilder, { self.builder = call(self.builder); self } /// Set the connect timeout. /// /// If a domain resolves to multiple IP addresses, the timeout will be /// evenly divided across them. #[inline] pub fn timeout(mut self, timeout: Option) -> ConnectorBuilder { self.config.timeout = timeout; self } /// Set connecting verbose mode. #[inline] pub fn verbose(mut self, enabled: bool) -> ConnectorBuilder { self.config.verbose.0 = enabled; self } /// Sets the TLS info flag. #[inline] pub fn tls_info(mut self, enabled: bool) -> ConnectorBuilder { self.config.tls_info = enabled; self } /// Sets the TCP_NODELAY option for connections. #[inline] pub fn tcp_nodelay(mut self, enabled: bool) -> ConnectorBuilder { self.config.nodelay = enabled; self } /// Build a [`Connector`] with the provided layers. 
pub fn build( self, tls_options: Option, layers: Vec, ) -> crate::Result { let mut service = ConnectorService { config: self.config, #[cfg(feature = "socks")] resolver: self.resolver.clone(), http: self.http, tls: self .builder .build(tls_options.map(Cow::Owned).unwrap_or_default())?, builder: Arc::new(self.builder), }; // we have no user-provided layers, only use concrete types if layers.is_empty() { return Ok(Connector::Simple(service)); } // user-provided layers exist, the timeout will be applied as an additional layer. let timeout = service.config.timeout.take(); // otherwise we have user provided layers // so we need type erasure all the way through // as well as mapping the unnameable type of the layers back to ConnectionDescriptor for the // inner service let service = layers.into_iter().fold( BoxCloneSyncService::new( ServiceBuilder::new() .layer(MapRequestLayer::new(|request: Unnameable| request.0)) .service(service), ), |service, layer| ServiceBuilder::new().layer(layer).service(service), ); // now we handle the concrete stuff - any `connect_timeout`, // plus a final map_err layer we can use to cast default tower layer // errors to internal errors match timeout { Some(timeout) => { let service = ServiceBuilder::new() .layer(TimeoutLayer::new(timeout)) .service(service) .map_err(map_timeout_to_connector_error); Ok(Connector::WithLayers(BoxCloneSyncService::new(service))) } None => { // no timeout, but still map err // no named timeout layer but we still map errors since // we might have user-provided timeout layer let service = ServiceBuilder::new() .service(service) .map_err(map_timeout_to_connector_error); Ok(Connector::WithLayers(BoxCloneSyncService::new(service))) } } } } // ===== impl Connector ===== impl Connector { /// Creates a new [`Connector`] with the provided configuration and optional layers. 
// NOTE(review): generic parameter lists appear to have been stripped from this
// dump by the extractor (e.g. `Vec`, `Poll>`, `Result, BoxError>`); the code
// tokens below are preserved exactly as found.
pub(crate) fn builder(proxies: Vec, resolver: DynResolver) -> ConnectorBuilder {
    ConnectorBuilder {
        config: Config {
            // Shared, immutable proxy matcher list.
            proxies: Arc::new(proxies),
            // Verbose connection tracing is off by default.
            verbose: Verbose::OFF,
            // TCP_NODELAY is enabled by default.
            nodelay: true,
            tls_info: false,
            timeout: None,
        },
        // The SOCKS connector keeps its own handle to the resolver,
        // so clone before moving `resolver` into the HTTP connector.
        #[cfg(feature = "socks")]
        resolver: resolver.clone(),
        http: HttpConnector::new(resolver, TokioTcpConnector::new()),
        builder: TlsConnector::builder(),
    }
}
}

impl Service for Connector {
    type Response = Conn;
    type Error = BoxError;
    type Future = Connecting;

    #[inline]
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> {
        // Delegate readiness to whichever variant is in use.
        match self {
            Connector::Simple(service) => service.poll_ready(cx),
            Connector::WithLayers(service) => service.poll_ready(cx),
        }
    }

    #[inline]
    fn call(&mut self, descriptor: ConnectionDescriptor) -> Self::Future {
        match self {
            Connector::Simple(service) => service.call(descriptor),
            // Layered services are type-erased; wrap the descriptor in
            // `Unnameable` so it can cross the boxed-layer boundary.
            Connector::WithLayers(service) => service.call(Unnameable(descriptor)),
        }
    }
}

// ===== impl ConnectorService =====

impl ConnectorService {
    /// Build an HTTPS-capable connector for a single connection attempt,
    /// applying per-request socket-bind options and preferring per-request
    /// TLS options from the descriptor over the service-wide default.
    fn build_https_connector(
        &self,
        https: bool,
        descriptor: &ConnectionDescriptor,
    ) -> Result, BoxError> {
        // Clone so per-request tweaks never leak into the shared connector.
        let mut http = self.http.clone();

        // Disable Nagle's algorithm for TLS handshake
        //
        // https://www.openssl.org/docs/man1.1.1/man3/SSL_connect.html#NOTES
        if https && !self.config.nodelay {
            http.set_nodelay(true);
        }

        // Apply TCP options if provided in metadata
        if let Some(socket_opts) = descriptor.socket_bind_options() {
            http.set_local_addresses(socket_opts.ipv4_address, socket_opts.ipv6_address);
            // Interface binding is only supported on these platforms.
            #[cfg(any(
                target_os = "android",
                target_os = "fuchsia",
                target_os = "illumos",
                target_os = "ios",
                target_os = "linux",
                target_os = "macos",
                target_os = "solaris",
                target_os = "tvos",
                target_os = "visionos",
                target_os = "watchos",
            ))]
            if let Some(interface) = &socket_opts.interface {
                http.set_interface(interface.clone());
            }
        }

        // Prefer TLS options from metadata, fallback to default
        let tls = descriptor
            .tls_options()
            .map(|opts| self.builder.build(Cow::Borrowed(opts)))
            .transpose()?
.unwrap_or_else(|| self.tls.clone()); Ok(HttpsConnector::new(http, tls)) } fn tunnel_conn_from_stream(&self, io: MaybeHttpsStream) -> Result where IO: AsyncConnWithInfo, TlsConn: Connection, SslStream: TlsInfoFactory, { let conn = match io { MaybeHttpsStream::Http(stream) => Conn { stream: self.config.verbose.wrap(stream), tls_info: false, proxy: None, }, MaybeHttpsStream::Https(stream) => Conn { stream: self.config.verbose.wrap(TlsConn { stream }), tls_info: self.config.tls_info, proxy: None, }, }; Ok(conn) } fn conn_from_stream(&self, io: MaybeHttpsStream, proxy: P) -> Result where IO: AsyncConnWithInfo, TlsConn: Connection, SslStream: TlsInfoFactory, P: Into>, { let conn = match io { MaybeHttpsStream::Http(stream) => self.config.verbose.wrap(stream), MaybeHttpsStream::Https(stream) => self.config.verbose.wrap(TlsConn { stream }), }; Ok(Conn { stream: conn, tls_info: self.config.tls_info, proxy: proxy.into(), }) } async fn connect_auto_proxy>>( self, descriptor: ConnectionDescriptor, proxy: P, ) -> Result { let is_https = descriptor.uri().is_https(); let proxy = proxy.into(); trace!("connect with maybe proxy: {:?}", proxy); let mut connector = self.build_https_connector(is_https, &descriptor)?; // When using a proxy for HTTPS targets, disable ALPN to avoid protocol negotiation issues if proxy.is_some() && is_https { connector.no_alpn(); } let io = connector.call(descriptor).await?; // Re-enable Nagle's algorithm if it was disabled earlier if is_https && !self.config.nodelay { io.as_ref().set_nodelay(false)?; } self.conn_from_stream(io, proxy) } async fn connect_via_proxy( self, mut descriptor: ConnectionDescriptor, proxy: Intercepted, ) -> Result { let uri = descriptor.uri().clone(); match proxy { Intercepted::Proxy(proxy) => { let is_https = uri.is_https(); let proxy_uri = proxy.uri().clone(); #[cfg(feature = "socks")] { use proxy::socks::{DnsResolve, SocksConnector, Version}; if let Some((version, dns_resolve)) = match proxy_uri.scheme_str() { Some("socks4") => 
Some((Version::V4, DnsResolve::Local)), Some("socks4a") => Some((Version::V4, DnsResolve::Remote)), Some("socks5") => Some((Version::V5, DnsResolve::Local)), Some("socks5h") => Some((Version::V5, DnsResolve::Remote)), _ => None, } { trace!("connecting via SOCKS proxy: {:?}", proxy_uri); // Connect to the proxy and establish the SOCKS connection. let conn = { // Build a SOCKS connector. let mut socks = SocksConnector::new( proxy_uri, self.http.clone(), self.resolver.clone(), ); socks.set_auth(proxy.raw_auth()); socks.set_version(version); socks.set_dns_mode(dns_resolve); socks.call(uri).await? }; // Build an HTTPS connector. let mut connector = self.build_https_connector(is_https, &descriptor)?; // Wrap the established SOCKS connection with TLS if needed. let io = connector .call(EstablishedConn::new(conn, descriptor)) .await?; // Re-enable Nagle's algorithm if it was disabled earlier if is_https && !self.config.nodelay { io.as_ref().set_nodelay(false)?; } return self.tunnel_conn_from_stream(io); } } if is_https { trace!("tunneling over HTTP(s) proxy: {:?}", proxy_uri); // Build an HTTPS connector. let mut connector = self.build_https_connector(is_https, &descriptor)?; // Build a tunnel connector to establish the CONNECT tunnel. let tunneled = { let mut tunnel = proxy::tunnel::TunnelConnector::new(proxy_uri, connector.clone()); // If the proxy requires basic authentication, add it to the tunnel. if let Some(auth) = proxy.basic_auth() { tunnel = tunnel.with_auth(auth.clone()); } // If the proxy has custom headers, add them to the tunnel. if let Some(headers) = proxy.custom_headers() { tunnel = tunnel.with_headers(headers.clone()); } // Connect to the proxy and establish the tunnel. tunnel.call(uri).await? }; // Wrap the established tunneled stream with TLS. 
let io = connector .call(EstablishedConn::new(tunneled, descriptor)) .await?; // Re-enable Nagle's algorithm if it was disabled earlier if !self.config.nodelay { io.as_ref().as_ref().set_nodelay(false)?; } return self.tunnel_conn_from_stream(io); } *descriptor.uri_mut() = proxy_uri; self.connect_auto_proxy(descriptor, proxy) .await .map_err(ProxyConnect) .map_err(Into::into) } #[cfg(unix)] Intercepted::Unix(unix_socket) => { trace!("connecting via Unix socket: {:?}", unix_socket); // Create a Unix connector with the specified socket path. let mut connector = HttpsConnector::new(UnixConnector::new(unix_socket), self.tls.clone()); // If the target URI is HTTPS, establish a CONNECT tunnel over the Unix socket, // then upgrade the tunneled stream to TLS. if uri.is_https() { // Use a dummy HTTP URI so the HTTPS connector works over the Unix socket. let proxy_uri = http::Uri::from_static("http://localhost"); // The tunnel connector will first establish a CONNECT tunnel, // then perform the TLS handshake over the tunneled stream. let tunneled = { // Create a tunnel connector using the Unix socket and the HTTPS connector. let mut tunnel = proxy::tunnel::TunnelConnector::new(proxy_uri, connector.clone()); tunnel.call(uri).await? }; // Wrap the established tunneled stream with TLS. let io = connector .call(EstablishedConn::new(tunneled, descriptor)) .await?; return self.tunnel_conn_from_stream(io); } // For plain HTTP, use the Unix connector directly. let io = connector.call(descriptor).await?; self.conn_from_stream(io, None) } } } async fn connect_auto(self, req: ConnectionDescriptor) -> Result { debug!("starting new connection: {:?}", req.uri()); let timeout = self.config.timeout; // Determine if a proxy should be used for this request. 
let fut = async { let intercepted = req .proxy() .and_then(|prox| prox.intercept(req.uri())) .or_else(|| { self.config .proxies .iter() .find_map(|prox| prox.intercept(req.uri())) }); // If a proxy is matched, connect via proxy; otherwise, connect directly. if let Some(intercepted) = intercepted { self.connect_via_proxy(req, intercepted).await } else { self.connect_auto_proxy(req, None).await } }; // Apply timeout if configured. if let Some(to) = timeout { tokio::time::timeout(to, fut).await.map_err(|_| TimedOut)? } else { fut.await } } } impl Service for ConnectorService { type Response = Conn; type Error = BoxError; type Future = Connecting; #[inline] fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) } #[inline] fn call(&mut self, descriptor: ConnectionDescriptor) -> Self::Future { Box::pin(self.clone().connect_auto(descriptor)) } } ================================================ FILE: src/client/conn/descriptor.rs ================================================ use std::{ hash::{BuildHasher, Hash, Hasher}, num::NonZeroU64, sync::{ Arc, LazyLock, atomic::{AtomicU64, Ordering}, }, }; use http::{Uri, Version}; use lru::DefaultHasher; use crate::{ client::{conn::SocketBindOptions, group::Group}, proxy::Matcher as ProxyMacher, tls::TlsOptions, }; /// A key that uniquely identifies a group of interchangeable connections for pooling. /// /// This ID is derived from all parameters that define a connection endpoint, /// such as URI, proxy, and local socket bindings. Connections with the same /// ID are considered equivalent and can be reused. #[derive(Debug, Clone)] pub(crate) struct ConnectionId(Arc<(Group, AtomicU64)>); /// A blueprint for creating a new client connection, containing all necessary parameters. /// /// This descriptor bundles the target `Uri`, HTTP version, `TlsOptions`, proxy settings, /// and other configurations needed to establish a connection. 
#[must_use]
#[derive(Clone)]
pub(crate) struct ConnectionDescriptor {
    // Target URI for the connection.
    uri: Uri,
    // Desired HTTP version, if pinned by the caller.
    version: Option,
    // Per-request proxy matcher, if any.
    proxy: Option,
    // Per-request TLS options overriding the client default.
    tls_options: Option,
    // Local socket binding (addresses/interface) options.
    socket_bind: Option,
    // Pooling identity derived from all of the fields above.
    connection_id: ConnectionId,
}

// ===== impl ConnectionId =====

impl Hash for ConnectionId {
    // The hash of the inner `Group` is computed once and cached in the
    // `AtomicU64`; `0` (`u64::MIN`) is the sentinel for "not yet computed".
    fn hash(&self, state: &mut H) {
        let hash = self.0.1.load(Ordering::Relaxed);
        if hash != 0 {
            // Fast path: reuse the previously cached hash.
            state.write_u64(hash);
            return;
        }
        // Process-wide hasher so equal groups hash equally across instances.
        static HASHER: LazyLock = LazyLock::new(DefaultHasher::default);
        // Clamp a computed hash of 0 to 1 so it can never collide with the
        // "uncomputed" sentinel.
        let computed_hash = NonZeroU64::new(HASHER.hash_one(&self.0.0))
            .map(NonZeroU64::get)
            .unwrap_or(1);
        // Racing writers compute the same deterministic value, so a failed
        // CAS is harmless and Relaxed ordering suffices.
        let _ = self.0.1.compare_exchange(
            u64::MIN,
            computed_hash,
            Ordering::Relaxed,
            Ordering::Relaxed,
        );
        state.write_u64(computed_hash);
    }
}

impl PartialEq for ConnectionId {
    // Equality is on the `Group` only; the cached hash is ignored.
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        self.0.0.eq(&other.0.0)
    }
}

impl Eq for ConnectionId {}

// ===== impl ConnectionDescriptor =====

impl ConnectionDescriptor {
    /// Create a new [`ConnectionDescriptor`].
    ///
    /// Folds the URI, version, proxy, and socket-bind options into `group`
    /// so the resulting [`ConnectionId`] uniquely identifies this pooling
    /// endpoint. Note that `tls_options` is not folded into the group here.
    pub(crate) fn new(
        uri: Uri,
        mut group: Group,
        proxy: Option,
        version: Option,
        tls_options: Option,
        socket_bind: Option,
    ) -> ConnectionDescriptor {
        let connection_id = {
            group
                .uri(uri.clone())
                .version(version)
                .proxy(proxy.clone())
                .socket_bind(socket_bind.clone());
            // Hash cache starts at the `u64::MIN` (0) sentinel.
            ConnectionId(Arc::new((group, AtomicU64::new(u64::MIN))))
        };
        ConnectionDescriptor {
            uri,
            proxy,
            version,
            tls_options,
            socket_bind,
            connection_id,
        }
    }

    /// Returns a [`ConnectionId`] group ID for this descriptor.
    #[inline]
    pub(crate) fn id(&self) -> ConnectionId {
        self.connection_id.clone()
    }

    /// Returns a reference to the [`Uri`].
    #[inline]
    pub(crate) fn uri(&self) -> &Uri {
        &self.uri
    }

    /// Returns a mutable reference to the [`Uri`].
    #[inline]
    pub(crate) fn uri_mut(&mut self) -> &mut Uri {
        &mut self.uri
    }

    /// Return the negotiated HTTP version, if any.
    pub(crate) fn version(&self) -> Option {
        self.version
    }

    /// Return a reference to the [`TlsOptions`].
    #[inline]
    pub(crate) fn tls_options(&self) -> Option<&TlsOptions> {
        self.tls_options.as_ref()
    }

    /// Return a reference to the [`ProxyMacher`].
#[inline] pub(crate) fn proxy(&self) -> Option<&ProxyMacher> { self.proxy.as_ref() } /// Return a reference to the [`SocketBindOptions`]. #[inline] pub(crate) fn socket_bind_options(&self) -> Option<&SocketBindOptions> { self.socket_bind.as_ref() } } ================================================ FILE: src/client/conn/http.rs ================================================ use std::{ future::Future, marker::PhantomData, net::{Ipv4Addr, Ipv6Addr, SocketAddr}, pin::Pin, sync::Arc, task::{self, Poll}, time::Duration, }; use http::uri::{Scheme, Uri}; use pin_project_lite::pin_project; use tokio::io::{AsyncRead, AsyncWrite}; use tower::{BoxError, Service}; use super::{ Connection, tcp::{ ConnectError, ConnectingTcp, SocketBindOptions, TcpConnector, TcpKeepaliveOptions, TcpOptions, }, }; use crate::dns::{self, InternalResolve}; static INVALID_NOT_HTTP: &str = "invalid URI, scheme is not http"; static INVALID_MISSING_SCHEME: &str = "invalid URI, scheme is missing"; static INVALID_MISSING_HOST: &str = "invalid URI, host is missing"; type ConnectResult = Result<::Connection, ConnectError>; type BoxConnecting = Pin> + Send>>; /// A trait for configuring HTTP transport options on a [`Service`] connector. /// /// Provides methods to adjust TCP/socket-level settings such as keepalive, /// timeouts, buffer sizes, and local address binding. [`HttpConnector`] /// is the default implementation. pub trait HttpTransport: Service + Clone + Send + Sized + 'static where Self::Response: AsyncRead + AsyncWrite + Connection + Unpin + Send + 'static, Self::Error: Into, Self::Future: Unpin + Send + 'static, { /// Set that all sockets have `SO_KEEPALIVE` set with the supplied duration /// to remain idle before sending TCP keepalive probes. fn enforce_http(&mut self, enforced: bool); /// Set that all sockets have `SO_NODELAY` set to the supplied value `nodelay`. fn set_nodelay(&mut self, nodelay: bool); /// Sets the value of the `SO_SNDBUF` option on the socket. 
fn set_send_buffer_size(&mut self, size: Option); /// Sets the value of the `SO_RCVBUF` option on the socket. fn set_recv_buffer_size(&mut self, size: Option); /// Set that all socket have `SO_REUSEADDR` set to the supplied value `reuse_address`. fn set_reuse_address(&mut self, reuse: bool); /// Sets the value of the `TCP_USER_TIMEOUT` option on the socket. #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] fn set_tcp_user_timeout(&mut self, time: Option); /// Set the connect timeout. fn set_connect_timeout(&mut self, dur: Option); /// Set timeout for [RFC 6555 (Happy Eyeballs)][RFC 6555] algorithm. /// /// [RFC 6555]: https://tools.ietf.org/html/rfc6555 fn set_happy_eyeballs_timeout(&mut self, dur: Option); /// Set that all sockets have `SO_KEEPALIVE` set with the supplied duration /// to remain idle before sending TCP keepalive probes. fn set_keepalive(&mut self, time: Option); /// Set the duration between two successive TCP keepalive retransmissions, /// if acknowledgement to the previous keepalive transmission is not received. fn set_keepalive_interval(&mut self, interval: Option); /// Set the number of retransmissions to be carried out before declaring that remote end is not /// available. fn set_keepalive_retries(&mut self, retries: Option); /// Sets the name of the interface to bind sockets produced. #[cfg(any( target_os = "android", target_os = "fuchsia", target_os = "illumos", target_os = "ios", target_os = "linux", target_os = "macos", target_os = "solaris", target_os = "tvos", target_os = "visionos", target_os = "watchos", ))] fn set_interface>>(&mut self, interface: I); /// Set that all sockets are bound to the configured IPv4 or IPv6 address (depending on host's /// preferences) before connection. fn set_local_addresses(&mut self, ipv4_address: V4, ipv6_address: V6) where V4: Into>, V6: Into>; } /// A connector for the `http` scheme. /// /// Performs DNS resolution in a thread pool, and then connects over TCP. 
/// /// # Note /// /// Sets the [`HttpInfo`] value on responses, which includes /// transport information such as the remote socket address used. #[derive(Clone)] pub struct HttpConnector { options: Arc, resolver: R, connector: S, } /// Extra information about the transport when an HttpConnector is used. /// /// # Example /// /// ``` /// # fn doc(res: http::Response<()>) { /// use crate::util::client::connect::HttpInfo; /// /// // res = http::Response /// res.extensions().get::().map(|info| { /// println!("remote addr = {}", info.remote_addr()); /// }); /// # } /// ``` /// /// # Note /// /// If a different connector is used besides [`HttpConnector`], /// this value will not exist in the extensions. Consult that specific /// connector to see what "extra" information it might provide to responses. #[derive(Clone, Debug)] pub struct HttpInfo { pub(crate) remote_addr: SocketAddr, pub(crate) local_addr: SocketAddr, } // ===== impl HttpConnector ===== impl HttpConnector { /// Construct a new [`HttpConnector`]. pub fn new(resolver: R, connector: S) -> HttpConnector { HttpConnector { options: Arc::new(TcpOptions { enforce_http: true, connect_timeout: None, happy_eyeballs_timeout: Some(Duration::from_millis(300)), nodelay: false, reuse_address: false, send_buffer_size: None, recv_buffer_size: None, #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] tcp_user_timeout: None, tcp_keepalive: TcpKeepaliveOptions::default(), socket_bind: SocketBindOptions::default(), }), resolver, connector, } } fn config_mut(&mut self) -> &mut TcpOptions { // If the are HttpConnector clones, this will clone the inner // config. So mutating the config won't ever affect previous // clones. Arc::make_mut(&mut self.options) } } impl HttpTransport for HttpConnector where R: InternalResolve + Clone + Send + Sync + 'static, R::Future: Send, S: TcpConnector, { /// Option to enforce all `Uri`s have the `http` scheme. /// /// Enabled by default. 
#[inline] fn enforce_http(&mut self, is_enforced: bool) { self.config_mut().enforce_http = is_enforced; } /// Set that all sockets have `SO_NODELAY` set to the supplied value `nodelay`. /// /// Default is `false`. #[inline] fn set_nodelay(&mut self, nodelay: bool) { self.config_mut().nodelay = nodelay; } /// Sets the value of the SO_SNDBUF option on the socket. #[inline] fn set_send_buffer_size(&mut self, size: Option) { self.config_mut().send_buffer_size = size; } /// Sets the value of the SO_RCVBUF option on the socket. #[inline] fn set_recv_buffer_size(&mut self, size: Option) { self.config_mut().recv_buffer_size = size; } /// Set that all socket have `SO_REUSEADDR` set to the supplied value `reuse_address`. /// /// Default is `false`. #[inline] fn set_reuse_address(&mut self, reuse_address: bool) { self.config_mut().reuse_address = reuse_address; } /// Sets the value of the TCP_USER_TIMEOUT option on the socket. #[inline] #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] fn set_tcp_user_timeout(&mut self, time: Option) { self.config_mut().tcp_user_timeout = time; } /// Set the connect timeout. /// /// If a domain resolves to multiple IP addresses, the timeout will be /// evenly divided across them. /// /// Default is `None`. #[inline] fn set_connect_timeout(&mut self, dur: Option) { self.config_mut().connect_timeout = dur; } /// Set timeout for [RFC 6555 (Happy Eyeballs)][RFC 6555] algorithm. /// /// If hostname resolves to both IPv4 and IPv6 addresses and connection /// cannot be established using preferred address family before timeout /// elapses, then connector will in parallel attempt connection using other /// address family. /// /// If `None`, parallel connection attempts are disabled. /// /// Default is 300 milliseconds. 
/// /// [RFC 6555]: https://tools.ietf.org/html/rfc6555 #[inline] fn set_happy_eyeballs_timeout(&mut self, dur: Option) { self.config_mut().happy_eyeballs_timeout = dur; } /// Set that all sockets have `SO_KEEPALIVE` set with the supplied duration /// to remain idle before sending TCP keepalive probes. /// /// If `None`, keepalive is disabled. /// /// Default is `None`. #[inline] fn set_keepalive(&mut self, time: Option) { self.config_mut().tcp_keepalive.time = time; } /// Set the duration between two successive TCP keepalive retransmissions, /// if acknowledgement to the previous keepalive transmission is not received. #[inline] fn set_keepalive_interval(&mut self, interval: Option) { self.config_mut().tcp_keepalive.interval = interval; } /// Set the number of retransmissions to be carried out before declaring that remote end is not /// available. #[inline] fn set_keepalive_retries(&mut self, retries: Option) { self.config_mut().tcp_keepalive.retries = retries; } /// Sets the name of the interface to bind sockets produced by this /// connector. /// /// On Linux, this sets the `SO_BINDTODEVICE` option on this socket (see /// [`man 7 socket`] for details). On macOS (and macOS-derived systems like /// iOS), illumos, and Solaris, this will instead use the `IP_BOUND_IF` /// socket option (see [`man 7p ip`]). /// /// If a socket is bound to an interface, only packets received from that particular /// interface are processed by the socket. Note that this only works for some socket /// types, particularly `AF_INET`` sockets. /// /// On Linux it can be used to specify a [VRF], but the binary needs /// to either have `CAP_NET_RAW` or to be run as root. 
/// /// This function is only available on the following operating systems: /// - Linux, including Android /// - Fuchsia /// - illumos and Solaris /// - macOS, iOS, visionOS, watchOS, and tvOS /// /// [VRF]: https://www.kernel.org/doc/Documentation/networking/vrf.txt /// [`man 7 socket`]: https://man7.org/linux/man-pages/man7/socket.7.html /// [`man 7p ip`]: https://docs.oracle.com/cd/E86824_01/html/E54777/ip-7p.html #[cfg(any( target_os = "android", target_os = "fuchsia", target_os = "illumos", target_os = "ios", target_os = "linux", target_os = "macos", target_os = "solaris", target_os = "tvos", target_os = "visionos", target_os = "watchos", ))] fn set_interface>>(&mut self, interface: I) { self.config_mut().socket_bind.set_interface(interface); } /// Set that all sockets are bound to the configured IPv4 or IPv6 address (depending on host's /// preferences) before connection. /// /// If `None`, the sockets will not be bound. /// /// Default is `None`. fn set_local_addresses(&mut self, ipv4_address: V4, ipv6_address: V6) where V4: Into>, V6: Into>, { self.config_mut() .socket_bind .set_local_addresses(ipv4_address, ipv6_address); } } impl Service for HttpConnector where R: InternalResolve + Clone + Send + Sync + 'static, R::Future: Send, S: TcpConnector, S::TcpStream: From, { type Response = S::Connection; type Error = ConnectError; type Future = HttpConnecting; #[inline] fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { self.resolver.poll_ready(cx).map_err(ConnectError::dns) } fn call(&mut self, dst: Uri) -> Self::Future { let mut this = self.clone(); let fut = async move { let options = &this.options; let (host, port) = get_host_port(options, &dst)?; let host = host.trim_start_matches('[').trim_end_matches(']'); let addrs = if let Some(addrs) = dns::SocketAddrs::try_parse(host, port) { addrs } else { let addrs = dns::resolve(&mut this.resolver, dns::Name::new(host.into())) .await .map_err(ConnectError::dns)?; let addrs = addrs .map(|mut addr| { 
set_port(&mut addr, port, dst.port().is_some());
                        addr
                    })
                    .collect();
                dns::SocketAddrs::new(addrs)
            };
            // Connect over TCP, racing address families per the configured
            // happy-eyeballs timeout.
            ConnectingTcp::new(addrs, options, this.connector)
                .connect(options)
                .await
        };
        HttpConnecting {
            fut: Box::pin(fut),
            _marker: PhantomData,
        }
    }
}

/// Validate `dst` and extract its `(host, port)` pair.
///
/// When `enforce_http` is set, only the `http` scheme is accepted; otherwise
/// any scheme is allowed as long as one is present. Missing host is always an
/// error. A missing port defaults to 443 for `https` and 80 otherwise.
fn get_host_port<'u>(options: &TcpOptions, dst: &'u Uri) -> Result<(&'u str, u16), ConnectError> {
    trace!(
        "Http::connect; scheme={:?}, host={:?}, port={:?}",
        dst.scheme(),
        dst.host(),
        dst.port(),
    );
    if options.enforce_http {
        if dst.scheme() != Some(&Scheme::HTTP) {
            return Err(ConnectError {
                msg: INVALID_NOT_HTTP,
                addr: None,
                cause: None,
            });
        }
    } else if dst.scheme().is_none() {
        return Err(ConnectError {
            msg: INVALID_MISSING_SCHEME,
            addr: None,
            cause: None,
        });
    }
    let host = match dst.host() {
        Some(s) => s,
        None => {
            return Err(ConnectError {
                msg: INVALID_MISSING_HOST,
                addr: None,
                cause: None,
            });
        }
    };
    let port = match dst.port() {
        Some(port) => port.as_u16(),
        None => {
            // Scheme-default ports: 443 for https, 80 for everything else.
            if dst.scheme() == Some(&Scheme::HTTPS) {
                443
            } else {
                80
            }
        }
    };
    Ok((host, port))
}

/// Respect explicit ports in the URI, if none, either
/// keep non `0` ports resolved from a custom dns resolver,
/// or use the default port for the scheme.
fn set_port(addr: &mut SocketAddr, host_port: u16, explicit: bool) {
    if explicit || addr.port() == 0 {
        addr.set_port(host_port)
    };
}

impl HttpInfo {
    /// Get the remote address of the transport used.
    pub fn remote_addr(&self) -> SocketAddr {
        self.remote_addr
    }

    /// Get the local address of the transport used.
    pub fn local_addr(&self) -> SocketAddr {
        self.local_addr
    }
}

pin_project! {
    // Not publicly exported (so missing_docs doesn't trigger).
    //
    // We return this `Future` instead of the `Pin>` directly
    // so that users don't rely on it fitting in a `Pin>` slot
    // (and thus we can change the type in the future).
#[must_use = "futures do nothing unless polled"] pub struct HttpConnecting { #[pin] fut: BoxConnecting, _marker: PhantomData, } } impl Future for HttpConnecting where R: InternalResolve, S: TcpConnector, { type Output = ConnectResult; #[inline] fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { self.project().fut.poll(cx) } } ================================================ FILE: src/client/conn/proxy/socks.rs ================================================ use std::{ borrow::Cow, task::{Context, Poll}, }; use bytes::Bytes; use http::Uri; use tokio::io::{AsyncRead, AsyncWrite}; use tokio_socks::{ TargetAddr, tcp::{Socks4Stream, Socks5Stream}, }; use tower::Service; use super::Tunneling; use crate::{ dns::{GaiResolver, InternalResolve, Name}, error::BoxError, ext::UriExt, }; #[derive(Debug)] pub enum SocksError { ConnectFailed(BoxError), DnsResolveFailure(BoxError), Socks(tokio_socks::Error), Io(std::io::Error), Utf8(std::str::Utf8Error), DnsFailure, MissingHost, } impl std::fmt::Display for SocksError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.write_str("SOCKS error: ")?; match self { Self::ConnectFailed(e) => { f.write_fmt(format_args!("failed to create underlying connection: {e}")) } Self::Socks(e) => f.write_fmt(format_args!("error during SOCKS handshake: {e}")), Self::Io(e) => f.write_fmt(format_args!("io error during SOCKS handshake: {e}")), Self::Utf8(e) => f.write_fmt(format_args!( "invalid UTF-8 during SOCKS authentication: {e}" )), Self::DnsResolveFailure(e) => { f.write_fmt(format_args!("failed to resolve DNS for SOCKS target: {e}")) } Self::DnsFailure => f.write_str("could not resolve to acceptable address type"), Self::MissingHost => f.write_str("missing destination host"), } } } impl std::error::Error for SocksError {} impl From for SocksError { fn from(err: std::io::Error) -> Self { Self::Io(err) } } impl From for SocksError { fn from(err: std::str::Utf8Error) -> Self { Self::Utf8(err) } } impl From for 
SocksError {
    // Wrap protocol-level handshake failures from the tokio-socks crate.
    fn from(err: tokio_socks::Error) -> Self {
        Self::Socks(err)
    }
}

/// Represents the SOCKS protocol version.
#[derive(Clone, Copy)]
#[repr(u8)]
pub enum Version {
    V4,
    V5,
}

/// Represents the DNS resolution strategy for SOCKS connections.
#[derive(Clone, Copy)]
#[repr(u8)]
pub enum DnsResolve {
    /// Resolve the target hostname locally and send an IP to the proxy.
    Local,
    /// Send the hostname to the proxy and let it resolve DNS.
    Remote,
}

/// A connector that establishes connections through a SOCKS proxy.
pub struct SocksConnector {
    // Underlying transport connector used to reach the proxy itself.
    inner: C,
    // Resolver used when DNS resolution is performed locally.
    resolver: R,
    // Address of the SOCKS proxy server.
    proxy_dst: Uri,
    // Optional (username, password) credentials; SOCKS5 only.
    auth: Option<(Bytes, Bytes)>,
    version: Version,
    dns_resolve: DnsResolve,
}

impl SocksConnector
where
    R: InternalResolve + Clone,
{
    /// Create a new [`SocksConnector`].
    ///
    /// Defaults to SOCKS5 with local DNS resolution and no authentication.
    pub fn new(proxy_dst: Uri, inner: C, resolver: R) -> Self {
        SocksConnector {
            inner,
            resolver,
            proxy_dst,
            version: Version::V5,
            dns_resolve: DnsResolve::Local,
            auth: None,
        }
    }

    /// Sets the authentication credentials for the SOCKS proxy connection.
    #[inline]
    pub fn set_auth(&mut self, auth: Option<(Bytes, Bytes)>) {
        self.auth = auth;
    }

    /// Sets the SOCKS protocol version (V4 or V5) used for the proxy connection.
    #[inline]
    pub fn set_version(&mut self, version: Version) {
        self.version = version;
    }

    /// Sets whether to resolve DNS locally or let the proxy handle DNS resolution.
#[inline] pub fn set_dns_mode(&mut self, dns_resolve: DnsResolve) { self.dns_resolve = dns_resolve; } } impl Service for SocksConnector where C: Service, C::Future: Send + 'static, C::Response: AsyncRead + AsyncWrite + Unpin + Send + 'static, C::Error: Into, R: InternalResolve + Clone + Send + 'static, ::Future: Send + 'static, { type Response = C::Response; type Error = SocksError; type Future = Tunneling; #[inline] fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { self.inner .poll_ready(cx) .map_err(Into::into) .map_err(SocksError::ConnectFailed) } fn call(&mut self, dst: Uri) -> Self::Future { let connecting = self.inner.call(self.proxy_dst.clone()); let version = self.version; let dns_resolve = self.dns_resolve; let auth = self.auth.clone(); let mut resolver = self.resolver.clone(); let fut = async move { let host = dst.host().ok_or(SocksError::MissingHost)?; let port = dst.port_or_default(); // Attempt to tcp connect to the proxy server. // This will return a `tokio::net::TcpStream` if successful. let socket = connecting .await .map_err(Into::into) .map_err(SocksError::ConnectFailed)?; // Resolve the target address using the provided resolver. let target_addr = match dns_resolve { DnsResolve::Local => { let mut socket_addr = resolver .resolve(Name::new(host.into())) .await .map(|mut s| s.next()) .transpose() .ok_or(SocksError::DnsFailure)? .map_err(Into::into) .map_err(SocksError::DnsResolveFailure)?; socket_addr.set_port(port); TargetAddr::Ip(socket_addr) } DnsResolve::Remote => TargetAddr::Domain(Cow::Borrowed(host), port), }; match version { Version::V4 => { // For SOCKS4, we connect directly to the target address. let stream = Socks4Stream::connect_with_socket(socket, target_addr).await?; Ok(stream.into_inner()) } Version::V5 => { // For SOCKS5, we need to handle authentication if provided. // The `auth` is an optional tuple of (username, password). 
let stream = match auth { Some((username, password)) => { let username = std::str::from_utf8(&username)?; let password = std::str::from_utf8(&password)?; Socks5Stream::connect_with_password_and_socket( socket, target_addr, username, password, ) .await? } None => Socks5Stream::connect_with_socket(socket, target_addr).await?, }; Ok(stream.into_inner()) } } }; Tunneling { fut: Box::pin(fut), _marker: Default::default(), } } } ================================================ FILE: src/client/conn/proxy/tunnel.rs ================================================ use std::{ marker::{PhantomData, Unpin}, task::{self, Poll}, }; use bytes::BytesMut; use http::{HeaderMap, HeaderValue, Uri}; use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; use tower::{BoxError, Service}; use super::Tunneling; use crate::ext::UriExt; /// Tunnel Proxy via HTTP CONNECT /// /// This is a connector that can be used by the `Client`. It wraps /// another connector, and after getting an underlying connection, it creates /// an HTTP CONNECT tunnel over it. #[derive(Debug)] pub struct TunnelConnector { headers: Headers, inner: C, proxy_dst: Uri, } #[derive(Clone, Debug)] enum Headers { Empty, Auth(HeaderValue), Extra(HeaderMap), } #[derive(Debug)] pub enum TunnelError { ConnectFailed(BoxError), Io(std::io::Error), Parse(httparse::Error), MissingHost, ProxyAuthRequired, TunnelUnexpectedEof, TunnelUnsuccessful, } impl TunnelConnector { /// Create a new tunnel connector. /// /// This wraps an underlying connector, and stores the address of a /// tunneling proxy server. /// /// A `TunnelConnector` can then be called with any destination. The `proxy_dst` passed to /// `call` will not be used to create the underlying connection, but will /// be used in an HTTP CONNECT request sent to the proxy destination. pub fn new(proxy_dst: Uri, connector: C) -> Self { Self { headers: Headers::Empty, inner: connector, proxy_dst, } } /// Add `proxy-authorization` header value to the CONNECT request. 
pub fn with_auth(mut self, mut auth: HeaderValue) -> Self { // just in case the user forgot auth.set_sensitive(true); match self.headers { Headers::Empty => { self.headers = Headers::Auth(auth); } Headers::Auth(ref mut existing) => { *existing = auth; } Headers::Extra(ref mut extra) => { extra.insert(http::header::PROXY_AUTHORIZATION, auth); } } self } /// Add extra headers to be sent with the CONNECT request. /// /// If existing headers have been set, these will be merged. pub fn with_headers(mut self, mut headers: HeaderMap) -> Self { match self.headers { Headers::Empty => { self.headers = Headers::Extra(headers); } Headers::Auth(auth) => { headers .entry(http::header::PROXY_AUTHORIZATION) .or_insert(auth); self.headers = Headers::Extra(headers); } Headers::Extra(ref mut extra) => { extra.extend(headers); } } self } } impl Service for TunnelConnector where C: Service, C::Future: Send + 'static, C::Response: AsyncRead + AsyncWrite + Unpin + Send + 'static, C::Error: Into, { type Response = C::Response; type Error = TunnelError; type Future = Tunneling; #[inline] fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { self.inner .poll_ready(cx) .map_err(Into::into) .map_err(TunnelError::ConnectFailed) } fn call(&mut self, dst: Uri) -> Self::Future { let connecting = self.inner.call(self.proxy_dst.clone()); let headers = self.headers.clone(); Tunneling { fut: Box::pin(async move { let conn = connecting .await .map_err(Into::into) .map_err(TunnelError::ConnectFailed)?; tunnel( conn, dst.host().ok_or(TunnelError::MissingHost)?, dst.port_or_default(), &headers, ) .await }), _marker: PhantomData, } } } async fn tunnel(mut conn: T, host: &str, port: u16, headers: &Headers) -> Result where T: AsyncRead + AsyncWrite + Unpin, { let mut buf = format!( "\ CONNECT {host}:{port} HTTP/1.1\r\n\ Host: {host}:{port}\r\n\ " ) .into_bytes(); match headers { Headers::Auth(auth) => { buf.extend_from_slice(b"Proxy-Authorization: "); buf.extend_from_slice(auth.as_bytes()); 
buf.extend_from_slice(b"\r\n"); } Headers::Extra(extra) => { for (name, value) in extra { buf.extend_from_slice(name.as_str().as_bytes()); buf.extend_from_slice(b": "); buf.extend_from_slice(value.as_bytes()); buf.extend_from_slice(b"\r\n"); } } Headers::Empty => (), } // headers end buf.extend_from_slice(b"\r\n"); conn.write_all(&buf).await.map_err(TunnelError::Io)?; conn.flush().await.map_err(TunnelError::Io)?; let mut buf = BytesMut::with_capacity(8192); loop { if conn.read_buf(&mut buf).await.map_err(TunnelError::Io)? == 0 { return Err(TunnelError::TunnelUnexpectedEof); } let mut headers = [httparse::EMPTY_HEADER; 64]; let mut res = httparse::Response::new(&mut headers); match res.parse(&buf).map_err(TunnelError::Parse)? { httparse::Status::Partial => continue, httparse::Status::Complete(_) => match res.code { Some(200) => return Ok(conn), Some(407) => return Err(TunnelError::ProxyAuthRequired), Some(_) | None => return Err(TunnelError::TunnelUnsuccessful), }, } } } impl std::fmt::Display for TunnelError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.write_str("tunnel error: ")?; f.write_str(match self { TunnelError::MissingHost => "missing destination host", TunnelError::ProxyAuthRequired => "proxy authorization required", TunnelError::Parse(_) => "invalid proxy response", TunnelError::TunnelUnexpectedEof => "unexpected end of file", TunnelError::TunnelUnsuccessful => "unsuccessful", TunnelError::ConnectFailed(_) => "failed to create underlying connection", TunnelError::Io(_) => "io error establishing tunnel", }) } } impl std::error::Error for TunnelError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match self { TunnelError::Io(e) => Some(e), TunnelError::Parse(e) => Some(e), TunnelError::ConnectFailed(e) => Some(&**e), _ => None, } } } ================================================ FILE: src/client/conn/proxy.rs ================================================ //! 
//! Proxy helpers

#[cfg(feature = "socks")]
pub mod socks;
pub mod tunnel;

use std::{
    marker::PhantomData,
    pin::Pin,
    task::{Context, Poll},
};

use pin_project_lite::pin_project;

pin_project! {
    // Not publicly exported (so missing_docs doesn't trigger).
    //
    // We return this `Future` instead of the `Pin<Box<dyn Future>>` directly
    // so that users don't rely on it fitting in a `Pin<Box<dyn Future>>` slot
    // (and thus we can change the type in the future).
    #[must_use = "futures do nothing unless polled"]
    pub struct Tunneling<F, T, E> {
        #[pin]
        fut: Pin<Box<dyn Future<Output = Result<T, E>> + Send>>,
        // `F` is phantom: it names the underlying connector future so the
        // `Future` impl below can be bounded on it.
        _marker: PhantomData<F>,
    }
}

impl<F, T, E> Future for Tunneling<F, T, E>
where
    F: Future<Output = Result<T, E>>,
{
    type Output = Result<T, E>;

    #[inline]
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        self.project().fut.poll(cx)
    }
}

================================================
FILE: src/client/conn/tcp/tokio.rs
================================================
use std::{future::Future, io, net::SocketAddr, pin::Pin, time::Duration};

use tokio::net::{TcpSocket, TcpStream};

use super::TcpConnector;
use crate::client::{Connected, Connection, conn::HttpInfo};

/// A connector that uses `tokio` for TCP connections.
#[derive(Clone, Copy, Debug, Default)]
pub struct TokioTcpConnector {
    _priv: (),
}

impl TokioTcpConnector {
    /// Create a new [`TokioTcpConnector`].
pub fn new() -> Self { Self { _priv: () } } } impl TcpConnector for TokioTcpConnector { type TcpStream = std::net::TcpStream; type Connection = TcpStream; type Error = io::Error; type Future = Pin> + Send>>; type Sleep = tokio::time::Sleep; #[inline] fn connect(&self, socket: Self::TcpStream, addr: SocketAddr) -> Self::Future { let socket = TcpSocket::from_std_stream(socket); Box::pin(socket.connect(addr)) } #[inline] fn sleep(&self, duration: Duration) -> Self::Sleep { tokio::time::sleep(duration) } } impl Connection for TcpStream { fn connected(&self) -> Connected { let connected = Connected::new(); if let (Ok(remote_addr), Ok(local_addr)) = (self.peer_addr(), self.local_addr()) { connected.extra(HttpInfo { remote_addr, local_addr, }) } else { connected } } } ================================================ FILE: src/client/conn/tcp.rs ================================================ pub mod tokio; #[cfg(any( target_os = "illumos", target_os = "ios", target_os = "macos", target_os = "solaris", target_os = "tvos", target_os = "visionos", target_os = "watchos", target_os = "android", target_os = "fuchsia", target_os = "linux", ))] use std::borrow::Cow; use std::{ error::Error as StdError, fmt, future::Future, io, net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, pin::pin, time::Duration, }; use futures_util::future::Either; use socket2::TcpKeepalive; use super::Connection; use crate::{dns, error::BoxError}; /// A builder for tcp connections. pub trait TcpConnector: Clone + Send + Sync + 'static { /// The underlying stream type. type TcpStream: From + Send + Sync + 'static; /// The type of connection returned by this builder. type Connection: ::tokio::io::AsyncRead + ::tokio::io::AsyncWrite + Connection + Send + Unpin + 'static; /// The type of error returned by this builder. type Error: Into>; /// The future type returned by this builder. type Future: Future> + Send + 'static; /// The future type returned by this builder's sleep. 
type Sleep: Future + Send + 'static; /// Build a connection from the given socket and connect to the address. fn connect(&self, socket: Self::TcpStream, addr: SocketAddr) -> Self::Future; /// Return a future that sleeps for the given duration. fn sleep(&self, duration: Duration) -> Self::Sleep; } pub(super) struct ConnectingTcp { preferred: ConnectingTcpRemote, fallback: Option>, } struct ConnectingTcpFallback { delay: S::Sleep, remote: ConnectingTcpRemote, } struct ConnectingTcpRemote { addrs: dns::SocketAddrs, connect_timeout: Option, connector: S, } impl ConnectingTcp where S::TcpStream: From, { pub(super) fn new(remote_addrs: dns::SocketAddrs, config: &TcpOptions, connector: S) -> Self { if let Some(fallback_timeout) = config.happy_eyeballs_timeout { let (preferred_addrs, fallback_addrs) = remote_addrs.split_by_preference( config.socket_bind.ipv4_address, config.socket_bind.ipv6_address, ); if fallback_addrs.is_empty() { return ConnectingTcp { preferred: ConnectingTcpRemote::new( preferred_addrs, config.connect_timeout, connector, ), fallback: None, }; } ConnectingTcp { preferred: ConnectingTcpRemote::new( preferred_addrs, config.connect_timeout, connector.clone(), ), fallback: Some(ConnectingTcpFallback { delay: connector.sleep(fallback_timeout), remote: ConnectingTcpRemote::new( fallback_addrs, config.connect_timeout, connector, ), }), } } else { ConnectingTcp { preferred: ConnectingTcpRemote::new( remote_addrs, config.connect_timeout, connector, ), fallback: None, } } } } impl ConnectingTcpRemote where S::TcpStream: From, { fn new(addrs: dns::SocketAddrs, connect_timeout: Option, connector: S) -> Self { let connect_timeout = connect_timeout.and_then(|t| t.checked_div(addrs.len() as u32)); Self { addrs, connect_timeout, connector, } } async fn connect(&mut self, config: &TcpOptions) -> Result { let mut err = None; for addr in &mut self.addrs { debug!("connecting to {}", addr); match connect(&addr, config, self.connect_timeout, &self.connector) { Ok(fut) => 
match fut.await { Ok(tcp) => { debug!("connected to {}", addr); return Ok(tcp); } Err(mut e) => { trace!("connect error for {}: {:?}", addr, e); e.addr = Some(addr); if err.is_none() { err = Some(e); } } }, Err(mut e) => { trace!("connect error for {}: {:?}", addr, e); e.addr = Some(addr); if err.is_none() { err = Some(e); } } } } match err { Some(e) => Err(e), None => Err(ConnectError::new( "tcp connect error", std::io::Error::new(std::io::ErrorKind::NotConnected, "Network unreachable"), )), } } } fn bind_local_address( socket: &socket2::Socket, dst_addr: &SocketAddr, local_addr_ipv4: &Option, local_addr_ipv6: &Option, ) -> io::Result<()> { match (*dst_addr, local_addr_ipv4, local_addr_ipv6) { (SocketAddr::V4(_), Some(addr), _) => { socket.bind(&SocketAddr::new((*addr).into(), 0).into())?; } (SocketAddr::V6(_), _, Some(addr)) => { socket.bind(&SocketAddr::new((*addr).into(), 0).into())?; } _ => { if cfg!(windows) { // Windows requires a socket be bound before calling connect let any: SocketAddr = match *dst_addr { SocketAddr::V4(_) => ([0, 0, 0, 0], 0).into(), SocketAddr::V6(_) => ([0, 0, 0, 0, 0, 0, 0, 0], 0).into(), }; socket.bind(&any.into())?; } } } Ok(()) } fn connect( addr: &SocketAddr, config: &TcpOptions, connect_timeout: Option, connector: &S, ) -> Result>, ConnectError> where S::TcpStream: From, { use socket2::{Domain, Protocol, Socket, Type}; let domain = Domain::for_address(*addr); let socket = Socket::new(domain, Type::STREAM, Some(Protocol::TCP)) .map_err(ConnectError::m("tcp open error"))?; // When constructing a Tokio `TcpSocket` from a raw fd/socket, the user is // responsible for ensuring O_NONBLOCK is set. socket .set_nonblocking(true) .map_err(ConnectError::m("tcp set_nonblocking error"))?; if let Some(tcp_keepalive) = &config.tcp_keepalive.into_tcpkeepalive() { if let Err(_e) = socket.set_tcp_keepalive(tcp_keepalive) { warn!("tcp set_keepalive error: {_e}"); } } // That this only works for some socket types, particularly AF_INET sockets. 
#[cfg(any( target_os = "android", target_os = "fuchsia", target_os = "illumos", target_os = "ios", target_os = "linux", target_os = "macos", target_os = "solaris", target_os = "tvos", target_os = "visionos", target_os = "watchos", ))] if let Some(interface) = &config.socket_bind.interface { // On Linux-like systems, set the interface to bind using // `SO_BINDTODEVICE`. #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] socket .bind_device(Some(interface.as_bytes())) .map_err(ConnectError::m("tcp bind interface error"))?; // On macOS-like and Solaris-like systems, we instead use `IP_BOUND_IF`. // This socket option desires an integer index for the interface, so we // must first determine the index of the requested interface name using // `if_nametoindex`. #[cfg(any( target_os = "illumos", target_os = "ios", target_os = "macos", target_os = "solaris", target_os = "tvos", target_os = "visionos", target_os = "watchos", ))] if let Ok(interface) = std::ffi::CString::new(interface.as_bytes()) { #[allow(unsafe_code)] let idx = unsafe { libc::if_nametoindex(interface.as_ptr()) }; let idx = std::num::NonZeroU32::new(idx).ok_or_else(|| { // If the index is 0, check errno and return an I/O error. ConnectError::new( "error converting interface name to index", io::Error::last_os_error(), ) })?; // Different setsockopt calls are necessary depending on whether the // address is IPv4 or IPv6. 
match addr { SocketAddr::V4(_) => socket.bind_device_by_index_v4(Some(idx)), SocketAddr::V6(_) => socket.bind_device_by_index_v6(Some(idx)), } .map_err(ConnectError::m("tcp bind interface error"))?; } } #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] if let Some(tcp_user_timeout) = &config.tcp_user_timeout { if let Err(_e) = socket.set_tcp_user_timeout(Some(*tcp_user_timeout)) { warn!("tcp set_tcp_user_timeout error: {_e}"); } } bind_local_address( &socket, addr, &config.socket_bind.ipv4_address, &config.socket_bind.ipv6_address, ) .map_err(ConnectError::m("tcp bind local error"))?; if config.reuse_address { if let Err(_e) = socket.set_reuse_address(true) { warn!("tcp set_reuse_address error: {_e}"); } } if let Some(size) = config.send_buffer_size { if let Err(_e) = socket.set_send_buffer_size(size) { warn!("tcp set_buffer_size error: {_e}"); } } if let Some(size) = config.recv_buffer_size { if let Err(_e) = socket.set_recv_buffer_size(size) { warn!("tcp set_recv_buffer_size error: {_e}"); } } if let Err(_e) = socket.set_tcp_nodelay(config.nodelay) { warn!("tcp set_tcp_nodelay error: {_e}"); } let connect = connector.connect(socket.into(), *addr); let sleep = connect_timeout.map(|dur| connector.sleep(dur)); Ok(async move { match sleep { Some(sleep) => match futures_util::future::select(pin!(sleep), pin!(connect)).await { Either::Left(((), _)) => { Err(io::Error::new(io::ErrorKind::TimedOut, "connect timeout").into()) } Either::Right((Ok(s), _)) => Ok(s), Either::Right((Err(e), _)) => Err(e.into()), }, None => connect.await.map_err(Into::into), } .map_err(ConnectError::m("tcp connect error")) }) } impl ConnectingTcp where S::TcpStream: From, { pub(super) async fn connect( mut self, config: &TcpOptions, ) -> Result { match self.fallback { None => self.preferred.connect(config).await, Some(mut fallback) => { let preferred_fut = pin!(self.preferred.connect(config)); let fallback_fut = pin!(fallback.remote.connect(config)); let 
fallback_delay = pin!(fallback.delay); let (result, future) = match futures_util::future::select(preferred_fut, fallback_delay).await { Either::Left((result, _fallback_delay)) => { (result, Either::Right(fallback_fut)) } Either::Right(((), preferred_fut)) => { // Delay is done, start polling both the preferred and the fallback futures_util::future::select(preferred_fut, fallback_fut) .await .factor_first() } }; if result.is_err() { // Fallback to the remaining future (could be preferred or fallback) // if we get an error future.await } else { result } } } } } // Not publicly exported (so missing_docs doesn't trigger). pub struct ConnectError { pub(super) msg: &'static str, pub(super) addr: Option, pub(super) cause: Option, } impl ConnectError { pub(super) fn new(msg: &'static str, cause: E) -> ConnectError where E: Into, { ConnectError { msg, addr: None, cause: Some(cause.into()), } } pub(super) fn dns(cause: E) -> ConnectError where E: Into, { ConnectError::new("dns error", cause) } pub(super) fn m(msg: &'static str) -> impl FnOnce(E) -> ConnectError where E: Into, { move |cause| ConnectError::new(msg, cause) } } impl fmt::Debug for ConnectError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut b = f.debug_tuple("ConnectError"); b.field(&self.msg); if let Some(ref addr) = self.addr { b.field(addr); } if let Some(ref cause) = self.cause { b.field(cause); } b.finish() } } impl fmt::Display for ConnectError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(self.msg) } } impl StdError for ConnectError { fn source(&self) -> Option<&(dyn StdError + 'static)> { self.cause.as_ref().map(|e| &**e as _) } } /// Options for configuring socket bind behavior for outbound connections. 
#[derive(Debug, Clone, Hash, PartialEq, Eq, Default)]
pub(crate) struct SocketBindOptions {
    #[cfg(any(
        target_os = "illumos",
        target_os = "ios",
        target_os = "macos",
        target_os = "solaris",
        target_os = "tvos",
        target_os = "visionos",
        target_os = "watchos",
        target_os = "android",
        target_os = "fuchsia",
        target_os = "linux",
    ))]
    // Network interface name to bind outbound sockets to.
    pub interface: Option<Cow<'static, str>>,
    // Local IPv4 address to bind before connecting, if any.
    pub ipv4_address: Option<Ipv4Addr>,
    // Local IPv6 address to bind before connecting, if any.
    pub ipv6_address: Option<Ipv6Addr>,
}

impl SocketBindOptions {
    /// Sets the name of the network interface to bind the socket to.
    ///
    /// ## Platform behavior
    /// - On Linux/Fuchsia/Android: sets `SO_BINDTODEVICE`
    /// - On macOS/illumos/Solaris/iOS/etc.: sets `IP_BOUND_IF`
    ///
    /// If `interface` is `None`, the socket will not be explicitly bound to any device.
    ///
    /// # Errors
    ///
    /// On platforms that require a `CString` (e.g. macOS), this will return an error if the
    /// interface name contains an internal null byte (`\0`), which is invalid in C strings.
    ///
    /// # See Also
    /// - [VRF documentation](https://www.kernel.org/doc/Documentation/networking/vrf.txt)
    /// - [`man 7 socket`](https://man7.org/linux/man-pages/man7/socket.7.html)
    /// - [`man 7p ip`](https://docs.oracle.com/cd/E86824_01/html/E54777/ip-7p.html)
    #[cfg(any(
        target_os = "android",
        target_os = "fuchsia",
        target_os = "illumos",
        target_os = "ios",
        target_os = "linux",
        target_os = "macos",
        target_os = "solaris",
        target_os = "tvos",
        target_os = "visionos",
        target_os = "watchos",
    ))]
    #[inline]
    pub fn set_interface<I>(&mut self, interface: I) -> &mut Self
    where
        I: Into<Cow<'static, str>>,
    {
        self.interface = Some(interface.into());
        self
    }

    /// Set that all sockets are bound to the configured address before connection.
    ///
    /// If `None`, the sockets will not be bound.
    ///
    /// Default is `None`.
#[inline] pub fn set_local_address(&mut self, local_address: V) where V: Into>, { match local_address.into() { Some(IpAddr::V4(a)) => { self.ipv4_address = Some(a); } Some(IpAddr::V6(a)) => { self.ipv6_address = Some(a); } _ => {} }; } /// Set that all sockets are bound to the configured IPv4 or IPv6 address (depending on host's /// preferences) before connection. /// /// If `None`, the sockets will not be bound. /// /// Default is `None`. #[inline] pub fn set_local_addresses(&mut self, ipv4_address: V4, ipv6_address: V6) where V4: Into>, V6: Into>, { if let Some(addr) = ipv4_address.into() { self.ipv4_address = Some(addr); } if let Some(addr) = ipv6_address.into() { self.ipv6_address = Some(addr); } } } #[derive(Clone)] pub(crate) struct TcpOptions { pub enforce_http: bool, pub connect_timeout: Option, pub happy_eyeballs_timeout: Option, pub nodelay: bool, pub reuse_address: bool, pub send_buffer_size: Option, pub recv_buffer_size: Option, #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] pub tcp_user_timeout: Option, pub tcp_keepalive: TcpKeepaliveOptions, pub socket_bind: SocketBindOptions, } #[derive(Default, Debug, Clone, Copy)] pub(crate) struct TcpKeepaliveOptions { pub time: Option, #[cfg(any( target_os = "android", target_os = "dragonfly", target_os = "freebsd", target_os = "fuchsia", target_os = "illumos", target_os = "ios", target_os = "visionos", target_os = "linux", target_os = "macos", target_os = "netbsd", target_os = "tvos", target_os = "watchos", target_os = "windows", target_os = "cygwin", ))] pub interval: Option, #[cfg(any( target_os = "android", target_os = "dragonfly", target_os = "freebsd", target_os = "fuchsia", target_os = "illumos", target_os = "ios", target_os = "visionos", target_os = "linux", target_os = "macos", target_os = "netbsd", target_os = "tvos", target_os = "watchos", target_os = "cygwin", target_os = "windows", ))] pub retries: Option, } impl TcpKeepaliveOptions { /// Converts into a 
`socket2::TcpKeealive` if there is any keep alive configuration. pub(crate) fn into_tcpkeepalive(self) -> Option { let mut dirty = false; let mut ka = TcpKeepalive::new(); if let Some(time) = self.time { ka = ka.with_time(time); dirty = true } // Set the value of the `TCP_KEEPINTVL` option. On Windows, this sets the // value of the `tcp_keepalive` struct's `keepaliveinterval` field. // // Sets the time interval between TCP keepalive probes. // // Some platforms specify this value in seconds, so sub-second // specifications may be omitted. #[cfg(any( target_os = "android", target_os = "dragonfly", target_os = "freebsd", target_os = "fuchsia", target_os = "illumos", target_os = "ios", target_os = "visionos", target_os = "linux", target_os = "macos", target_os = "netbsd", target_os = "tvos", target_os = "watchos", target_os = "windows", target_os = "cygwin", ))] { if let Some(interval) = self.interval { dirty = true; ka = ka.with_interval(interval) }; } // Set the value of the `TCP_KEEPCNT` option. // // Set the maximum number of TCP keepalive probes that will be sent before // dropping a connection, if TCP keepalive is enabled on this socket. #[cfg(any( target_os = "android", target_os = "dragonfly", target_os = "freebsd", target_os = "fuchsia", target_os = "illumos", target_os = "ios", target_os = "visionos", target_os = "linux", target_os = "macos", target_os = "netbsd", target_os = "tvos", target_os = "watchos", target_os = "cygwin", target_os = "windows", ))] if let Some(retries) = self.retries { dirty = true; ka = ka.with_retries(retries) }; if dirty { Some(ka) } else { None } } } ================================================ FILE: src/client/conn/tls_info.rs ================================================ use bytes::Bytes; use tokio::net::TcpStream; #[cfg(unix)] use tokio::net::UnixStream; use tokio_btls::SslStream; use crate::tls::{TlsInfo, conn::MaybeHttpsStream}; /// A trait for extracting TLS information from a connection. 
/// /// Implementors can provide access to peer certificate data or other TLS-related metadata. /// For non-TLS connections, this typically returns `None`. pub trait TlsInfoFactory { fn tls_info(&self) -> Option; } fn extract_tls_info(ssl_stream: &SslStream) -> TlsInfo { let ssl = ssl_stream.ssl(); TlsInfo { peer_certificate: ssl .peer_certificate() .and_then(|cert| cert.to_der().ok()) .map(Bytes::from), peer_certificate_chain: ssl.peer_cert_chain().map(|chain| { chain .iter() .filter_map(|cert| cert.to_der().ok()) .map(Bytes::from) .collect() }), } } // ===== impl TcpStream ===== impl TlsInfoFactory for TcpStream { fn tls_info(&self) -> Option { None } } impl TlsInfoFactory for SslStream { #[inline] fn tls_info(&self) -> Option { Some(extract_tls_info(self)) } } impl TlsInfoFactory for MaybeHttpsStream { fn tls_info(&self) -> Option { match self { MaybeHttpsStream::Https(tls) => tls.tls_info(), MaybeHttpsStream::Http(_) => None, } } } impl TlsInfoFactory for SslStream> { #[inline] fn tls_info(&self) -> Option { Some(extract_tls_info(self)) } } // ===== impl UnixStream ===== #[cfg(unix)] impl TlsInfoFactory for UnixStream { fn tls_info(&self) -> Option { None } } #[cfg(unix)] impl TlsInfoFactory for SslStream { #[inline] fn tls_info(&self) -> Option { Some(extract_tls_info(self)) } } #[cfg(unix)] impl TlsInfoFactory for MaybeHttpsStream { fn tls_info(&self) -> Option { match self { MaybeHttpsStream::Https(tls) => tls.tls_info(), MaybeHttpsStream::Http(_) => None, } } } #[cfg(unix)] impl TlsInfoFactory for SslStream> { #[inline] fn tls_info(&self) -> Option { Some(extract_tls_info(self)) } } ================================================ FILE: src/client/conn/uds.rs ================================================ use std::{ io, path::Path, pin::Pin, sync::Arc, task::{Context, Poll}, }; use http::Uri; use tokio::net::UnixStream; use super::{Connected, Connection}; type ConnectResult = io::Result; type BoxConnecting = Pin + Send>>; #[derive(Clone)] pub struct 
UnixConnector { path: Arc, } impl UnixConnector { /// Create a new [`UnixConnector`]. pub fn new(path: impl Into>) -> Self { Self { path: path.into() } } } impl tower::Service for UnixConnector { type Response = UnixStream; type Error = io::Error; type Future = BoxConnecting; #[inline] fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) } fn call(&mut self, _: Uri) -> Self::Future { let fut = UnixStream::connect(self.path.clone()); Box::pin(async move { let io = fut.await?; Ok::<_, io::Error>(io) }) } } impl Connection for UnixStream { #[inline] fn connected(&self) -> Connected { Connected::new() } } ================================================ FILE: src/client/conn/verbose.rs ================================================ use super::AsyncConnWithInfo; /// Controls whether to enable verbose tracing for connections. /// /// When enabled (with the `tracing` feature), connections are wrapped to log I/O operations for /// debugging. #[derive(Clone, Copy)] pub struct Verbose(pub(super) bool); impl Verbose { pub const OFF: Verbose = Verbose(false); #[cfg_attr(not(feature = "tracing"), inline(always))] pub(super) fn wrap(&self, conn: T) -> Box where T: AsyncConnWithInfo + 'static, { #[cfg(feature = "tracing")] if self.0 { return Box::new(sealed::Wrapper { id: crate::util::fast_random(), inner: conn, }); } Box::new(conn) } } #[cfg(feature = "tracing")] mod sealed { use std::{ fmt, io::{self, IoSlice}, pin::Pin, task::{Context, Poll}, }; use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; use super::super::{Connected, Connection, TlsInfoFactory}; use crate::{tls::TlsInfo, util::Escape}; pub(super) struct Wrapper { pub(super) id: u64, pub(super) inner: T, } impl Connection for Wrapper { #[inline] fn connected(&self) -> Connected { self.inner.connected() } } impl AsyncRead for Wrapper { fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context, buf: &mut ReadBuf<'_>, ) -> Poll> { match Pin::new(&mut self.inner).poll_read(cx, buf) { 
Poll::Ready(Ok(())) => { trace!("{:08x} read: {:?}", self.id, Escape::new(buf.filled())); Poll::Ready(Ok(())) } Poll::Ready(Err(e)) => Poll::Ready(Err(e)), Poll::Pending => Poll::Pending, } } } impl AsyncWrite for Wrapper { fn poll_write( mut self: Pin<&mut Self>, cx: &mut Context, buf: &[u8], ) -> Poll> { match Pin::new(&mut self.inner).poll_write(cx, buf) { Poll::Ready(Ok(n)) => { trace!("{:08x} write: {:?}", self.id, Escape::new(&buf[..n])); Poll::Ready(Ok(n)) } Poll::Ready(Err(e)) => Poll::Ready(Err(e)), Poll::Pending => Poll::Pending, } } fn poll_write_vectored( mut self: Pin<&mut Self>, cx: &mut Context<'_>, bufs: &[IoSlice<'_>], ) -> Poll> { match Pin::new(&mut self.inner).poll_write_vectored(cx, bufs) { Poll::Ready(Ok(nwritten)) => { trace!( "{:08x} write (vectored): {:?}", self.id, Vectored { bufs, nwritten } ); Poll::Ready(Ok(nwritten)) } Poll::Ready(Err(e)) => Poll::Ready(Err(e)), Poll::Pending => Poll::Pending, } } #[inline] fn is_write_vectored(&self) -> bool { self.inner.is_write_vectored() } #[inline] fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { Pin::new(&mut self.inner).poll_flush(cx) } #[inline] fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { Pin::new(&mut self.inner).poll_shutdown(cx) } } impl TlsInfoFactory for Wrapper { fn tls_info(&self) -> Option { self.inner.tls_info() } } struct Vectored<'a, 'b> { bufs: &'a [IoSlice<'b>], nwritten: usize, } impl fmt::Debug for Vectored<'_, '_> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let mut left = self.nwritten; for buf in self.bufs.iter() { if left == 0 { break; } let n = std::cmp::min(left, buf.len()); Escape::new(&buf[..n]).fmt(f)?; left -= n; } Ok(()) } } } ================================================ FILE: src/client/conn.rs ================================================ mod connector; mod http; mod proxy; mod tcp; mod tls_info; #[cfg(unix)] mod uds; mod verbose; pub mod descriptor; use std::{ fmt::{self, Debug, Formatter}, io, 
io::IoSlice, pin::Pin, sync::{ Arc, atomic::{AtomicBool, Ordering}, }, task::{Context, Poll}, }; use ::http::{Extensions, HeaderMap, HeaderValue}; use pin_project_lite::pin_project; use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; use tokio_btls::SslStream; use tower::{ BoxError, util::{BoxCloneSyncService, BoxCloneSyncServiceLayer}, }; #[cfg(feature = "socks")] pub(super) use self::proxy::socks; pub(super) use self::{ connector::Connector, http::{HttpInfo, HttpTransport}, proxy::tunnel, tcp::{SocketBindOptions, tokio::TokioTcpConnector}, tls_info::TlsInfoFactory, }; use crate::{dns::DynResolver, proxy::matcher::Intercept, tls::TlsInfo}; /// HTTP connector with dynamic DNS resolver. pub type HttpConnector = self::http::HttpConnector; /// Boxed connector service for establishing connections. pub type BoxedConnectorService = BoxCloneSyncService; /// Boxed layer for building a boxed connector service. pub type BoxedConnectorLayer = BoxCloneSyncServiceLayer; /// A wrapper type for [`descriptor::ConnectionDescriptor`] used to erase its concrete type. /// /// [`Unnameable`] allows passing connection requests through trait objects or /// type-erased interfaces where the concrete type of the request is not important. /// This is mainly used internally to simplify service composition and dynamic dispatch. pub struct Unnameable(pub(super) descriptor::ConnectionDescriptor); /// A trait alias for types that can be used as async connections. /// /// This trait is automatically implemented for any type that satisfies the required bounds: /// - [`AsyncRead`] + [`AsyncWrite`]: For I/O operations /// - [`Connection`]: For connection metadata /// - [`Send`] + [`Sync`] + [`Unpin`] + `'static`: For async/await compatibility trait AsyncConn: AsyncRead + AsyncWrite + Connection + Send + Sync + Unpin + 'static {} /// An async connection that can also provide TLS information. /// /// This extends [`AsyncConn`] with the ability to extract TLS certificate information /// when available. 
Useful for connections that may be either plain TCP or TLS-encrypted. trait AsyncConnWithInfo: AsyncConn + TlsInfoFactory {} impl AsyncConn for T where T: AsyncRead + AsyncWrite + Connection + Send + Sync + Unpin + 'static {} impl AsyncConnWithInfo for T where T: AsyncConn + TlsInfoFactory {} pin_project! { /// Note: the `is_proxy` member means *is plain text HTTP proxy*. /// This tells core whether the URI should be written in /// * origin-form (`GET /just/a/path HTTP/1.1`), when `proxy == None`, or /// * absolute-form (`GET http://foo.bar/and/a/path HTTP/1.1`), otherwise. pub struct Conn { tls_info: bool, proxy: Option, #[pin] stream: Box, } } pin_project! { /// A wrapper around `SslStream` that adapts it for use as a generic async connection. /// /// This type enables unified handling of plain TCP and TLS-encrypted streams by providing /// implementations of `Connection`, `Read`, `Write`, and `TlsInfoFactory`. /// It is mainly used internally to abstract over different connection types. pub struct TlsConn { #[pin] stream: SslStream, } } /// Describes a type returned by a connector. pub trait Connection { /// Return metadata describing the connection. fn connected(&self) -> Connected; } /// Indicates the negotiated ALPN protocol. #[derive(Clone, Copy, Debug, PartialEq)] enum Alpn { H2, None, } /// A pill that can be poisoned to indicate that a connection should not be reused. #[derive(Clone)] struct PoisonPill(Arc); /// A boxed asynchronous connection with associated information. #[derive(Debug)] struct Extra(Box); /// Inner trait for extra connection information. trait ExtraInner: Send + Sync + Debug { fn clone_box(&self) -> Box; fn set(&self, res: &mut Extensions); } // This indirection allows the `Connected` to have a type-erased "extra" value, // while that type still knows its inner extra type. This allows the correct // TypeId to be used when inserting into `res.extensions_mut()`. 
#[derive(Debug, Clone)] struct ExtraEnvelope(T); /// Chains two `ExtraInner` implementations together, inserting both into /// the extensions. #[derive(Debug)] struct ExtraChain(Box, T); /// Information about an HTTP proxy identity. #[derive(Debug, Default, Clone)] struct ProxyIdentity { is_proxied: bool, auth: Option, headers: Option, } /// Extra information about the connected transport. /// /// This can be used to inform recipients about things like if ALPN /// was used, or if connected to an HTTP proxy. #[derive(Debug, Clone)] pub struct Connected { alpn: Alpn, proxy: Box, extra: Option, poisoned: PoisonPill, } // ==== impl Conn ==== impl Connection for Conn { fn connected(&self) -> Connected { let mut connected = self.stream.connected(); if let Some(proxy) = &self.proxy { connected = connected.proxy(proxy.clone()); } if self.tls_info { if let Some(tls_info) = self.stream.tls_info() { connected.extra(tls_info) } else { connected } } else { connected } } } impl AsyncRead for Conn { #[inline] fn poll_read( self: Pin<&mut Self>, cx: &mut Context, buf: &mut ReadBuf<'_>, ) -> Poll> { AsyncRead::poll_read(self.project().stream, cx, buf) } } impl AsyncWrite for Conn { #[inline] fn poll_write( self: Pin<&mut Self>, cx: &mut Context, buf: &[u8], ) -> Poll> { AsyncWrite::poll_write(self.project().stream, cx, buf) } #[inline] fn poll_write_vectored( self: Pin<&mut Self>, cx: &mut Context<'_>, bufs: &[IoSlice<'_>], ) -> Poll> { AsyncWrite::poll_write_vectored(self.project().stream, cx, bufs) } #[inline] fn is_write_vectored(&self) -> bool { self.stream.is_write_vectored() } #[inline] fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { AsyncWrite::poll_flush(self.project().stream, cx) } #[inline] fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { AsyncWrite::poll_shutdown(self.project().stream, cx) } } // ===== impl TlsConn ===== impl Connection for TlsConn where T: Connection, { fn connected(&self) -> Connected { let connected = 
self.stream.get_ref().connected(); if self.stream.ssl().selected_alpn_protocol() == Some(b"h2") { connected.negotiated_h2() } else { connected } } } impl AsyncRead for TlsConn { #[inline] fn poll_read( self: Pin<&mut Self>, cx: &mut Context, buf: &mut ReadBuf<'_>, ) -> Poll> { AsyncRead::poll_read(self.project().stream, cx, buf) } } impl AsyncWrite for TlsConn { #[inline] fn poll_write( self: Pin<&mut Self>, cx: &mut Context, buf: &[u8], ) -> Poll> { AsyncWrite::poll_write(self.project().stream, cx, buf) } #[inline] fn poll_write_vectored( self: Pin<&mut Self>, cx: &mut Context<'_>, bufs: &[IoSlice<'_>], ) -> Poll> { AsyncWrite::poll_write_vectored(self.project().stream, cx, bufs) } #[inline] fn is_write_vectored(&self) -> bool { self.stream.is_write_vectored() } #[inline] fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { AsyncWrite::poll_flush(self.project().stream, cx) } #[inline] fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { AsyncWrite::poll_shutdown(self.project().stream, cx) } } impl TlsInfoFactory for TlsConn where SslStream: TlsInfoFactory, { #[inline] fn tls_info(&self) -> Option { self.stream.tls_info() } } // ===== impl PoisonPill ===== impl fmt::Debug for PoisonPill { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { // print the address of the pill—this makes debugging issues much easier write!( f, "PoisonPill@{:p} {{ poisoned: {} }}", self.0, self.0.load(Ordering::Relaxed) ) } } impl PoisonPill { /// Create a healthy (not poisoned) pill. #[inline] fn healthy() -> Self { Self(Arc::new(AtomicBool::new(false))) } } // ===== impl Connected ===== impl Connected { /// Create new `Connected` type with empty metadata. pub fn new() -> Connected { Connected { alpn: Alpn::None, proxy: Box::new(ProxyIdentity::default()), extra: None, poisoned: PoisonPill::healthy(), } } /// Set extra connection information to be set in the extensions of every `Response`. 
pub fn extra(mut self, extra: T) -> Connected { if let Some(prev) = self.extra { self.extra = Some(Extra(Box::new(ExtraChain(prev.0, extra)))); } else { self.extra = Some(Extra(Box::new(ExtraEnvelope(extra)))); } self } /// Copies the extra connection information into an `Extensions` map. #[inline] pub fn set_extras(&self, extensions: &mut Extensions) { if let Some(extra) = &self.extra { extra.set(extensions); } } /// Set that the proxy was used for this connected transport. pub fn proxy(mut self, proxy: Intercept) -> Connected { self.proxy.is_proxied = true; if let Some(auth) = proxy.basic_auth() { self.proxy.auth.replace(auth.clone()); } if let Some(headers) = proxy.custom_headers() { self.proxy.headers.replace(headers.clone()); } self } /// Determines if the connected transport is to an HTTP proxy. #[inline] pub fn is_proxied(&self) -> bool { self.proxy.is_proxied } /// Get the proxy identity information for the connected transport. #[inline] pub fn proxy_auth(&self) -> Option<&HeaderValue> { self.proxy.auth.as_ref() } /// Get the custom proxy headers for the connected transport. #[inline] pub fn proxy_headers(&self) -> Option<&HeaderMap> { self.proxy.headers.as_ref() } /// Set that the connected transport negotiated HTTP/2 as its next protocol. #[inline] pub fn negotiated_h2(mut self) -> Connected { self.alpn = Alpn::H2; self } /// Determines if the connected transport negotiated HTTP/2 as its next protocol. #[inline] pub fn is_negotiated_h2(&self) -> bool { self.alpn == Alpn::H2 } /// Determine if this connection is poisoned #[inline] pub fn poisoned(&self) -> bool { self.poisoned.0.load(Ordering::Relaxed) } /// Poison this connection /// /// A poisoned connection will not be reused for subsequent requests by the pool #[allow(unused)] #[inline] pub fn poison(&self) { self.poisoned.0.store(true, Ordering::Relaxed); debug!( "connection was poisoned. 
this connection will not be reused for subsequent requests" ); } } // ===== impl Extra ===== impl Extra { #[inline] fn set(&self, res: &mut Extensions) { self.0.set(res); } } impl Clone for Extra { fn clone(&self) -> Extra { Extra(self.0.clone_box()) } } // ===== impl ExtraEnvelope ===== impl ExtraInner for ExtraEnvelope where T: Clone + Send + Sync + Debug + 'static, { fn clone_box(&self) -> Box { Box::new(self.clone()) } fn set(&self, res: &mut Extensions) { res.insert(self.0.clone()); } } // ===== impl ExtraChain ===== impl Clone for ExtraChain { fn clone(&self) -> Self { ExtraChain(self.0.clone_box(), self.1.clone()) } } impl ExtraInner for ExtraChain where T: Clone + Send + Sync + Debug + 'static, { fn clone_box(&self) -> Box { Box::new(self.clone()) } fn set(&self, res: &mut Extensions) { self.0.set(res); res.insert(self.1.clone()); } } ================================================ FILE: src/client/core/body/incoming.rs ================================================ use std::{ fmt, pin::Pin, task::{Context, Poll, ready}, }; use bytes::Bytes; use http::HeaderMap; use http_body::{Body, Frame, SizeHint}; use tokio::sync::{mpsc, oneshot}; use tokio_util::sync::PollSender; use super::{DecodedLength, watch}; use crate::client::core::{Error, Result, proto::http2::ping}; /// A stream of [`Bytes`], used when receiving bodies from the network. /// /// Note that Users should not instantiate this struct directly. When working with the client, /// [`Incoming`] is returned to you in responses. #[must_use = "streams do nothing unless polled"] pub struct Incoming { kind: Kind, } enum Kind { H1 { want_tx: watch::Sender, data_rx: mpsc::Receiver>, trailers_rx: oneshot::Receiver, content_length: DecodedLength, data_done: bool, }, H2 { ping: ping::Recorder, recv: http2::RecvStream, content_length: DecodedLength, data_done: bool, }, Empty, } /// A sender half created through [`Body::channel()`]. /// /// Useful when wanting to stream chunks from another thread. 
/// /// ## Body Closing /// /// Note that the request body will always be closed normally when the sender is dropped (meaning /// that the empty terminating chunk will be sent to the remote). If you desire to close the /// connection with an incomplete response (e.g. in the case of an error during asynchronous /// processing), call the [`Sender::abort()`] method to abort the body in an abnormal fashion. /// /// [`Body::channel()`]: struct.Body.html#method.channel /// [`Sender::abort()`]: struct.Sender.html#method.abort #[must_use = "Sender does nothing unless sent on"] pub(crate) struct Sender { want_rx: watch::Receiver, data_tx: PollSender>, trailers_tx: Option>, } // ===== impl Incoming ===== impl Incoming { #[inline] pub(crate) fn empty() -> Incoming { Incoming { kind: Kind::Empty } } pub(crate) fn h1(content_length: DecodedLength, wanter: bool) -> (Sender, Incoming) { let (data_tx, data_rx) = mpsc::channel(2); let (trailers_tx, trailers_rx) = oneshot::channel(); // If wanter is true, `Sender::poll_ready()` won't becoming ready // until the `Body` has been polled for data once. let (want_tx, want_rx) = watch::channel(wanter); ( Sender { want_rx, data_tx: PollSender::new(data_tx), trailers_tx: Some(trailers_tx), }, Incoming { kind: Kind::H1 { want_tx, data_rx, trailers_rx, content_length, data_done: false, }, }, ) } pub(crate) fn h2( recv: http2::RecvStream, mut content_length: DecodedLength, ping: ping::Recorder, ) -> Self { // If the stream is already EOS, then the "unknown length" is clearly // actually ZERO. 
if !content_length.is_exact() && recv.is_end_stream() { content_length = DecodedLength::ZERO; } Incoming { kind: Kind::H2 { ping, recv, content_length, data_done: false, }, } } } impl Body for Incoming { type Data = Bytes; type Error = Error; fn poll_frame( mut self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll, Self::Error>>> { match self.kind { Kind::H1 { ref want_tx, ref mut data_rx, ref mut trailers_rx, ref mut content_length, ref mut data_done, } => { want_tx.ready(); if !*data_done { match ready!(data_rx.poll_recv(cx)) { Some(Ok(chunk)) => { content_length.sub_if(chunk.len() as u64); return Poll::Ready(Some(Ok(Frame::data(chunk)))); } Some(Err(err)) => return Poll::Ready(Some(Err(err))), None => { // fall through to trailers *data_done = true; } } } // check trailers after data is terminated if !trailers_rx.is_terminated() { if let Ok(trailers) = ready!(Pin::new(trailers_rx).poll(cx)) { return Poll::Ready(Some(Ok(Frame::trailers(trailers)))); } } Poll::Ready(None) } Kind::H2 { ref ping, ref mut recv, ref mut content_length, ref mut data_done, } => { if !*data_done { match ready!(recv.poll_data(cx)) { Some(Ok(bytes)) => { let _ = recv.flow_control().release_capacity(bytes.len()); content_length.sub_if(bytes.len() as u64); ping.record_data(bytes.len()); return Poll::Ready(Some(Ok(Frame::data(bytes)))); } Some(Err(e)) => { if let Some(http2::Reason::NO_ERROR) = e.reason() { // As mentioned in RFC 7540 Section 8.1, a RST_STREAM with NO_ERROR // indicates an early response, and should cause the body reading // to stop, but not fail it: return Poll::Ready(None); } else { return Poll::Ready(Some(Err(Error::new_body(e)))); } } None => { // fall through to trailers *data_done = true; } } } // after data, check trailers match ready!(recv.poll_trailers(cx)) { Ok(t) => { ping.record_non_data(); Poll::Ready(Ok(t.map(Frame::trailers)).transpose()) } Err(e) => { if let Some(http2::Reason::NO_ERROR) = e.reason() { // Same as above, a RST_STREAM with NO_ERROR indicates an 
early // response, and should cause reading the trailers to stop, but // not fail it: Poll::Ready(None) } else { Poll::Ready(Some(Err(Error::new_h2(e)))) } } } } Kind::Empty => Poll::Ready(None), } } #[inline] fn is_end_stream(&self) -> bool { match self.kind { Kind::H1 { content_length, .. } => content_length == DecodedLength::ZERO, Kind::H2 { recv: ref h2, .. } => h2.is_end_stream(), Kind::Empty => true, } } #[inline] fn size_hint(&self) -> SizeHint { match self.kind { Kind::H1 { content_length, .. } | Kind::H2 { content_length, .. } => content_length .into_opt() .map_or_else(SizeHint::default, SizeHint::with_exact), Kind::Empty => SizeHint::with_exact(0), } } } impl fmt::Debug for Incoming { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut builder = f.debug_tuple(stringify!(Incoming)); match self.kind { Kind::Empty => builder.field(&stringify!(Empty)), _ => builder.field(&stringify!(Streaming)), }; builder.finish() } } // ===== impl Sender ===== impl Sender { /// Check to see if this `Sender` can send more data. #[inline] pub(crate) fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { // Check if the receiver end has tried polling for the body yet ready!(self.want_rx.poll_ready(cx)?); self.data_tx .poll_reserve(cx) .map_err(|_| Error::new_closed()) } /// Send data on this channel. /// /// # Errors /// /// Returns `Err(Bytes)` if the channel could not (currently) accept /// another `Bytes`. /// /// # Panics /// /// If `poll_ready` was not successfully called prior to calling `send_data`, then this method /// will panic. #[inline] pub(crate) fn send_data(&mut self, chunk: Bytes) -> Result<(), Bytes> { self.data_tx.send_item(Ok(chunk)).map_err(|err| { err.into_inner() .expect("value returned") .expect("just sent Ok") }) } /// Send trailers on this channel. /// /// # Errors /// /// Returns `Err(HeaderMap)` if the channel could not (currently) accept /// another `HeaderMap`. 
#[inline] pub(crate) fn send_trailers(&mut self, trailers: HeaderMap) -> Result<(), Option> { self.trailers_tx .take() .ok_or(None)? .send(trailers) .map_err(Some) } /// Send an error on this channel, which will cause the body stream to end with an error. #[inline] pub(crate) fn send_error(&mut self, err: Error) { self.data_tx .get_ref() .map(|sender| sender.try_send(Err(err))); } } #[cfg(test)] mod tests { use std::{mem, task::Poll}; use http_body_util::BodyExt; use super::{Body, DecodedLength, Error, Incoming, Result, Sender, SizeHint}; impl Incoming { /// Create a `Body` stream with an associated sender half. /// /// Useful when wanting to stream chunks from another thread. pub(crate) fn channel() -> (Sender, Incoming) { Self::h1(DecodedLength::CHUNKED, /* wanter = */ false) } } impl Sender { async fn ready(&mut self) -> Result<()> { std::future::poll_fn(|cx| self.poll_ready(cx)).await } fn abort(mut self) { self.send_error(Error::new_body_write_aborted()); } } #[test] fn test_size_of() { // These are mostly to help catch *accidentally* increasing // the size by too much. 
let body_size = mem::size_of::(); let body_expected_size = mem::size_of::() * 6; assert!( body_size <= body_expected_size, "Body size = {body_size} <= {body_expected_size}", ); //assert_eq!(body_size, mem::size_of::>(), "Option"); assert_eq!( mem::size_of::(), mem::size_of::() * 8, "Sender" ); assert_eq!( mem::size_of::(), mem::size_of::>(), "Option" ); } #[test] fn size_hint() { fn eq(body: Incoming, b: SizeHint, note: &str) { let a = body.size_hint(); assert_eq!(a.lower(), b.lower(), "lower for {note:?}"); assert_eq!(a.upper(), b.upper(), "upper for {note:?}"); } eq(Incoming::empty(), SizeHint::with_exact(0), "empty"); eq(Incoming::channel().1, SizeHint::new(), "channel"); eq( Incoming::h1(DecodedLength::new(4), /* wanter = */ false).1, SizeHint::with_exact(4), "channel with length", ); } #[tokio::test] async fn channel_abort() { let (tx, mut rx) = Incoming::channel(); tx.abort(); let err = rx.frame().await.unwrap().unwrap_err(); assert!(err.is_body_write_aborted(), "{err:?}"); } #[tokio::test] async fn channel_abort_when_buffer_is_full() { let (mut tx, mut rx) = Incoming::channel(); tx.ready().await.expect("ready"); tx.send_data("chunk 1".into()).expect("send 1"); // buffer is full, but can still send abort tx.abort(); let chunk1 = rx .frame() .await .expect("item 1") .expect("chunk 1") .into_data() .unwrap(); assert_eq!(chunk1, "chunk 1"); let err = rx.frame().await.unwrap().unwrap_err(); assert!(err.is_body_write_aborted(), "{err:?}"); } #[tokio::test] async fn channel_buffers_two() { let (mut tx, _rx) = Incoming::channel(); tx.ready().await.expect("ready"); tx.send_data("chunk 1".into()).expect("send 1"); tx.ready().await.expect("ready"); tx.send_data("chunk 2".into()).expect("send 2"); // buffer is now full, poll_ready should not be ready let res = tokio::time::timeout( std::time::Duration::from_millis(100), std::future::poll_fn(|cx| tx.poll_ready(cx)), ) .await; assert!(res.is_err(), "poll_ready unexpectedly became ready"); } #[tokio::test] async fn 
channel_empty() { let (_, mut rx) = Incoming::channel(); assert!(rx.frame().await.is_none()); } #[test] fn channel_ready() { let (mut tx, _rx) = Incoming::h1(DecodedLength::CHUNKED, /* wanter = */ false); let mut tx_ready = tokio_test::task::spawn(tx.ready()); assert!(tx_ready.poll().is_ready(), "tx is ready immediately"); } #[test] fn channel_wanter() { let (mut tx, mut rx) = Incoming::h1(DecodedLength::CHUNKED, /* wanter = */ true); let mut tx_ready = tokio_test::task::spawn(tx.ready()); let mut rx_data = tokio_test::task::spawn(rx.frame()); assert!( tx_ready.poll().is_pending(), "tx isn't ready before rx has been polled" ); assert!(rx_data.poll().is_pending(), "poll rx.data"); assert!(tx_ready.is_woken(), "rx poll wakes tx"); assert!( tx_ready.poll().is_ready(), "tx is ready after rx has been polled" ); } #[test] fn channel_notices_closure() { let (mut tx, rx) = Incoming::h1(DecodedLength::CHUNKED, /* wanter = */ true); let mut tx_ready = tokio_test::task::spawn(tx.ready()); assert!( tx_ready.poll().is_pending(), "tx isn't ready before rx has been polled" ); drop(rx); assert!(tx_ready.is_woken(), "dropping rx wakes tx"); match tx_ready.poll() { Poll::Ready(Err(ref e)) if e.is_closed() => (), unexpected => panic!("tx poll ready unexpected: {unexpected:?}"), } } } ================================================ FILE: src/client/core/body/length.rs ================================================ use std::fmt; use crate::client::core::error::Parse; #[derive(Clone, Copy, PartialEq, Eq)] pub(crate) struct DecodedLength(u64); impl DecodedLength { pub(crate) const MAX_LEN: u64 = u64::MAX - 2; pub(crate) const CLOSE_DELIMITED: DecodedLength = DecodedLength(u64::MAX); pub(crate) const CHUNKED: DecodedLength = DecodedLength(u64::MAX - 1); pub(crate) const ZERO: DecodedLength = DecodedLength(0); /// Takes the length as a content-length without other checks. /// /// Should only be called if previously confirmed this isn't /// CLOSE_DELIMITED or CHUNKED. 
#[inline] pub(crate) fn danger_len(self) -> u64 { debug_assert!(self.0 < Self::CHUNKED.0); self.0 } /// Converts to an `Option` representing a Known or Unknown length. #[inline] pub(crate) fn into_opt(self) -> Option { match self { DecodedLength::CHUNKED | DecodedLength::CLOSE_DELIMITED => None, DecodedLength(known) => Some(known), } } /// Checks the `u64` is within the maximum allowed for content-length. pub(crate) fn checked_new(len: u64) -> Result { if len <= Self::MAX_LEN { Ok(DecodedLength(len)) } else { warn!( "content-length bigger than maximum: {} > {}", len, Self::MAX_LEN ); Err(Parse::TooLarge) } } /// Subtracts the given amount from the length, if it's a known length. pub(crate) fn sub_if(&mut self, amt: u64) { match *self { DecodedLength::CHUNKED | DecodedLength::CLOSE_DELIMITED => (), DecodedLength(ref mut known) => { *known -= amt; } } } /// Returns whether this represents an exact length. /// /// This includes 0, which of course is an exact known length. /// /// It would return false if "chunked" or otherwise size-unknown. #[inline] pub(crate) fn is_exact(&self) -> bool { self.0 <= Self::MAX_LEN } } impl From> for DecodedLength { fn from(len: Option) -> Self { // If the length is u64::MAX, oh well, just reported chunked. 
len.and_then(|len| Self::checked_new(len).ok()) .unwrap_or(DecodedLength::CHUNKED) } } impl fmt::Debug for DecodedLength { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { DecodedLength::CLOSE_DELIMITED => f.write_str("CLOSE_DELIMITED"), DecodedLength::CHUNKED => f.write_str("CHUNKED"), DecodedLength(n) => f.debug_tuple("DecodedLength").field(&n).finish(), } } } impl fmt::Display for DecodedLength { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { DecodedLength::CLOSE_DELIMITED => f.write_str("close-delimited"), DecodedLength::CHUNKED => f.write_str("chunked encoding"), DecodedLength::ZERO => f.write_str("empty"), DecodedLength(n) => write!(f, "content-length ({n} bytes)"), } } } #[cfg(test)] mod tests { use super::*; impl DecodedLength { pub(crate) fn new(len: u64) -> Self { debug_assert!(len <= Self::MAX_LEN); DecodedLength(len) } } #[test] fn sub_if_known() { let mut len = DecodedLength::new(30); len.sub_if(20); assert_eq!(len.0, 10); } #[test] fn sub_if_chunked() { let mut len = DecodedLength::CHUNKED; len.sub_if(20); assert_eq!(len, DecodedLength::CHUNKED); } } ================================================ FILE: src/client/core/body/watch.rs ================================================ //! An SPSC broadcast channel. //! //! - The value can only be a `u8`. //! - The consumer is only notified if the value is different. //! - The value `0` is reserved for closed. 
use std::{ sync::{ Arc, atomic::{AtomicU8, Ordering}, }, task::{self, Poll}, }; use futures_util::task::AtomicWaker; use crate::client::core::Error; type Value = u8; const READY: Value = 2; const PENDING: Value = 1; const CLOSED: Value = 0; pub(super) fn channel(wanter: bool) -> (Sender, Receiver) { let initial = if wanter { PENDING } else { READY }; let shared = Arc::new(Shared { value: AtomicU8::new(initial), waker: AtomicWaker::new(), }); ( Sender { shared: shared.clone(), }, Receiver { shared }, ) } struct Shared { value: AtomicU8, waker: AtomicWaker, } pub(super) struct Sender { shared: Arc, } pub(super) struct Receiver { shared: Arc, } // ===== impl Sender ===== impl Sender { #[inline(always)] pub(super) fn ready(&self) { self.send(READY); } fn send(&self, value: Value) { if self.shared.value.swap(value, Ordering::SeqCst) != value { self.shared.waker.wake(); } } } impl Drop for Sender { #[inline(always)] fn drop(&mut self) { self.send(CLOSED); } } // ===== impl Receiver ===== impl Receiver { #[inline(always)] pub(super) fn poll_ready(&self, cx: &mut task::Context<'_>) -> Poll> { self.shared.waker.register(cx.waker()); match self.shared.value.load(Ordering::SeqCst) { READY => Poll::Ready(Ok(())), PENDING => Poll::Pending, CLOSED => Poll::Ready(Err(Error::new_closed())), unexpected => unreachable!("watch value: {}", unexpected), } } } ================================================ FILE: src/client/core/body.rs ================================================ //! Streaming bodies for Requests and Responses //! //! For both [Clients](crate::client), requests and //! responses use streaming bodies, instead of complete buffering. This //! allows applications to not use memory they don't need, and allows exerting //! back-pressure on connections by only reading when asked. //! //! There are two pieces to this in crate::core:: //! //! - **The [\`Body`\] trait** describes all possible bodies. crate::core: allows any body type that //! 
implements `Body`, allowing applications to have fine-grained control over their streaming. //! - **The [`Incoming`] concrete type**, which is an implementation of `Body`, and returned by //! crate::core: as a "receive stream" (so, for server requests and client responses). //! //! There are additional implementations available in [`http-body-util`][], //! such as a `Full` or `Empty` body. //! //! [`http-body-util`]: https://docs.rs/http-body-util mod incoming; mod length; mod watch; pub(crate) use self::{ incoming::{Incoming, Sender}, length::DecodedLength, }; fn _assert_send_sync() { fn _assert_send() {} fn _assert_sync() {} _assert_send::(); _assert_sync::(); } ================================================ FILE: src/client/core/conn/http1.rs ================================================ //! HTTP/1 client connections use std::{ future::Future, pin::Pin, task::{Context, Poll, ready}, }; use bytes::Bytes; use http::{Request, Response}; use http_body::Body; use httparse::ParserConfig; use tokio::io::{AsyncRead, AsyncWrite}; use crate::client::core::{ Error, Result, body::Incoming, dispatch::{self, TrySendError}, error::BoxError, proto::{ self, http1::{self, Http1Options, conn::Conn, role::Client}, }, }; /// The sender side of an established connection. pub struct SendRequest { dispatch: dispatch::Sender, Response>, } /// Deconstructed parts of a `Connection`. /// /// This allows taking apart a `Connection` at a later time, in order to /// reclaim the IO object, and additional related pieces. #[derive(Debug)] #[non_exhaustive] pub struct Parts { /// The original IO object used in the handshake. pub io: T, /// A buffer of bytes that have been read but not processed as HTTP. /// /// For instance, if the `Connection` is used for an HTTP upgrade request, /// it is possible the server sent back the first bytes of the new protocol /// along with the response upgrade. 
/// /// You will want to check for any existing bytes if you plan to continue /// communicating on the IO object. pub read_buf: Bytes, } /// A future that processes all HTTP state for the IO object. /// /// In most cases, this should just be spawned into an executor, so that it /// can process incoming and outgoing messages, notice hangups, and the like. #[must_use = "futures do nothing unless polled"] pub struct Connection where T: AsyncRead + AsyncWrite, B: Body + 'static, { inner: http1::dispatch::Dispatcher, B, T, Client>, } impl Connection where T: AsyncRead + AsyncWrite + Unpin, B: Body + 'static, B::Error: Into, { /// Return the inner IO object, and additional information. /// /// Only works for HTTP/1 connections. HTTP/2 connections will panic. #[inline] pub fn into_parts(self) -> Parts { let (io, read_buf, _) = self.inner.into_inner(); Parts { io, read_buf } } } /// A builder to configure an HTTP connection. /// /// After setting options, the builder is used to create a handshake future. /// /// **Note**: The default values of options are *not considered stable*. They /// are subject to change at any time. #[derive(Clone, Debug)] pub struct Builder { opts: Http1Options, } // ===== impl SendRequest impl SendRequest { /// Polls to determine whether this sender can be used yet for a request. /// /// If the associated connection is closed, this returns an Error. #[inline] pub fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { self.dispatch.poll_ready(cx) } /// Waits until the dispatcher is ready /// /// If the associated connection is closed, this returns an Error. #[inline] pub async fn ready(&mut self) -> Result<()> { std::future::poll_fn(|cx| self.poll_ready(cx)).await } /// Checks if the connection is currently ready to send a request. /// /// # Note /// /// This is mostly a hint. 
Due to inherent latency of networks, it is /// possible that even after checking this is ready, sending a request /// may still fail because the connection was closed in the meantime. #[inline] pub fn is_ready(&self) -> bool { self.dispatch.is_ready() } } impl SendRequest where B: Body + 'static, { /// Sends a `Request` on the associated connection. /// /// Returns a future that if successful, yields the `Response`. /// /// # Error /// /// If there was an error before trying to serialize the request to the /// connection, the message will be returned as part of this error. pub fn try_send_request( &mut self, req: Request, ) -> impl Future, TrySendError>>> { let sent = self.dispatch.try_send(req); async move { match sent { Ok(rx) => match rx.await { Ok(res) => res, // this is definite bug if it happens, but it shouldn't happen! Err(_) => panic!("dispatch dropped without returning error"), }, Err(req) => { debug!("connection was not ready"); Err(TrySendError { error: Error::new_canceled().with("connection was not ready"), message: Some(req), }) } } } } } // ===== impl Connection impl Connection where T: AsyncRead + AsyncWrite + Unpin + Send, B: Body + 'static, B::Error: Into, { /// Enable this connection to support higher-level HTTP upgrades. #[inline] pub fn with_upgrades(self) -> upgrades::UpgradeableConnection { upgrades::UpgradeableConnection { inner: Some(self) } } } impl Future for Connection where T: AsyncRead + AsyncWrite + Unpin, B: Body + 'static, B::Data: Send, B::Error: Into, { type Output = Result<()>; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { match ready!(Pin::new(&mut self.inner).poll(cx))? { proto::Dispatched::Shutdown => Poll::Ready(Ok(())), proto::Dispatched::Upgrade(pending) => { // With no `Send` bound on `I`, we can't try to do // upgrades here. In case a user was trying to use // `upgrade` with this API, send a special // error letting them know about that. 
pending.manual(); Poll::Ready(Ok(())) } } } } // ===== impl Builder impl Builder { /// Creates a new connection builder. #[inline] pub fn new() -> Builder { Builder { opts: Default::default(), } } /// Provide a options configuration for the HTTP/1 connection. #[inline] pub fn options(&mut self, opts: Http1Options) { self.opts = opts; } /// Constructs a connection with the configured options and IO. /// /// Note, if [`Connection`] is not `await`-ed, [`SendRequest`] will /// do nothing. pub async fn handshake(self, io: T) -> Result<(SendRequest, Connection)> where T: AsyncRead + AsyncWrite + Unpin, B: Body + 'static, B::Data: Send, B::Error: Into, { trace!("client handshake HTTP/1"); let (tx, rx) = dispatch::channel(); let mut conn = Conn::new(io); // Set the HTTP/1 parser configuration let h1_parser_config = { let mut h1_parser_config = ParserConfig::default(); h1_parser_config .ignore_invalid_headers_in_responses(self.opts.ignore_invalid_headers_in_responses) .allow_spaces_after_header_name_in_responses( self.opts.allow_spaces_after_header_name_in_responses, ) .allow_obsolete_multiline_headers_in_responses( self.opts.allow_obsolete_multiline_headers_in_responses, ); h1_parser_config }; conn.set_h1_parser_config(h1_parser_config); // Set the h1 write strategy if let Some(writev) = self.opts.h1_writev { if writev { conn.set_write_strategy_queue(); } else { conn.set_write_strategy_flatten(); } } // Set the maximum size of the request headers if let Some(max_headers) = self.opts.h1_max_headers { conn.set_http1_max_headers(max_headers); } // Enable HTTP/0.9 responses if requested if self.opts.h09_responses { conn.set_h09_responses(); } // Set the read buffer size if specified if let Some(sz) = self.opts.h1_read_buf_exact_size { conn.set_read_buf_exact_size(sz); } // Set the maximum buffer size for HTTP/1 connections if let Some(max) = self.opts.h1_max_buf_size { conn.set_max_buf_size(max); } let cd = http1::dispatch::Client::new(rx); let proto = 
http1::dispatch::Dispatcher::new(cd, conn); Ok((SendRequest { dispatch: tx }, Connection { inner: proto })) } } mod upgrades { use super::*; use crate::client::core::upgrade::Upgraded; // A future binding a connection with a Service with Upgrade support. // // This type is unnameable outside the crate. #[must_use = "futures do nothing unless polled"] pub struct UpgradeableConnection where T: AsyncRead + AsyncWrite + Unpin + Send + 'static, B: Body + 'static, B::Error: Into, { pub(super) inner: Option>, } impl Future for UpgradeableConnection where I: AsyncRead + AsyncWrite + Unpin + Send + 'static, B: Body + 'static, B::Data: Send, B::Error: Into, { type Output = Result<()>; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { match ready!(Pin::new(&mut self.inner.as_mut().unwrap().inner).poll(cx)) { Ok(proto::Dispatched::Shutdown) => Poll::Ready(Ok(())), Ok(proto::Dispatched::Upgrade(pending)) => { let Parts { io, read_buf } = self.inner.take().unwrap().into_parts(); pending.fulfill(Upgraded::new(io, read_buf)); Poll::Ready(Ok(())) } Err(e) => Poll::Ready(Err(e)), } } } } ================================================ FILE: src/client/core/conn/http2.rs ================================================ //! HTTP/2 client connections use std::{ future::Future, marker::PhantomData, pin::Pin, sync::Arc, task::{Context, Poll, ready}, }; use http::{Request, Response}; use http_body::Body; use tokio::io::{AsyncRead, AsyncWrite}; use crate::client::core::{ Result, body::Incoming, dispatch::{self, TrySendError}, error::{BoxError, Error}, proto::{ self, http2::{Http2Options, ping}, }, rt::{Time, Timer, bounds::Http2ClientConnExec}, }; /// The sender side of an established connection. pub struct SendRequest { dispatch: dispatch::UnboundedSender, Response>, } impl Clone for SendRequest { #[inline] fn clone(&self) -> SendRequest { SendRequest { dispatch: self.dispatch.clone(), } } } /// A future that processes all HTTP state for the IO object. 
/// /// In most cases, this should just be spawned into an executor, so that it /// can process incoming and outgoing messages, notice hangups, and the like. #[must_use = "futures do nothing unless polled"] pub struct Connection where T: AsyncRead + AsyncWrite + Unpin, B: Body + 'static, E: Http2ClientConnExec + Unpin, B::Error: Into, { inner: (PhantomData, proto::http2::client::ClientTask), } /// A builder to configure an HTTP connection. /// /// After setting options, the builder is used to create a handshake future. /// /// **Note**: The default values of options are *not considered stable*. They /// are subject to change at any time. #[derive(Clone)] pub struct Builder { exec: Ex, timer: Time, opts: Http2Options, } // ===== impl SendRequest impl SendRequest { /// Polls to determine whether this sender can be used yet for a request. /// /// If the associated connection is closed, this returns an Error. #[inline] pub fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { if self.is_closed() { Poll::Ready(Err(Error::new_closed())) } else { Poll::Ready(Ok(())) } } /// Waits until the dispatcher is ready /// /// If the associated connection is closed, this returns an Error. #[inline] pub async fn ready(&mut self) -> Result<()> { std::future::poll_fn(|cx| self.poll_ready(cx)).await } /// Checks if the connection is currently ready to send a request. /// /// # Note /// /// This is mostly a hint. Due to inherent latency of networks, it is /// possible that even after checking this is ready, sending a request /// may still fail because the connection was closed in the meantime. #[inline] pub fn is_ready(&self) -> bool { self.dispatch.is_ready() } /// Checks if the connection side has been closed. #[inline] pub fn is_closed(&self) -> bool { self.dispatch.is_closed() } } impl SendRequest where B: Body + 'static, { /// Sends a `Request` on the associated connection. /// /// Returns a future that if successful, yields the `Response`. 
/// /// # Error /// /// If there was an error before trying to serialize the request to the /// connection, the message will be returned as part of this error. pub fn try_send_request( &mut self, req: Request, ) -> impl Future, TrySendError>>> { let sent = self.dispatch.try_send(req); async move { match sent { Ok(rx) => match rx.await { Ok(Ok(res)) => Ok(res), Ok(Err(err)) => Err(err), // this is definite bug if it happens, but it shouldn't happen! Err(_) => panic!("dispatch dropped without returning error"), }, Err(req) => { debug!("connection was not ready"); let error = Error::new_canceled().with("connection was not ready"); Err(TrySendError { error, message: Some(req), }) } } } } } // ===== impl Connection impl Future for Connection where T: AsyncRead + AsyncWrite + Unpin + 'static, B: Body + 'static + Unpin, B::Data: Send, E: Unpin, B::Error: Into, E: Http2ClientConnExec + Unpin, { type Output = Result<()>; #[inline] fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { match ready!(Pin::new(&mut self.inner.1).poll(cx))? { proto::Dispatched::Shutdown => Poll::Ready(Ok(())), proto::Dispatched::Upgrade(_pending) => unreachable!("http2 cannot upgrade"), } } } // ===== impl Builder impl Builder where Ex: Clone, { /// Creates a new connection builder. #[inline] pub fn new(exec: Ex) -> Builder { Builder { exec, timer: Time::Empty, opts: Default::default(), } } /// Provide a timer to execute background HTTP2 tasks. #[inline] pub fn timer(&mut self, timer: M) where M: Timer + Send + Sync + 'static, { self.timer = Time::Timer(Arc::new(timer)); } /// Provide a options configuration for the HTTP/2 connection. #[inline] pub fn options(&mut self, opts: Http2Options) { self.opts = opts; } /// Constructs a connection with the configured options and IO. /// /// Note, if [`Connection`] is not `await`-ed, [`SendRequest`] will /// do nothing. 
pub async fn handshake(self, io: T) -> Result<(SendRequest, Connection)> where T: AsyncRead + AsyncWrite + Unpin, B: Body + 'static, B::Data: Send, B::Error: Into, Ex: Http2ClientConnExec + Unpin, { trace!("client handshake HTTP/2"); // Crate the HTTP/2 client with the provided options. let mut builder = http2::client::Builder::default(); builder .initial_max_send_streams(self.opts.initial_max_send_streams) .initial_window_size(self.opts.initial_window_size) .initial_connection_window_size(self.opts.initial_conn_window_size) .max_send_buffer_size(self.opts.max_send_buffer_size); if let Some(id) = self.opts.initial_stream_id { builder.initial_stream_id(id); } if let Some(max) = self.opts.max_pending_accept_reset_streams { builder.max_pending_accept_reset_streams(max); } if let Some(max) = self.opts.max_concurrent_reset_streams { builder.max_concurrent_reset_streams(max); } if let Some(max) = self.opts.max_concurrent_streams { builder.max_concurrent_streams(max); } if let Some(max) = self.opts.max_header_list_size { builder.max_header_list_size(max); } if let Some(opt) = self.opts.enable_push { builder.enable_push(opt); } if let Some(max) = self.opts.max_frame_size { builder.max_frame_size(max); } if let Some(max) = self.opts.header_table_size { builder.header_table_size(max); } if let Some(v) = self.opts.enable_connect_protocol { builder.enable_connect_protocol(v); } if let Some(v) = self.opts.no_rfc7540_priorities { builder.no_rfc7540_priorities(v); } if let Some(order) = self.opts.settings_order { builder.settings_order(order); } if let Some(stream_dependency) = self.opts.headers_stream_dependency { builder.headers_stream_dependency(stream_dependency); } if let Some(order) = self.opts.headers_pseudo_order { builder.headers_pseudo_order(order); } if let Some(priority) = self.opts.priorities { builder.priorities(priority); } // Create the ping configuration for the connection. 
let ping_config = ping::Config::new( self.opts.adaptive_window, self.opts.initial_window_size, self.opts.keep_alive_interval, self.opts.keep_alive_timeout, self.opts.keep_alive_while_idle, ); let (tx, rx) = dispatch::channel(); let h2 = proto::http2::client::handshake(io, rx, builder, ping_config, self.exec, self.timer) .await?; Ok(( SendRequest { dispatch: tx.unbound(), }, Connection { inner: (PhantomData, h2), }, )) } } ================================================ FILE: src/client/core/conn.rs ================================================ //! Lower-level client connection API. //! //! The types in this module are to provide a lower-level API based around a //! single connection. Connecting to a host, pooling connections, and the like //! are not handled at this level. This module provides the building blocks to //! customize those things externally. pub mod http1; pub mod http2; ================================================ FILE: src/client/core/dispatch.rs ================================================ use std::{ future::Future, pin::Pin, task::{Context, Poll}, }; use http::{Request, Response}; use http_body::Body; use pin_project_lite::pin_project; use tokio::sync::{mpsc, oneshot}; use super::{Error, body::Incoming, proto::http2::client::ResponseFutMap}; type RetryPromise = oneshot::Receiver>>; pub(crate) fn channel() -> (Sender, Receiver) { let (tx, rx) = mpsc::unbounded_channel(); let (giver, taker) = want::new(); ( Sender { buffered_once: false, giver, inner: tx, }, Receiver { inner: rx, taker }, ) } /// An error when calling `try_send_request`. /// /// There is a possibility of an error occurring on a connection in-between the /// time that a request is queued and when it is actually written to the IO /// transport. If that happens, it is safe to return the request back to the /// caller, as it was never fully sent. 
#[derive(Debug)] pub struct TrySendError { pub(crate) error: Error, pub(crate) message: Option, } /// A bounded sender of requests and callbacks for when responses are ready. /// /// While the inner sender is unbounded, the Giver is used to determine /// if the Receiver is ready for another request. pub(crate) struct Sender { /// One message is always allowed, even if the Receiver hasn't asked /// for it yet. This boolean keeps track of whether we've sent one /// without notice. buffered_once: bool, /// The Giver helps watch that the Receiver side has been polled /// when the queue is empty. This helps us know when a request and /// response have been fully processed, and a connection is ready /// for more. giver: want::Giver, /// Actually bounded by the Giver, plus `buffered_once`. inner: mpsc::UnboundedSender>, } /// An unbounded version. /// /// Cannot poll the Giver, but can still use it to determine if the Receiver /// has been dropped. However, this version can be cloned. pub(crate) struct UnboundedSender { /// Only used for `is_closed`, since mpsc::UnboundedSender cannot be checked. giver: want::SharedGiver, inner: mpsc::UnboundedSender>, } impl Sender { #[inline] pub(crate) fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { self.giver.poll_want(cx).map_err(|_| Error::new_closed()) } #[inline] pub(crate) fn is_ready(&self) -> bool { self.giver.is_wanting() } pub(crate) fn try_send(&mut self, val: T) -> Result, T> { if self.giver.give() || !self.buffered_once { // If the receiver is ready *now*, then of course we can send. // // If the receiver isn't ready yet, but we don't have anything // in the channel yet, then allow one message. 
self.buffered_once = true; } else { return Err(val); }; let (tx, rx) = oneshot::channel(); self.inner .send(Envelope(Some((val, Callback(Some(tx)))))) .map(move |_| rx) .map_err(|mut e| (e.0).0.take().expect("envelope not dropped").0) } #[inline] pub(crate) fn unbound(self) -> UnboundedSender { UnboundedSender { giver: self.giver.shared(), inner: self.inner, } } } impl UnboundedSender { #[inline] pub(crate) fn is_ready(&self) -> bool { !self.giver.is_canceled() } #[inline] pub(crate) fn is_closed(&self) -> bool { self.giver.is_canceled() } pub(crate) fn try_send(&mut self, val: T) -> Result, T> { let (tx, rx) = oneshot::channel(); self.inner .send(Envelope(Some((val, Callback(Some(tx)))))) .map(move |_| rx) .map_err(|mut e| (e.0).0.take().expect("envelope not dropped").0) } } impl Clone for UnboundedSender { #[inline] fn clone(&self) -> Self { UnboundedSender { giver: self.giver.clone(), inner: self.inner.clone(), } } } pub(crate) struct Receiver { inner: mpsc::UnboundedReceiver>, taker: want::Taker, } impl Receiver { pub(crate) fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll)>> { match self.inner.poll_recv(cx) { Poll::Ready(item) => { Poll::Ready(item.map(|mut env| env.0.take().expect("envelope not dropped"))) } Poll::Pending => { self.taker.want(); Poll::Pending } } } #[inline] pub(crate) fn close(&mut self) { self.taker.cancel(); self.inner.close(); } #[inline] pub(crate) fn try_recv(&mut self) -> Option<(T, Callback)> { use futures_util::FutureExt; match self.inner.recv().now_or_never() { Some(Some(mut env)) => env.0.take(), _ => None, } } } impl Drop for Receiver { #[inline] fn drop(&mut self) { // Notify the giver about the closure first, before dropping // the mpsc::Receiver. 
self.taker.cancel(); } } struct Envelope(Option<(T, Callback)>); impl Drop for Envelope { fn drop(&mut self) { if let Some((val, cb)) = self.0.take() { cb.send(Err(TrySendError { error: Error::new_canceled().with("connection closed"), message: Some(val), })); } } } pub(crate) struct Callback(Option>>>); impl Drop for Callback { fn drop(&mut self) { if let Some(tx) = self.0.take() { let _ = tx.send(Err(TrySendError { error: dispatch_gone(), message: None, })); } } } #[cold] fn dispatch_gone() -> Error { // FIXME(nox): What errors do we want here? Error::new_user_dispatch_gone().with(if std::thread::panicking() { "user code panicked" } else { "runtime dropped the dispatch task" }) } impl Callback { const MISSING_SENDER: &'static str = "callback sender missing"; #[inline] pub(crate) fn is_canceled(&self) -> bool { self.0.as_ref().expect(Self::MISSING_SENDER).is_closed() } #[inline] pub(crate) fn poll_canceled(&mut self, cx: &mut Context<'_>) -> Poll<()> { self.0.as_mut().expect(Self::MISSING_SENDER).poll_closed(cx) } #[inline] pub(crate) fn send(mut self, val: Result>) { let _ = self.0.take().expect(Self::MISSING_SENDER).send(val); } } impl TrySendError { /// Take the message from this error. /// /// The message will not always have been recovered. If an error occurs /// after the message has been serialized onto the connection, it will not /// be available here. #[inline] pub fn take_message(&mut self) -> Option { self.message.take() } /// Consumes this to return the inner error. #[inline] pub fn into_error(self) -> Error { self.error } } pin_project! 
{ pub struct SendWhen where B: Body, B: 'static, { #[pin] pub(crate) when: ResponseFutMap, #[pin] pub(crate) call_back: Option, Response>>, } } impl Future for SendWhen where B: Body + 'static, B::Data: Send, { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let mut this = self.project(); let mut call_back = this.call_back.take().expect("polled after complete"); match Pin::new(&mut this.when).poll(cx) { Poll::Ready(Ok(res)) => { call_back.send(Ok(res)); Poll::Ready(()) } Poll::Pending => { // check if the callback is canceled match call_back.poll_canceled(cx) { Poll::Ready(v) => v, Poll::Pending => { // Move call_back back to struct before return this.call_back.set(Some(call_back)); return Poll::Pending; } }; trace!("send_when canceled"); // Tell pipe_task to reset the h2 stream so that // RST_STREAM is sent and flow-control capacity freed. this.when.as_mut().cancel(); Poll::Ready(()) } Poll::Ready(Err((error, message))) => { call_back.send(Err(TrySendError { error, message })); Poll::Ready(()) } } } } ================================================ FILE: src/client/core/error.rs ================================================ //! Error and Result module. use std::{error::Error as StdError, fmt}; /// Result type often returned from methods that can have crate::core: `Error`s. pub type Result = std::result::Result; pub type BoxError = Box; type Cause = BoxError; /// Represents errors that can occur handling HTTP streams. /// /// # Formatting /// /// The `Display` implementation of this type will only print the details of /// this level of error, even though it may have been caused by another error /// and contain that error in its source. To print all the relevant /// information, including the source chain, using something like /// `std::error::Report`, or equivalent 3rd party types. /// /// The contents of the formatted error message of this specific `Error` type /// is unspecified. 
**You must not depend on it.** The wording and details may /// change in any version, with the goal of improving error messages. /// /// # Source /// /// A `crate::core::Error` may be caused by another error. To aid in debugging, /// those are exposed in `Error::source()` as erased types. While it is /// possible to check the exact type of the sources, they **can not be depended /// on**. They may come from private internal dependencies, and are subject to /// change at any moment. pub struct Error { inner: Box, } struct ErrorImpl { kind: Kind, cause: Option, } #[derive(Debug)] pub(super) enum Kind { Parse(Parse), User(User), /// A message reached EOF, but is not complete. IncompleteMessage, /// A connection received a message (or bytes) when not waiting for one. UnexpectedMessage, /// A pending item was dropped before ever being processed. Canceled, /// Indicates a channel (client or body sender) is closed. ChannelClosed, /// An `io::Error` that occurred while trying to read or write to a network stream. Io, /// Error while reading a body from connection. Body, /// Error while writing a body to connection. BodyWrite, /// Error calling AsyncWrite::shutdown() Shutdown, /// A general error from h2. Http2, } #[derive(Debug)] pub(crate) enum Parse { Method, Version, VersionH2, Uri, Header(Header), TooLarge, Status, Internal, } #[derive(Debug)] pub(crate) enum Header { Token, ContentLengthInvalid, TransferEncodingUnexpected, } #[derive(Debug)] pub(super) enum User { /// Error calling user's Body::poll_data(). Body, /// The user aborted writing of the outgoing body. BodyWriteAborted, /// User tried to send a connect request with a nonzero body InvalidConnectWithBody, /// Error from future of user's Service. Service, /// User tried polling for an upgrade that doesn't exist. NoUpgrade, /// User polled for an upgrade, but low-level API is not using upgrades. ManualUpgrade, /// The dispatch task is gone. 
DispatchGone, } // Sentinel type to indicate the error was caused by a timeout. #[derive(Debug)] pub(super) struct TimedOut; impl Error { /// Returns true if this was an HTTP parse error. #[inline] pub fn is_parse(&self) -> bool { matches!(self.inner.kind, Kind::Parse(_)) } /// Returns true if this was an HTTP parse error caused by an invalid response status code or /// reason phrase. #[inline] pub fn is_parse_status(&self) -> bool { matches!(self.inner.kind, Kind::Parse(Parse::Status)) } /// Returns true if this error was caused by user code. #[inline] pub fn is_user(&self) -> bool { matches!(self.inner.kind, Kind::User(_)) } /// Returns true if this was about a `Request` that was canceled. #[inline] pub fn is_canceled(&self) -> bool { matches!(self.inner.kind, Kind::Canceled) } /// Returns true if a sender's channel is closed. #[inline] pub fn is_closed(&self) -> bool { matches!(self.inner.kind, Kind::ChannelClosed) } /// Returns true if the connection closed before a message could complete. #[inline] pub fn is_incomplete_message(&self) -> bool { matches!(self.inner.kind, Kind::IncompleteMessage) } /// Returns true if the body write was aborted. #[inline] pub fn is_body_write_aborted(&self) -> bool { matches!(self.inner.kind, Kind::User(User::BodyWriteAborted)) } /// Returns true if the error was caused by a timeout. 
#[inline] pub fn is_timeout(&self) -> bool { self.find_source::().is_some() } #[inline] pub(super) fn new(kind: Kind) -> Error { Error { inner: Box::new(ErrorImpl { kind, cause: None }), } } #[inline] pub(super) fn with>(mut self, cause: C) -> Error { self.inner.cause = Some(cause.into()); self } pub(crate) fn find_source(&self) -> Option<&E> { let mut cause = self.source(); while let Some(err) = cause { if let Some(typed) = err.downcast_ref() { return Some(typed); } cause = err.source(); } // else None } pub(super) fn h2_reason(&self) -> http2::Reason { // Find an http2::Reason somewhere in the cause stack, if it exists, // otherwise assume an INTERNAL_ERROR. self.find_source::() .and_then(|h2_err| h2_err.reason()) .unwrap_or(http2::Reason::INTERNAL_ERROR) } #[inline] pub(super) fn new_canceled() -> Error { Error::new(Kind::Canceled) } #[inline] pub(super) fn new_incomplete() -> Error { Error::new(Kind::IncompleteMessage) } #[inline] pub(super) fn new_too_large() -> Error { Error::new(Kind::Parse(Parse::TooLarge)) } #[inline] pub(super) fn new_version_h2() -> Error { Error::new(Kind::Parse(Parse::VersionH2)) } #[inline] pub(super) fn new_unexpected_message() -> Error { Error::new(Kind::UnexpectedMessage) } #[inline] pub(super) fn new_io(cause: std::io::Error) -> Error { Error::new(Kind::Io).with(cause) } #[inline] pub(super) fn new_closed() -> Error { Error::new(Kind::ChannelClosed) } #[inline] pub(super) fn new_body>(cause: E) -> Error { Error::new(Kind::Body).with(cause) } #[inline] pub(super) fn new_body_write>(cause: E) -> Error { Error::new(Kind::BodyWrite).with(cause) } #[inline] pub(super) fn new_body_write_aborted() -> Error { Error::new(Kind::User(User::BodyWriteAborted)) } #[inline] fn new_user(user: User) -> Error { Error::new(Kind::User(user)) } #[inline] pub(super) fn new_user_no_upgrade() -> Error { Error::new_user(User::NoUpgrade) } #[inline] pub(super) fn new_user_manual_upgrade() -> Error { Error::new_user(User::ManualUpgrade) } #[inline] 
pub(super) fn new_user_service>(cause: E) -> Error { Error::new_user(User::Service).with(cause) } #[inline] pub(super) fn new_user_body>(cause: E) -> Error { Error::new_user(User::Body).with(cause) } #[inline] pub(super) fn new_user_invalid_connect() -> Error { Error::new_user(User::InvalidConnectWithBody) } #[inline] pub(super) fn new_shutdown(cause: std::io::Error) -> Error { Error::new(Kind::Shutdown).with(cause) } #[inline] pub(super) fn new_user_dispatch_gone() -> Error { Error::new(Kind::User(User::DispatchGone)) } pub(super) fn new_h2(cause: ::http2::Error) -> Error { if cause.is_io() { Error::new_io(cause.into_io().expect("http2::Error::is_io")) } else { Error::new(Kind::Http2).with(cause) } } fn description(&self) -> &str { match self.inner.kind { Kind::Parse(Parse::Method) => "invalid HTTP method parsed", Kind::Parse(Parse::Version) => "invalid HTTP version parsed", Kind::Parse(Parse::VersionH2) => "invalid HTTP version parsed (found HTTP2 preface)", Kind::Parse(Parse::Uri) => "invalid URI", Kind::Parse(Parse::Header(Header::Token)) => "invalid HTTP header parsed", Kind::Parse(Parse::Header(Header::ContentLengthInvalid)) => { "invalid content-length parsed" } Kind::Parse(Parse::Header(Header::TransferEncodingUnexpected)) => { "unexpected transfer-encoding parsed" } Kind::Parse(Parse::TooLarge) => "message head is too large", Kind::Parse(Parse::Status) => "invalid HTTP status-code parsed", Kind::Parse(Parse::Internal) => { "internal error inside wreq and/or its dependencies, please report" } Kind::IncompleteMessage => "connection closed before message completed", Kind::UnexpectedMessage => "received unexpected message from connection", Kind::ChannelClosed => "channel closed", Kind::Canceled => "operation was canceled", Kind::Body => "error reading a body from connection", Kind::BodyWrite => "error writing a body to connection", Kind::Shutdown => "error shutting down connection", Kind::Http2 => "http2 error", Kind::Io => "connection error", 
Kind::User(User::Body) => "error from user's Body stream", Kind::User(User::BodyWriteAborted) => "user body write aborted", Kind::User(User::InvalidConnectWithBody) => { "user sent CONNECT request with non-zero body" } Kind::User(User::Service) => "error from user's Service", Kind::User(User::NoUpgrade) => "no upgrade available", Kind::User(User::ManualUpgrade) => "upgrade expected but low level API in use", Kind::User(User::DispatchGone) => "dispatch task is gone", } } } impl fmt::Debug for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut f = f.debug_tuple("crate::core::Error"); f.field(&self.inner.kind); if let Some(ref cause) = self.inner.cause { f.field(cause); } f.finish() } } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(self.description()) } } impl StdError for Error { fn source(&self) -> Option<&(dyn StdError + 'static)> { self.inner .cause .as_ref() .map(|cause| &**cause as &(dyn StdError + 'static)) } } #[doc(hidden)] impl From for Error { fn from(err: Parse) -> Error { Error::new(Kind::Parse(err)) } } impl Parse { #[inline] pub(crate) fn content_length_invalid() -> Self { Parse::Header(Header::ContentLengthInvalid) } #[inline] pub(crate) fn transfer_encoding_unexpected() -> Self { Parse::Header(Header::TransferEncodingUnexpected) } } impl From for Parse { fn from(err: httparse::Error) -> Parse { match err { httparse::Error::HeaderName | httparse::Error::HeaderValue | httparse::Error::NewLine | httparse::Error::Token => Parse::Header(Header::Token), httparse::Error::Status => Parse::Status, httparse::Error::TooManyHeaders => Parse::TooLarge, httparse::Error::Version => Parse::Version, } } } impl From for Parse { fn from(_: http::method::InvalidMethod) -> Parse { Parse::Method } } impl From for Parse { fn from(_: http::status::InvalidStatusCode) -> Parse { Parse::Status } } impl From for Parse { fn from(_: http::uri::InvalidUri) -> Parse { Parse::Uri } } impl From for Parse { 
fn from(_: http::uri::InvalidUriParts) -> Parse { Parse::Uri } } // ===== impl TimedOut ==== impl fmt::Display for TimedOut { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str("operation timed out") } } impl StdError for TimedOut {} #[cfg(test)] mod tests { use std::mem; use super::*; fn assert_send_sync() {} #[test] fn error_satisfies_send_sync() { assert_send_sync::() } #[test] fn error_size_of() { assert_eq!(mem::size_of::(), mem::size_of::()); } #[test] fn h2_reason_unknown() { let closed = Error::new_closed(); assert_eq!(closed.h2_reason(), http2::Reason::INTERNAL_ERROR); } #[test] fn h2_reason_one_level() { let body_err = Error::new_user_body(http2::Error::from(http2::Reason::ENHANCE_YOUR_CALM)); assert_eq!(body_err.h2_reason(), http2::Reason::ENHANCE_YOUR_CALM); } #[test] fn h2_reason_nested() { let recvd = Error::new_h2(http2::Error::from(http2::Reason::HTTP_1_1_REQUIRED)); // Suppose a user were proxying the received error let svc_err = Error::new_user_service(recvd); assert_eq!(svc_err.h2_reason(), http2::Reason::HTTP_1_1_REQUIRED); } } ================================================ FILE: src/client/core/proto/headers.rs ================================================ use bytes::BytesMut; use http::{ HeaderMap, Method, header::{CONTENT_LENGTH, HeaderValue, ValueIter}, }; #[inline] pub(super) fn connection_keep_alive(value: &HeaderValue) -> bool { connection_has(value, "keep-alive") } #[inline] pub(super) fn connection_close(value: &HeaderValue) -> bool { connection_has(value, "close") } fn connection_has(value: &HeaderValue, needle: &str) -> bool { if let Ok(s) = value.to_str() { for val in s.split(',') { if val.trim().eq_ignore_ascii_case(needle) { return true; } } } false } #[inline] pub(super) fn content_length_parse_all(headers: &HeaderMap) -> Option { content_length_parse_all_values(headers.get_all(CONTENT_LENGTH).into_iter()) } pub(super) fn content_length_parse_all_values(values: ValueIter<'_, HeaderValue>) -> Option { // If 
multiple Content-Length headers were sent, everything can still // be alright if they all contain the same value, and all parse // correctly. If not, then it's an error. let mut content_length: Option = None; for h in values { if let Ok(line) = h.to_str() { for v in line.split(',') { if let Some(n) = from_digits(v.trim().as_bytes()) { if content_length.is_none() { content_length = Some(n) } else if content_length != Some(n) { return None; } } else { return None; } } } else { return None; } } content_length } fn from_digits(bytes: &[u8]) -> Option { // cannot use FromStr for u64, since it allows a signed prefix let mut result = 0u64; const RADIX: u64 = 10; if bytes.is_empty() { return None; } for &b in bytes { // can't use char::to_digit, since we haven't verified these bytes // are utf-8. match b { b'0'..=b'9' => { result = result.checked_mul(RADIX)?; result = result.checked_add((b - b'0') as u64)?; } _ => { // not a DIGIT, get outta here! return None; } } } Some(result) } #[inline] pub(super) fn method_has_defined_payload_semantics(method: &Method) -> bool { !matches!( *method, Method::GET | Method::HEAD | Method::DELETE | Method::CONNECT | Method::OPTIONS ) } #[inline] pub(super) fn set_content_length_if_missing(headers: &mut HeaderMap, len: u64) { headers .entry(CONTENT_LENGTH) .or_insert_with(|| HeaderValue::from(len)); } #[inline] pub(super) fn transfer_encoding_is_chunked(headers: &HeaderMap) -> bool { is_chunked(headers.get_all(http::header::TRANSFER_ENCODING).into_iter()) } pub(super) fn is_chunked(mut encodings: ValueIter<'_, HeaderValue>) -> bool { // chunked must always be the last encoding, according to spec if let Some(line) = encodings.next_back() { // chunked must always be the last encoding, according to spec if let Ok(s) = line.to_str() { if let Some(encoding) = s.rsplit(',').next() { return encoding.trim().eq_ignore_ascii_case("chunked"); } } } false } pub(super) fn add_chunked(mut entry: http::header::OccupiedEntry<'_, HeaderValue>) { const 
CHUNKED: &str = "chunked"; if let Some(line) = entry.iter_mut().next_back() { // + 2 for ", " let new_cap = line.as_bytes().len() + CHUNKED.len() + 2; let mut buf = BytesMut::with_capacity(new_cap); buf.extend_from_slice(line.as_bytes()); buf.extend_from_slice(b", "); buf.extend_from_slice(CHUNKED.as_bytes()); *line = HeaderValue::from_maybe_shared(buf.freeze()) .expect("original header value plus ascii is valid"); return; } entry.insert(HeaderValue::from_static(CHUNKED)); } ================================================ FILE: src/client/core/proto/http1/buf.rs ================================================ use std::{collections::VecDeque, io::IoSlice}; use bytes::{Buf, BufMut, Bytes, BytesMut}; /// A list of buffers that implements `Buf` by concatenating them together. pub(crate) struct BufList { bufs: VecDeque, remaining: usize, } impl BufList { #[inline] pub(crate) fn new() -> BufList { BufList { bufs: VecDeque::new(), remaining: 0, } } #[inline] pub(crate) fn push(&mut self, buf: T) { debug_assert!(buf.has_remaining()); self.remaining += buf.remaining(); self.bufs.push_back(buf); } #[inline] pub(crate) fn bufs_cnt(&self) -> usize { self.bufs.len() } } impl Buf for BufList { #[inline] fn remaining(&self) -> usize { self.remaining } #[inline] fn chunk(&self) -> &[u8] { self.bufs.front().map(Buf::chunk).unwrap_or_default() } #[inline] fn advance(&mut self, mut cnt: usize) { assert!(cnt <= self.remaining, "`cnt` greater than remaining"); self.remaining -= cnt; while cnt > 0 { { let front = &mut self.bufs[0]; let rem = front.remaining(); if rem > cnt { front.advance(cnt); return; } else { front.advance(rem); cnt -= rem; } } self.bufs.pop_front(); } } #[inline] fn chunks_vectored<'t>(&'t self, dst: &mut [IoSlice<'t>]) -> usize { if dst.is_empty() { return 0; } let mut vecs = 0; for buf in &self.bufs { vecs += buf.chunks_vectored(&mut dst[vecs..]); if vecs == dst.len() { break; } } vecs } #[inline] fn copy_to_bytes(&mut self, len: usize) -> Bytes { // Our inner 
buffer may have an optimized version of copy_to_bytes, and if the whole // request can be fulfilled by the front buffer, we can take advantage. match self.bufs.front_mut() { Some(front) if front.remaining() == len => { let b = front.copy_to_bytes(len); self.bufs.pop_front(); self.remaining -= len; b } Some(front) if front.remaining() > len => { self.remaining -= len; front.copy_to_bytes(len) } _ => { assert!(len <= self.remaining(), "`len` greater than remaining"); let mut bm = BytesMut::with_capacity(len); bm.put(self.take(len)); bm.freeze() } } } } #[cfg(test)] mod tests { use std::ptr; use super::*; fn hello_world_buf() -> BufList { let mut list = BufList::new(); list.push(Bytes::from("Hello")); list.push(Bytes::from(" ")); list.push(Bytes::from("World")); list } #[test] fn to_bytes_shorter() { let mut bufs = hello_world_buf(); let old_ptr = bufs.chunk().as_ptr(); let start = bufs.copy_to_bytes(4); assert_eq!(start, "Hell"); assert!(ptr::eq(old_ptr, start.as_ptr())); assert_eq!(bufs.chunk(), b"o"); assert!(ptr::eq(old_ptr.wrapping_add(4), bufs.chunk().as_ptr())); assert_eq!(bufs.remaining(), 7); } #[test] fn to_bytes_eq() { let mut bufs = hello_world_buf(); let old_ptr = bufs.chunk().as_ptr(); let start = bufs.copy_to_bytes(5); assert_eq!(start, "Hello"); assert!(ptr::eq(old_ptr, start.as_ptr())); assert_eq!(bufs.chunk(), b" "); assert_eq!(bufs.remaining(), 6); } #[test] fn to_bytes_longer() { let mut bufs = hello_world_buf(); let start = bufs.copy_to_bytes(7); assert_eq!(start, "Hello W"); assert_eq!(bufs.remaining(), 4); } #[test] fn one_long_buf_to_bytes() { let mut buf = BufList::new(); buf.push(b"Hello World" as &[_]); assert_eq!(buf.copy_to_bytes(5), "Hello"); assert_eq!(buf.chunk(), b" World"); } #[test] #[should_panic(expected = "`len` greater than remaining")] fn buf_to_bytes_too_many() { hello_world_buf().copy_to_bytes(42); } } ================================================ FILE: src/client/core/proto/http1/conn.rs 
================================================ use std::{ fmt, io, marker::{PhantomData, Unpin}, pin::Pin, task::{Context, Poll, ready}, }; use bytes::{Buf, Bytes}; use http::{ HeaderMap, Method, Version, header::{CONNECTION, HeaderValue, TE}, }; use http_body::Frame; use httparse::ParserConfig; use tokio::io::{AsyncRead, AsyncWrite}; use super::{ Decoder, Encode, Http1Transaction, ParseContext, Wants, encode::{EncodedBuf, Encoder}, io::Buffered, }; use crate::client::core::{ Error, Result, body::DecodedLength, proto::{BodyLength, MessageHead, headers}, upgrade, }; /// This handles a connection, which will have been established over an /// `AsyncRead + AsyncWrite` (like a socket), and will likely include multiple /// `Transaction`s over HTTP. /// /// The connection will determine when a message begins and ends as well as /// determine if this connection can be kept alive after the message, /// or if it is complete. pub(crate) struct Conn { io: Buffered>, state: State, _marker: PhantomData, } impl Conn where I: AsyncRead + AsyncWrite + Unpin, B: Buf, T: Http1Transaction, { pub(crate) fn new(io: I) -> Conn { Conn { io: Buffered::new(io), state: State { allow_half_close: false, cached_headers: None, error: None, keep_alive: KA::Busy, method: None, h1_parser_config: ParserConfig::default(), h1_max_headers: None, h09_responses: false, notify_read: false, reading: Reading::Init, writing: Writing::Init, upgrade: None, // We assume a modern world where the remote speaks HTTP/1.1. // If they tell us otherwise, we'll downgrade in `read_head`. 
version: Version::HTTP_11, allow_trailer_fields: false, }, _marker: PhantomData, } } #[inline] pub(crate) fn set_write_strategy_queue(&mut self) { self.io.set_write_strategy_queue(); } #[inline] pub(crate) fn set_max_buf_size(&mut self, max: usize) { self.io.set_max_buf_size(max); } #[inline] pub(crate) fn set_read_buf_exact_size(&mut self, sz: usize) { self.io.set_read_buf_exact_size(sz); } #[inline] pub(crate) fn set_write_strategy_flatten(&mut self) { self.io.set_write_strategy_flatten(); } #[inline] pub(crate) fn set_h1_parser_config(&mut self, parser_config: ParserConfig) { self.state.h1_parser_config = parser_config; } #[inline] pub(crate) fn set_h09_responses(&mut self) { self.state.h09_responses = true; } #[inline] pub(crate) fn set_http1_max_headers(&mut self, val: usize) { self.state.h1_max_headers = Some(val); } #[inline] pub(super) fn into_inner(self) -> (I, Bytes) { self.io.into_inner() } #[inline] pub(super) fn pending_upgrade(&mut self) -> Option { self.state.upgrade.take() } #[inline] pub(super) fn is_read_closed(&self) -> bool { self.state.is_read_closed() } #[inline] pub(super) fn is_write_closed(&self) -> bool { self.state.is_write_closed() } pub(super) fn can_read_head(&self) -> bool { if !matches!(self.state.reading, Reading::Init) { return false; } !matches!(self.state.writing, Writing::Init) } #[inline] pub(super) fn can_read_body(&self) -> bool { matches!( self.state.reading, Reading::Body(..) | Reading::Continue(..) ) } #[inline] fn should_error_on_eof(&self) -> bool { // If we're idle, it's probably just the connection closing gracefully. 
!self.state.is_idle() } #[inline] fn has_h2_prefix(&self) -> bool { const H2_PREFACE: &[u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; let read_buf = self.io.read_buf(); read_buf.starts_with(H2_PREFACE) } #[allow(clippy::type_complexity)] pub(super) fn poll_read_head( &mut self, cx: &mut Context<'_>, ) -> Poll, DecodedLength, Wants)>>> { debug_assert!(self.can_read_head()); trace!("Conn::read_head"); let msg = match self.io.parse::( cx, ParseContext { cached_headers: &mut self.state.cached_headers, req_method: &mut self.state.method, h1_parser_config: &self.state.h1_parser_config, h1_max_headers: self.state.h1_max_headers, h09_responses: self.state.h09_responses, }, ) { Poll::Ready(Ok(msg)) => msg, Poll::Ready(Err(e)) => return self.on_read_head_error(e), Poll::Pending => { return Poll::Pending; } }; // Note: don't deconstruct `msg` into local variables, it appears // the optimizer doesn't remove the extra copies. debug!("incoming body is {}", msg.decode); // Prevent accepting HTTP/0.9 responses after the initial one, if any. 
self.state.h09_responses = false; self.state.busy(); self.state.keep_alive &= msg.keep_alive; self.state.version = msg.head.version; let mut wants = if msg.wants_upgrade { Wants::UPGRADE } else { Wants::EMPTY }; if msg.decode == DecodedLength::ZERO { if msg.expect_continue { debug!("ignoring expect-continue since body is empty"); } self.state.reading = Reading::KeepAlive; self.try_keep_alive(cx); } else if msg.expect_continue && msg.head.version.gt(&Version::HTTP_10) { // TODO: remove this when we land h1_max_header_size support let h1_max_header_size = None; self.state.reading = Reading::Continue(Decoder::new( msg.decode, self.state.h1_max_headers, h1_max_header_size, )); wants = wants.add(Wants::EXPECT); } else { // TODO: remove this when we land h1_max_header_size support let h1_max_header_size = None; self.state.reading = Reading::Body(Decoder::new( msg.decode, self.state.h1_max_headers, h1_max_header_size, )); } self.state.allow_trailer_fields = msg .head .headers .get(TE) .is_some_and(|te_header| te_header == "trailers"); Poll::Ready(Some(Ok((msg.head, msg.decode, wants)))) } fn on_read_head_error(&mut self, e: Error) -> Poll>> { // If we are currently waiting on a message, then an empty // message should be reported as an error. If not, it is just // the connection closing gracefully. let must_error = self.should_error_on_eof(); self.close_read(); self.io.consume_leading_lines(); let was_mid_parse = e.is_parse() || !self.io.read_buf().is_empty(); if was_mid_parse || must_error { // We check if the buf contains the h2 Preface debug!( "parse error ({}) with {} bytes", e, self.io.read_buf().len() ); match self.on_parse_error(e) { Ok(()) => Poll::Pending, // XXX: wat? 
Err(e) => Poll::Ready(Some(Err(e))), } } else { debug!("read eof"); self.close_write(); Poll::Ready(None) } } pub(super) fn poll_read_body( &mut self, cx: &mut Context<'_>, ) -> Poll>>> { debug_assert!(self.can_read_body()); let (reading, ret) = match self.state.reading { Reading::Body(ref mut decoder) => { match ready!(decoder.decode(cx, &mut self.io)) { Ok(frame) => { if frame.is_data() { let slice = frame.data_ref().unwrap_or_else(|| unreachable!()); let (reading, maybe_frame) = if decoder.is_eof() { debug!("incoming body completed"); ( Reading::KeepAlive, if !slice.is_empty() { Some(Ok(frame)) } else { None }, ) } else if slice.is_empty() { error!("incoming body unexpectedly ended"); // This should be unreachable, since all 3 decoders // either set eof=true or return an Err when reading // an empty slice... (Reading::Closed, None) } else { return Poll::Ready(Some(Ok(frame))); }; (reading, Poll::Ready(maybe_frame)) } else if frame.is_trailers() { debug!("incoming body completed with trailers"); (Reading::KeepAlive, Poll::Ready(Some(Ok(frame)))) } else { trace!("discarding unknown frame"); (Reading::Closed, Poll::Ready(None)) } } Err(e) => { debug!("incoming body decode error: {}", e); (Reading::Closed, Poll::Ready(Some(Err(e)))) } } } Reading::Continue(ref decoder) => { // Write the 100 Continue if not already responded... if let Writing::Init = self.state.writing { trace!("automatically sending 100 Continue"); let cont = b"HTTP/1.1 100 Continue\r\n\r\n"; self.io.headers_buf().extend_from_slice(cont); } // And now recurse once in the Reading::Body state... 
self.state.reading = Reading::Body(decoder.clone()); return self.poll_read_body(cx); } _ => unreachable!("poll_read_body invalid state: {:?}", self.state.reading), }; self.state.reading = reading; self.try_keep_alive(cx); ret } #[inline] pub(super) fn wants_read_again(&mut self) -> bool { let ret = self.state.notify_read; self.state.notify_read = false; ret } pub(super) fn poll_read_keep_alive(&mut self, cx: &mut Context<'_>) -> Poll> { debug_assert!(!self.can_read_head() && !self.can_read_body()); if self.is_read_closed() { Poll::Pending } else if self.is_mid_message() { self.mid_message_detect_eof(cx) } else { self.require_empty_read(cx) } } #[inline] fn is_mid_message(&self) -> bool { !matches!( (&self.state.reading, &self.state.writing), (&Reading::Init, &Writing::Init) ) } // This will check to make sure the io object read is empty. // // This should only be called for Clients wanting to enter the idle // state. fn require_empty_read(&mut self, cx: &mut Context<'_>) -> Poll> { debug_assert!(!self.can_read_head() && !self.can_read_body() && !self.is_read_closed()); debug_assert!(!self.is_mid_message()); if !self.io.read_buf().is_empty() { debug!("received an unexpected {} bytes", self.io.read_buf().len()); return Poll::Ready(Err(Error::new_unexpected_message())); } let num_read = ready!(self.force_io_read(cx)).map_err(Error::new_io)?; if num_read == 0 { let ret = if self.should_error_on_eof() { trace!("found unexpected EOF on busy connection: {:?}", self.state); Poll::Ready(Err(Error::new_incomplete())) } else { trace!("found EOF on idle connection, closing"); Poll::Ready(Ok(())) }; // order is important: should_error needs state BEFORE close_read self.state.close_read(); return ret; } debug!( "received unexpected {} bytes on an idle connection", num_read ); Poll::Ready(Err(Error::new_unexpected_message())) } fn mid_message_detect_eof(&mut self, cx: &mut Context<'_>) -> Poll> { debug_assert!(!self.can_read_head() && !self.can_read_body() && 
!self.is_read_closed()); debug_assert!(self.is_mid_message()); if self.state.allow_half_close || !self.io.read_buf().is_empty() { return Poll::Pending; } let num_read = ready!(self.force_io_read(cx)).map_err(Error::new_io)?; if num_read == 0 { trace!("found unexpected EOF on busy connection: {:?}", self.state); self.state.close_read(); Poll::Ready(Err(Error::new_incomplete())) } else { Poll::Ready(Ok(())) } } fn force_io_read(&mut self, cx: &mut Context<'_>) -> Poll> { debug_assert!(!self.state.is_read_closed()); let result = ready!(self.io.poll_read_from_io(cx)); #[allow(clippy::manual_inspect)] Poll::Ready(result.map_err(|e| { trace!(error = %e, "force_io_read; io error"); self.state.close(); e })) } fn maybe_notify(&mut self, cx: &mut Context<'_>) { // its possible that we returned NotReady from poll() without having // exhausted the underlying Io. We would have done this when we // determined we couldn't keep reading until we knew how writing // would finish. match self.state.reading { Reading::Continue(..) | Reading::Body(..) | Reading::KeepAlive | Reading::Closed => { return; } Reading::Init => (), }; match self.state.writing { Writing::Body(..) 
=> return, Writing::Init | Writing::KeepAlive | Writing::Closed => (), } if !self.io.is_read_blocked() { if self.io.read_buf().is_empty() { match self.io.poll_read_from_io(cx) { Poll::Ready(Ok(n)) => { if n == 0 { trace!("maybe_notify; read eof"); if self.state.is_idle() { self.state.close(); } else { self.close_read() } return; } } Poll::Pending => { trace!("maybe_notify; read_from_io blocked"); return; } Poll::Ready(Err(e)) => { trace!("maybe_notify; read_from_io error: {}", e); self.state.close(); self.state.error = Some(Error::new_io(e)); } } } self.state.notify_read = true; } } #[inline] fn try_keep_alive(&mut self, cx: &mut Context<'_>) { self.state.try_keep_alive::(); self.maybe_notify(cx); } #[inline] pub(super) fn can_write_head(&self) -> bool { if matches!(self.state.reading, Reading::Closed) { return false; } match self.state.writing { Writing::Init => self.io.can_headers_buf(), _ => false, } } #[inline] pub(super) fn can_write_body(&self) -> bool { match self.state.writing { Writing::Body(..) 
=> true, Writing::Init | Writing::KeepAlive | Writing::Closed => false, } } #[inline] pub(super) fn can_buffer_body(&self) -> bool { self.io.can_buffer() } pub(super) fn write_head(&mut self, head: MessageHead, body: Option) { if let Some(encoder) = self.encode_head(head, body) { self.state.writing = if !encoder.is_eof() { Writing::Body(encoder) } else if encoder.is_last() { Writing::Closed } else { Writing::KeepAlive }; } } fn encode_head( &mut self, mut head: MessageHead, body: Option, ) -> Option { debug_assert!(self.can_write_head()); self.state.busy(); self.enforce_version(&mut head); let buf = self.io.headers_buf(); trace_span!("encode_headers"); match T::encode( Encode { head: &mut head, body, req_method: &mut self.state.method, }, buf, ) { Ok(encoder) => { debug_assert!(self.state.cached_headers.is_none()); debug_assert!(head.headers.is_empty()); self.state.cached_headers = Some(head.headers); Some(encoder) } Err(err) => { self.state.error = Some(err); self.state.writing = Writing::Closed; None } } } // Fix keep-alive when Connection: keep-alive header is not present fn fix_keep_alive(&mut self, head: &mut MessageHead) { let outgoing_is_keep_alive = head .headers .get(CONNECTION) .is_some_and(headers::connection_keep_alive); if !outgoing_is_keep_alive { match head.version { // If response is version 1.0 and keep-alive is not present in the response, // disable keep-alive so the server closes the connection Version::HTTP_10 => self.state.disable_keep_alive(), // If response is version 1.1 and keep-alive is wanted, add // Connection: keep-alive header when not present Version::HTTP_11 if self.state.wants_keep_alive() => { head.headers .insert(CONNECTION, HeaderValue::from_static("keep-alive")); } _ => (), } } } // If we know the remote speaks an older version, we try to fix up any messages // to work with our older peer. 
fn enforce_version(&mut self, head: &mut MessageHead) { match self.state.version { Version::HTTP_10 => { // Fixes response or connection when keep-alive header is not present self.fix_keep_alive(head); // If the remote only knows HTTP/1.0, we should force ourselves // to do only speak HTTP/1.0 as well. head.version = Version::HTTP_10; } Version::HTTP_11 => { if let KA::Disabled = self.state.keep_alive.status() { head.headers .insert(CONNECTION, HeaderValue::from_static("close")); } } _ => (), } // If the remote speaks HTTP/1.1, then it *should* be fine with // both HTTP/1.0 and HTTP/1.1 from us. So again, we just let // the user's headers be. } pub(super) fn write_body(&mut self, chunk: B) { debug_assert!(self.can_write_body() && self.can_buffer_body()); // empty chunks should be discarded at Dispatcher level debug_assert!(chunk.remaining() != 0); let state = match self.state.writing { Writing::Body(ref mut encoder) => { self.io.buffer(encoder.encode(chunk)); if !encoder.is_eof() { return; } if encoder.is_last() { Writing::Closed } else { Writing::KeepAlive } } _ => unreachable!("write_body invalid state: {:?}", self.state.writing), }; self.state.writing = state; } pub(super) fn write_trailers(&mut self, trailers: HeaderMap) { debug_assert!(self.can_write_body() && self.can_buffer_body()); match self.state.writing { Writing::Body(ref encoder) => { if let Some(enc_buf) = encoder.encode_trailers(trailers) { self.io.buffer(enc_buf); self.state.writing = if encoder.is_last() || encoder.is_close_delimited() { Writing::Closed } else { Writing::KeepAlive }; } } _ => unreachable!("write_trailers invalid state: {:?}", self.state.writing), } } pub(super) fn write_body_and_end(&mut self, chunk: B) { debug_assert!(self.can_write_body() && self.can_buffer_body()); // empty chunks should be discarded at Dispatcher level debug_assert!(chunk.remaining() != 0); let state = match self.state.writing { Writing::Body(ref encoder) => { let can_keep_alive = encoder.encode_and_end(chunk, 
self.io.write_buf()); if can_keep_alive { Writing::KeepAlive } else { Writing::Closed } } _ => unreachable!("write_body invalid state: {:?}", self.state.writing), }; self.state.writing = state; } pub(super) fn end_body(&mut self) -> Result<()> { debug_assert!(self.can_write_body()); let encoder = match self.state.writing { Writing::Body(ref mut enc) => enc, _ => return Ok(()), }; // end of stream, that means we should try to eof match encoder.end() { Ok(end) => { if let Some(end) = end { self.io.buffer(end); } self.state.writing = if encoder.is_last() || encoder.is_close_delimited() { Writing::Closed } else { Writing::KeepAlive }; Ok(()) } Err(not_eof) => { self.state.writing = Writing::Closed; Err(Error::new_body_write_aborted().with(not_eof)) } } } // When we get a parse error, depending on what side we are, we might be able // to write a response before closing the connection. // // - Client: there is nothing we can do // - Server: if Response hasn't been written yet, we can send a 4xx response fn on_parse_error(&mut self, err: Error) -> Result<()> { if let Writing::Init = self.state.writing { if self.has_h2_prefix() { return Err(Error::new_version_h2()); } if let Some(msg) = T::on_error(&err) { // Drop the cached headers so as to not trigger a debug // assert in `write_head`... 
self.state.cached_headers.take(); self.write_head(msg, None); self.state.error = Some(err); return Ok(()); } } // fallback is pass the error back up Err(err) } pub(super) fn poll_flush(&mut self, cx: &mut Context<'_>) -> Poll> { ready!(Pin::new(&mut self.io).poll_flush(cx))?; self.try_keep_alive(cx); trace!("flushed({}): {:?}", T::LOG, self.state); Poll::Ready(Ok(())) } pub(super) fn poll_shutdown(&mut self, cx: &mut Context<'_>) -> Poll> { match ready!(Pin::new(self.io.io_mut()).poll_shutdown(cx)) { Ok(()) => { trace!("shut down IO complete"); Poll::Ready(Ok(())) } Err(e) => { debug!("error shutting down IO: {}", e); Poll::Ready(Err(e)) } } } /// If the read side can be cheaply drained, do so. Otherwise, close. pub(super) fn poll_drain_or_close_read(&mut self, cx: &mut Context<'_>) { if let Reading::Continue(ref decoder) = self.state.reading { // skip sending the 100-continue // just move forward to a read, in case a tiny body was included self.state.reading = Reading::Body(decoder.clone()); } let _ = self.poll_read_body(cx); // If still in Reading::Body, just give up match self.state.reading { Reading::Init | Reading::KeepAlive => { trace!("body drained") } _ => self.close_read(), } } #[inline] pub(super) fn close_read(&mut self) { self.state.close_read(); } #[inline] pub(super) fn close_write(&mut self) { self.state.close_write(); } pub(super) fn take_error(&mut self) -> Result<()> { if let Some(err) = self.state.error.take() { Err(err) } else { Ok(()) } } #[inline] pub(super) fn on_upgrade(&mut self) -> upgrade::OnUpgrade { trace!("{}: prepare possible HTTP upgrade", T::LOG); self.state.prepare_upgrade() } } impl fmt::Debug for Conn { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Conn") .field("state", &self.state) .field("io", &self.io) .finish() } } // B and T are never pinned impl Unpin for Conn {} struct State { allow_half_close: bool, /// Re-usable HeaderMap to reduce allocating new ones. 
cached_headers: Option,
/// If an error occurs when there wasn't a direct way to return it
/// back to the user, this is set.
error: Option,
/// Current keep-alive status.
keep_alive: KA,
/// If mid-message, the HTTP Method that started it.
///
/// This is used to know things such as if the message can include
/// a body or not.
method: Option,
h1_parser_config: ParserConfig,
h1_max_headers: Option,
h09_responses: bool,
/// Set to true when the Dispatcher should poll read operations
/// again. See the `maybe_notify` method for more.
notify_read: bool,
/// State of allowed reads
reading: Reading,
/// State of allowed writes
writing: Writing,
/// An expected pending HTTP upgrade.
upgrade: Option,
/// Either HTTP/1.0 or 1.1 connection
version: Version,
/// Flag to track if trailer fields are allowed to be sent
allow_trailer_fields: bool,
}

/// What the connection is currently allowed to read next.
#[derive(Debug)]
enum Reading {
    Init,
    /// A 100 Continue still needs to be written before reading this body.
    Continue(Decoder),
    Body(Decoder),
    KeepAlive,
    Closed,
}

/// What the connection is currently allowed to write next.
enum Writing {
    Init,
    Body(Encoder),
    KeepAlive,
    Closed,
}

impl fmt::Debug for State {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut builder = f.debug_struct("State");
        builder
            .field("reading", &self.reading)
            .field("writing", &self.writing)
            .field("keep_alive", &self.keep_alive);

        // Only show error field if it's interesting...
        if let Some(ref error) = self.error {
            builder.field("error", error);
        }

        if self.allow_half_close {
            builder.field("allow_half_close", &true);
        }

        // Purposefully leaving off other fields...
        builder.finish()
    }
}

impl fmt::Debug for Writing {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            Writing::Init => f.write_str("Init"),
            Writing::Body(ref enc) => f.debug_tuple("Body").field(enc).finish(),
            Writing::KeepAlive => f.write_str("KeepAlive"),
            Writing::Closed => f.write_str("Closed"),
        }
    }
}

// `ka &= enabled` — a `false` from the remote permanently disables keep-alive;
// a `true` never re-enables it.
impl std::ops::BitAndAssign for KA {
    fn bitand_assign(&mut self, enabled: bool) {
        if !enabled {
            trace!("remote disabling keep-alive");
            *self = KA::Disabled;
        }
    }
}

/// Keep-alive status of the connection; a new connection starts out `Busy`.
#[derive(Clone, Copy, Debug, Default)]
enum KA {
    Idle,
    #[default]
    Busy,
    Disabled,
}

impl KA {
    #[inline]
    fn idle(&mut self) {
        *self = KA::Idle;
    }

    #[inline]
    fn busy(&mut self) {
        *self = KA::Busy;
    }

    #[inline]
    fn disable(&mut self) {
        *self = KA::Disabled;
    }

    #[inline]
    fn status(&self) -> KA {
        *self
    }
}

impl State {
    /// Closes both directions and disables keep-alive.
    fn close(&mut self) {
        trace!("State::close()");
        self.reading = Reading::Closed;
        self.writing = Writing::Closed;
        self.keep_alive.disable();
    }

    /// Closes only the read side; keep-alive is disabled as well.
    fn close_read(&mut self) {
        trace!("State::close_read()");
        self.reading = Reading::Closed;
        self.keep_alive.disable();
    }

    /// Closes only the write side; keep-alive is disabled as well.
    fn close_write(&mut self) {
        trace!("State::close_write()");
        self.writing = Writing::Closed;
        self.keep_alive.disable();
    }

    #[inline]
    fn wants_keep_alive(&self) -> bool {
        !matches!(self.keep_alive.status(), KA::Disabled)
    }

    /// If both directions have finished a message, either go idle (keep-alive
    /// still permitted) or tear the connection down.
    fn try_keep_alive(&mut self) {
        match (&self.reading, &self.writing) {
            (&Reading::KeepAlive, &Writing::KeepAlive) => {
                if let KA::Busy = self.keep_alive.status() {
                    // NOTE(review): the turbofish here appears truncated by
                    // extraction (likely `self.idle::<T>()`) — verify against
                    // upstream before relying on this text.
                    self.idle::();
                } else {
                    trace!(
                        "try_keep_alive({}): could keep-alive, but status = {:?}",
                        T::LOG,
                        self.keep_alive
                    );
                    self.close();
                }
            }
            // One side finished its message but the other is closed:
            // nothing further can happen, so close entirely.
            (&Reading::Closed, &Writing::KeepAlive) | (&Reading::KeepAlive, &Writing::Closed) => {
                self.close()
            }
            _ => (),
        }
    }

    #[inline]
    fn disable_keep_alive(&mut self) {
        self.keep_alive.disable()
    }

    /// Marks the connection busy, unless keep-alive was already disabled.
    fn busy(&mut self) {
        if let KA::Disabled = self.keep_alive.status() {
            return;
        }

        self.keep_alive.busy();
    }

    /// Resets per-message state after a completed exchange so a new
    /// request may begin on this connection.
    fn idle(&mut self) {
        debug_assert!(!self.is_idle(), "State::idle() called while idle");

        self.method = None;
self.keep_alive.idle();
if !self.is_idle() {
    // Keep-alive flipped to disabled meanwhile: close instead of idling.
    self.close();
    return;
}

self.reading = Reading::Init;
self.writing = Writing::Init;

// If Client connection has just gone idle, the Dispatcher
// should try the poll loop one more time, so as to poll the
// pending requests stream.
self.notify_read = true;
}

#[inline]
fn is_idle(&self) -> bool {
    matches!(self.keep_alive.status(), KA::Idle)
}

#[inline]
fn is_read_closed(&self) -> bool {
    matches!(self.reading, Reading::Closed)
}

#[inline]
fn is_write_closed(&self) -> bool {
    matches!(self.writing, Writing::Closed)
}

/// Creates the pending-upgrade channel, stashing the sender half on the
/// state and handing the receiver back to the caller.
#[inline]
fn prepare_upgrade(&mut self) -> upgrade::OnUpgrade {
    let (tx, rx) = upgrade::pending();
    self.upgrade = Some(tx);
    rx
}
}

================================================ FILE: src/client/core/proto/http1/decode.rs
================================================
use std::{
    error::Error as StdError,
    fmt, io,
    task::{Context, Poll, ready},
};

use bytes::{BufMut, Bytes, BytesMut};
use http::{HeaderMap, HeaderName, HeaderValue};
use http_body::Frame;

use self::Kind::{Chunked, Eof, Length};
use super::{DecodedLength, io::MemRead, role::DEFAULT_MAX_HEADERS};

/// Maximum amount of bytes allowed in chunked extensions.
///
/// This limit is currently applied for the entire body, not per chunk.
const CHUNKED_EXTENSIONS_LIMIT: u64 = 1024 * 16;

/// Maximum number of bytes allowed for all trailer fields.
///
/// TODO: remove this when we land h1_max_header_size support
const TRAILER_LIMIT: usize = 1024 * 16;

/// Decoders to handle different Transfer-Encodings.
///
/// If a message body does not include a Transfer-Encoding, it *should*
/// include a Content-Length header.
#[derive(Clone, PartialEq)]
pub(crate) struct Decoder {
    kind: Kind,
}

#[derive(Debug, Clone, PartialEq)]
enum Kind {
    /// A Reader used when a Content-Length header is passed with a positive integer.
    Length(u64),
    /// A Reader used when Transfer-Encoding is `chunked`.
Chunked {
    // Current position in the chunked-coding state machine.
    state: ChunkedState,
    // Bytes remaining in the chunk currently being read.
    chunk_len: u64,
    // Running total of chunk-extension bytes seen, later checked
    // against CHUNKED_EXTENSIONS_LIMIT.
    extensions_cnt: u64,
    // Raw trailer bytes accumulated after the terminating zero-length chunk.
    trailers_buf: Option,
    // Number of trailer fields counted so far.
    trailers_cnt: usize,
    h1_max_headers: Option,
    h1_max_header_size: Option,
},
/// A Reader used for responses that don't indicate a length or chunked.
///
/// The bool tracks when EOF is seen on the transport.
///
/// Note: This should only be used for `Response`s. It is illegal for a
/// `Request` to be made with both `Content-Length` and
/// `Transfer-Encoding: chunked` missing, as explained from the spec:
///
/// > If a Transfer-Encoding header field is present in a response and
/// > the chunked transfer coding is not the final encoding, the
/// > message body length is determined by reading the connection until
/// > it is closed by the server. If a Transfer-Encoding header field
/// > is present in a request and the chunked transfer coding is not
/// > the final encoding, the message body length cannot be determined
/// > reliably; the server MUST respond with the 400 (Bad Request)
/// > status code and then close the connection.
Eof(bool),
}

/// States of the chunked-transfer-coding parser; `step` advances one state
/// (consuming one byte, or a run of body bytes) per transition.
#[derive(Debug, PartialEq, Clone, Copy)]
enum ChunkedState {
    Start,
    Size,
    SizeLws,
    Extension,
    SizeLf,
    Body,
    BodyCr,
    BodyLf,
    Trailer,
    TrailerLf,
    EndCr,
    EndLf,
    End,
}

// Mutable state threaded through `ChunkedState::step`, bundled to keep the
// step signature manageable.
struct StepArgs<'a> {
    // Remaining byte count of the chunk currently being decoded.
    chunk_size: &'a mut u64,
    // Receives body bytes produced by the `Body` state, if any.
    chunk_buf: &'a mut Option,
    // Running count of chunk-extension bytes, for the extensions limit.
    extensions_cnt: &'a mut u64,
    // Accumulates raw trailer bytes.
    trailers_buf: &'a mut Option,
    // Count of trailer fields seen so far.
    trailers_cnt: &'a mut usize,
    max_headers_cnt: usize,
    max_headers_bytes: usize,
}

// ===== impl Decoder =====

impl Decoder {
    /// Decoder for a body whose size is fixed by a Content-Length of `x` bytes.
    #[inline]
    fn length(x: u64) -> Decoder {
        Decoder {
            kind: Kind::Length(x),
        }
    }

    /// Decoder for a close-delimited body; the inner flag starts `false`
    /// and flips once EOF is observed on the transport.
    #[inline]
    fn eof() -> Decoder {
        Decoder {
            kind: Kind::Eof(false),
        }
    }

    /// Decoder for a `Transfer-Encoding: chunked` body, with optional caps
    /// on trailer count and size.
    #[inline]
    fn chunked(h1_max_headers: Option, h1_max_header_size: Option) -> Decoder {
        Decoder {
            kind: Kind::Chunked {
                state: ChunkedState::new(),
                chunk_len: 0,
                extensions_cnt: 0,
                trailers_buf: None,
                trailers_cnt: 0,
                h1_max_headers,
                h1_max_header_size,
            },
        }
    }

    /// Selects the decoder kind from the parsed message's `DecodedLength`.
    pub(super) fn new(
        len: DecodedLength,
        h1_max_headers: Option,
        h1_max_header_size: Option,
    ) -> Self {
        match len {
            DecodedLength::CHUNKED => Decoder::chunked(h1_max_headers, h1_max_header_size),
            DecodedLength::CLOSE_DELIMITED => Decoder::eof(),
            length => Decoder::length(length.danger_len()),
        }
    }

    /// True once the body has been fully consumed: zero bytes left, the
    /// final chunk was seen, or EOF was observed on a close-delimited body.
    #[inline]
    pub(crate) fn is_eof(&self) -> bool {
        matches!(
            self.kind,
            Length(0)
                | Chunked {
                    state: ChunkedState::End,
                    ..
} | Eof(true) ) } pub(crate) fn decode( &mut self, cx: &mut Context<'_>, body: &mut R, ) -> Poll, io::Error>> { trace!("decode; state={:?}", self.kind); match self.kind { Length(ref mut remaining) => { if *remaining == 0 { Poll::Ready(Ok(Frame::data(Bytes::new()))) } else { let to_read = *remaining as usize; let buf = ready!(body.read_mem(cx, to_read))?; let num = buf.as_ref().len() as u64; if num > *remaining { *remaining = 0; } else if num == 0 { return Poll::Ready(Err(io::Error::new( io::ErrorKind::UnexpectedEof, IncompleteBody, ))); } else { *remaining -= num; } Poll::Ready(Ok(Frame::data(buf))) } } Chunked { ref mut state, ref mut chunk_len, ref mut extensions_cnt, ref mut trailers_buf, ref mut trailers_cnt, ref h1_max_headers, ref h1_max_header_size, } => { let h1_max_headers = h1_max_headers.unwrap_or(DEFAULT_MAX_HEADERS); let h1_max_header_size = h1_max_header_size.unwrap_or(TRAILER_LIMIT); loop { let mut buf = None; // advances the chunked state *state = ready!(state.step( cx, body, StepArgs { chunk_size: chunk_len, extensions_cnt, chunk_buf: &mut buf, trailers_buf, trailers_cnt, max_headers_cnt: h1_max_headers, max_headers_bytes: h1_max_header_size, } ))?; if *state == ChunkedState::End { trace!("end of chunked"); if trailers_buf.is_some() { trace!("found possible trailers"); // decoder enforces that trailers count will not exceed h1_max_headers if *trailers_cnt >= h1_max_headers { return Poll::Ready(Err(io::Error::new( io::ErrorKind::InvalidData, "chunk trailers count overflow", ))); } match decode_trailers( &mut trailers_buf.take().expect("Trailer is None"), *trailers_cnt, ) { Ok(headers) => { return Poll::Ready(Ok(Frame::trailers(headers))); } Err(e) => { return Poll::Ready(Err(e)); } } } return Poll::Ready(Ok(Frame::data(Bytes::new()))); } if let Some(buf) = buf { return Poll::Ready(Ok(Frame::data(buf))); } } } Eof(ref mut is_eof) => { if *is_eof { Poll::Ready(Ok(Frame::data(Bytes::new()))) } else { // 8192 chosen because its about 2 packets, there 
probably // won't be that much available, so don't have MemReaders // allocate buffers to big body.read_mem(cx, 8192).map_ok(|slice| { *is_eof = slice.is_empty(); Frame::data(slice) }) } } } } #[cfg(test)] async fn decode_fut(&mut self, body: &mut R) -> Result, io::Error> { std::future::poll_fn(move |cx| self.decode(cx, body)).await } } impl fmt::Debug for Decoder { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Debug::fmt(&self.kind, f) } } macro_rules! byte ( ($rdr:ident, $cx:expr) => ({ let buf = ready!($rdr.read_mem($cx, 1))?; if !buf.is_empty() { buf[0] } else { return Poll::Ready(Err(io::Error::new(io::ErrorKind::UnexpectedEof, "unexpected EOF during chunk size line"))); } }) ); macro_rules! or_overflow { ($e:expr) => ( match $e { Some(val) => val, None => return Poll::Ready(Err(io::Error::new( io::ErrorKind::InvalidData, "invalid chunk size: overflow", ))), } ) } macro_rules! put_u8 { ($trailers_buf:expr, $byte:expr, $limit:expr) => { $trailers_buf.put_u8($byte); if $trailers_buf.len() >= $limit { return Poll::Ready(Err(io::Error::new( io::ErrorKind::InvalidData, "chunk trailers bytes over limit", ))); } }; } // ===== impl ChunkedState ===== impl ChunkedState { fn new() -> ChunkedState { ChunkedState::Start } #[allow(clippy::too_many_arguments)] fn step( &self, cx: &mut Context<'_>, body: &mut R, step: StepArgs<'_>, ) -> Poll> { use self::ChunkedState::*; match *self { Start => ChunkedState::read_start(cx, body, step.chunk_size), Size => ChunkedState::read_size(cx, body, step.chunk_size), SizeLws => ChunkedState::read_size_lws(cx, body), Extension => ChunkedState::read_extension(cx, body, step.extensions_cnt), SizeLf => ChunkedState::read_size_lf(cx, body, *step.chunk_size), Body => ChunkedState::read_body(cx, body, step.chunk_size, step.chunk_buf), BodyCr => ChunkedState::read_body_cr(cx, body), BodyLf => ChunkedState::read_body_lf(cx, body), Trailer => { ChunkedState::read_trailer(cx, body, step.trailers_buf, step.max_headers_bytes) } 
TrailerLf => ChunkedState::read_trailer_lf( cx, body, step.trailers_buf, step.trailers_cnt, step.max_headers_cnt, step.max_headers_bytes, ), EndCr => ChunkedState::read_end_cr(cx, body, step.trailers_buf, step.max_headers_bytes), EndLf => ChunkedState::read_end_lf(cx, body, step.trailers_buf, step.max_headers_bytes), End => Poll::Ready(Ok(ChunkedState::End)), } } fn read_start( cx: &mut Context<'_>, rdr: &mut R, size: &mut u64, ) -> Poll> { trace!("Read chunk start"); let radix = 16; match byte!(rdr, cx) { b @ b'0'..=b'9' => { *size = or_overflow!(size.checked_mul(radix)); *size = or_overflow!(size.checked_add((b - b'0') as u64)); } b @ b'a'..=b'f' => { *size = or_overflow!(size.checked_mul(radix)); *size = or_overflow!(size.checked_add((b + 10 - b'a') as u64)); } b @ b'A'..=b'F' => { *size = or_overflow!(size.checked_mul(radix)); *size = or_overflow!(size.checked_add((b + 10 - b'A') as u64)); } _ => { return Poll::Ready(Err(io::Error::new( io::ErrorKind::InvalidInput, "Invalid chunk size line: missing size digit", ))); } } Poll::Ready(Ok(ChunkedState::Size)) } fn read_size( cx: &mut Context<'_>, rdr: &mut R, size: &mut u64, ) -> Poll> { trace!("Read chunk hex size"); let radix = 16; match byte!(rdr, cx) { b @ b'0'..=b'9' => { *size = or_overflow!(size.checked_mul(radix)); *size = or_overflow!(size.checked_add((b - b'0') as u64)); } b @ b'a'..=b'f' => { *size = or_overflow!(size.checked_mul(radix)); *size = or_overflow!(size.checked_add((b + 10 - b'a') as u64)); } b @ b'A'..=b'F' => { *size = or_overflow!(size.checked_mul(radix)); *size = or_overflow!(size.checked_add((b + 10 - b'A') as u64)); } b'\t' | b' ' => return Poll::Ready(Ok(ChunkedState::SizeLws)), b';' => return Poll::Ready(Ok(ChunkedState::Extension)), b'\r' => return Poll::Ready(Ok(ChunkedState::SizeLf)), _ => { return Poll::Ready(Err(io::Error::new( io::ErrorKind::InvalidInput, "Invalid chunk size line: Invalid Size", ))); } } Poll::Ready(Ok(ChunkedState::Size)) } fn read_size_lws( cx: &mut 
// NOTE(review): this extracted chunk begins mid-signature of `read_size_lws`;
// the opening `fn read_size_lws<...>(cx: &mut` lies in the previous chunk.
// Generic parameter lists appear to have been stripped by the extraction
// throughout this file (e.g. `Poll>` was presumably
// `Poll<io::Result<ChunkedState>>`, `&mut Option,` presumably
// `&mut Option<Bytes>`); restore from the original file before compiling.
Context<'_>,
        rdr: &mut R,
    ) -> Poll> {
        trace!("read_size_lws");
        match byte!(rdr, cx) {
            // LWS can follow the chunk size, but no more digits can come
            b'\t' | b' ' => Poll::Ready(Ok(ChunkedState::SizeLws)),
            b';' => Poll::Ready(Ok(ChunkedState::Extension)),
            b'\r' => Poll::Ready(Ok(ChunkedState::SizeLf)),
            _ => Poll::Ready(Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                "Invalid chunk size linear white space",
            ))),
        }
    }

    // Skips over one byte of a chunk extension. Extensions are never parsed or
    // surfaced; their bytes are only counted against CHUNKED_EXTENSIONS_LIMIT
    // so a malicious peer cannot stream unbounded extension data.
    fn read_extension(
        cx: &mut Context<'_>,
        rdr: &mut R,
        extensions_cnt: &mut u64,
    ) -> Poll> {
        trace!("read_extension");
        // We don't care about extensions really at all. Just ignore them.
        // They "end" at the next CRLF.
        //
        // However, some implementations may not check for the CR, so to save
        // them from themselves, we reject extensions containing plain LF as
        // well.
        match byte!(rdr, cx) {
            b'\r' => Poll::Ready(Ok(ChunkedState::SizeLf)),
            b'\n' => Poll::Ready(Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "invalid chunk extension contains newline",
            ))),
            _ => {
                *extensions_cnt += 1;
                if *extensions_cnt >= CHUNKED_EXTENSIONS_LIMIT {
                    Poll::Ready(Err(io::Error::new(
                        io::ErrorKind::InvalidData,
                        "chunk extensions over limit",
                    )))
                } else {
                    Poll::Ready(Ok(ChunkedState::Extension))
                }
            } // no supported extensions
        }
    }

    // Consumes the LF terminating the chunk-size line. A size of zero marks the
    // last chunk, so we transition to reading the terminating CRLF / trailers.
    fn read_size_lf(
        cx: &mut Context<'_>,
        rdr: &mut R,
        size: u64,
    ) -> Poll> {
        trace!("Chunk size is {:?}", size);
        match byte!(rdr, cx) {
            b'\n' => {
                if size == 0 {
                    Poll::Ready(Ok(ChunkedState::EndCr))
                } else {
                    debug!("incoming chunked header: {0:#X} ({0} bytes)", size);
                    Poll::Ready(Ok(ChunkedState::Body))
                }
            }
            _ => Poll::Ready(Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                "Invalid chunk size LF",
            ))),
        }
    }

    // Reads payload bytes of the current chunk, decrementing `rem`. A
    // zero-length read while `rem > 0` means the peer closed mid-chunk, which
    // is surfaced as UnexpectedEof(IncompleteBody).
    fn read_body(
        cx: &mut Context<'_>,
        rdr: &mut R,
        rem: &mut u64,
        buf: &mut Option,
    ) -> Poll> {
        trace!("Chunked read, remaining={:?}", rem);

        // cap remaining bytes at the max capacity of usize
        let rem_cap = match *rem {
            r if r > usize::MAX as u64 => usize::MAX,
            r => r as usize,
        };

        let to_read = rem_cap;
        let slice = ready!(rdr.read_mem(cx, to_read))?;
        let count = slice.len();

        if count == 0 {
            *rem = 0;
            return Poll::Ready(Err(io::Error::new(
                io::ErrorKind::UnexpectedEof,
                IncompleteBody,
            )));
        }
        *buf = Some(slice);
        *rem -= count as u64;

        if *rem > 0 {
            Poll::Ready(Ok(ChunkedState::Body))
        } else {
            Poll::Ready(Ok(ChunkedState::BodyCr))
        }
    }

    // Expects the CR of the CRLF terminating a chunk's payload.
    fn read_body_cr(
        cx: &mut Context<'_>,
        rdr: &mut R,
    ) -> Poll> {
        match byte!(rdr, cx) {
            b'\r' => Poll::Ready(Ok(ChunkedState::BodyLf)),
            _ => Poll::Ready(Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                "Invalid chunk body CR",
            ))),
        }
    }

    // Expects the LF of the CRLF terminating a chunk's payload, then loops
    // back to reading the next chunk-size line.
    fn read_body_lf(
        cx: &mut Context<'_>,
        rdr: &mut R,
    ) -> Poll> {
        match byte!(rdr, cx) {
            b'\n' => Poll::Ready(Ok(ChunkedState::Start)),
            _ => Poll::Ready(Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                "Invalid chunk body LF",
            ))),
        }
    }

    // Accumulates one trailer byte into `trailers_buf`; `put_u8!` enforces
    // `h1_max_header_size` so trailers cannot grow without bound.
    fn read_trailer(
        cx: &mut Context<'_>,
        rdr: &mut R,
        trailers_buf: &mut Option,
        h1_max_header_size: usize,
    ) -> Poll> {
        trace!("read_trailer");
        let byte = byte!(rdr, cx);

        put_u8!(
            trailers_buf.as_mut().expect("trailers_buf is None"),
            byte,
            h1_max_header_size
        );

        match byte {
            b'\r' => Poll::Ready(Ok(ChunkedState::TrailerLf)),
            _ => Poll::Ready(Ok(ChunkedState::Trailer)),
        }
    }

    // Completes one trailer line (its LF) and counts it against
    // `h1_max_headers`; a non-LF byte after CR is a malformed trailer.
    fn read_trailer_lf(
        cx: &mut Context<'_>,
        rdr: &mut R,
        trailers_buf: &mut Option,
        trailers_cnt: &mut usize,
        h1_max_headers: usize,
        h1_max_header_size: usize,
    ) -> Poll> {
        let byte = byte!(rdr, cx);
        match byte {
            b'\n' => {
                if *trailers_cnt >= h1_max_headers {
                    return Poll::Ready(Err(io::Error::new(
                        io::ErrorKind::InvalidData,
                        "chunk trailers count overflow",
                    )));
                }
                *trailers_cnt += 1;

                put_u8!(
                    trailers_buf.as_mut().expect("trailers_buf is None"),
                    byte,
                    h1_max_header_size
                );

                Poll::Ready(Ok(ChunkedState::EndCr))
            }
            _ => Poll::Ready(Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                "Invalid trailer end LF",
            ))),
        }
    }

    // After the zero-size chunk: a CR here begins the final CRLF; any other
    // byte starts a trailer field, lazily allocating the trailer buffer.
    fn read_end_cr(
        cx: &mut Context<'_>,
        rdr: &mut R,
        trailers_buf: &mut Option,
        h1_max_header_size: usize,
    ) -> Poll> {
        let byte = byte!(rdr, cx);
        match byte {
            b'\r' => {
                if let Some(trailers_buf) = trailers_buf {
                    put_u8!(trailers_buf, byte, h1_max_header_size);
                }
                Poll::Ready(Ok(ChunkedState::EndLf))
            }
            byte => {
                match trailers_buf {
                    None => {
                        // 64 will fit a single Expires header without reallocating
                        let mut buf = BytesMut::with_capacity(64);
                        buf.put_u8(byte);
                        *trailers_buf = Some(buf);
                    }
                    Some(trailers_buf) => {
                        put_u8!(trailers_buf, byte, h1_max_header_size);
                    }
                }

                Poll::Ready(Ok(ChunkedState::Trailer))
            }
        }
    }

    // Final LF of the message; transitions the decoder to its terminal state.
    fn read_end_lf(
        cx: &mut Context<'_>,
        rdr: &mut R,
        trailers_buf: &mut Option,
        h1_max_header_size: usize,
    ) -> Poll> {
        let byte = byte!(rdr, cx);
        match byte {
            b'\n' => {
                if let Some(trailers_buf) = trailers_buf {
                    put_u8!(trailers_buf, byte, h1_max_header_size);
                }
                Poll::Ready(Ok(ChunkedState::End))
            }
            _ => Poll::Ready(Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                "Invalid chunk end LF",
            ))),
        }
    }
}

// TODO: disallow Transfer-Encoding, Content-Length, Trailer, etc in trailers ??

// Parses the buffered raw trailer bytes into a HeaderMap using httparse.
// `count` is the number of trailer lines counted during decoding, used to
// size the scratch header array exactly.
// NOTE(review): `trailers.insert(...)` keeps only the LAST value for a
// repeated trailer name; confirm dropping duplicates (vs. `append`) is the
// intended behavior.
fn decode_trailers(buf: &mut BytesMut, count: usize) -> Result {
    let mut trailers = HeaderMap::new();
    let mut headers = vec![httparse::EMPTY_HEADER; count];
    let res = httparse::parse_headers(buf, &mut headers);
    match res {
        Ok(httparse::Status::Complete((_, headers))) => {
            for header in headers.iter() {
                use std::convert::TryFrom;
                let name = match HeaderName::try_from(header.name) {
                    Ok(name) => name,
                    Err(_) => {
                        return Err(io::Error::new(
                            io::ErrorKind::InvalidInput,
                            format!("Invalid header name: {:?}", &header),
                        ));
                    }
                };
                let value = match HeaderValue::from_bytes(header.value) {
                    Ok(value) => value,
                    Err(_) => {
                        return Err(io::Error::new(
                            io::ErrorKind::InvalidInput,
                            format!("Invalid header value: {:?}", &header),
                        ));
                    }
                };
                trailers.insert(name, value);
            }
            Ok(trailers)
        }
        Ok(httparse::Status::Partial) => Err(io::Error::new(
            io::ErrorKind::InvalidInput,
            "Partial header",
        )),
        Err(e) => Err(io::Error::new(io::ErrorKind::InvalidInput, e)),
    }
}

// Marker error used as the source of an UnexpectedEof io::Error when the
// connection ends before the declared body length was received.
#[derive(Debug)]
struct IncompleteBody;

// === impl IncompleteBody ===

impl fmt::Display for IncompleteBody {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // NOTE(review): the string literal below is split across an extraction
        // chunk boundary; its remainder ("before message length reached") is in
        // the next chunk.
        write!(f, "end of file 
before message length reached")
    }
}

impl StdError for IncompleteBody {}

#[cfg(test)]
mod tests {
    use std::{pin::Pin, time::Duration};

    use tokio::io::{AsyncRead, ReadBuf};

    use super::*;

    // NOTE(review): generic parameter lists inside `<...>` appear stripped by
    // the text extraction in this chunk (e.g. `Poll>` was presumably
    // `Poll<io::Result<Bytes>>`); restore from the original file before
    // compiling.

    // Test-only MemRead over a byte slice: hands out up to `len` bytes and
    // advances the slice; an empty slice reads as EOF (empty Bytes).
    impl MemRead for &[u8] {
        fn read_mem(&mut self, _: &mut Context<'_>, len: usize) -> Poll> {
            let n = std::cmp::min(len, self.len());
            if n > 0 {
                let (a, b) = self.split_at(n);
                let buf = Bytes::copy_from_slice(a);
                *self = b;
                Poll::Ready(Ok(buf))
            } else {
                Poll::Ready(Ok(Bytes::new()))
            }
        }
    }

    // Test-only MemRead adapter over any AsyncRead, used to exercise the
    // decoder against mock IO that can block mid-stream.
    impl MemRead for &mut (dyn AsyncRead + Unpin) {
        fn read_mem(&mut self, cx: &mut Context<'_>, len: usize) -> Poll> {
            let mut v = vec![0; len];
            let mut buf = ReadBuf::new(&mut v);
            ready!(Pin::new(self).poll_read(cx, &mut buf)?);
            Poll::Ready(Ok(Bytes::copy_from_slice(buf.filled())))
        }
    }

    // Test-only MemRead over Bytes: splits off up to `len` bytes.
    impl MemRead for Bytes {
        fn read_mem(&mut self, _: &mut Context<'_>, len: usize) -> Poll> {
            let n = std::cmp::min(len, self.len());
            let ret = self.split_to(n);
            Poll::Ready(Ok(ret))
        }
    }

    // Drives the ChunkedState machine over a literal chunk-size line and
    // checks the parsed size (via `read`) or the expected error kind
    // (via `read_err`).
    #[tokio::test]
    async fn test_read_chunk_size() {
        use std::io::ErrorKind::{InvalidData, InvalidInput, UnexpectedEof};

        async fn read(s: &str) -> u64 {
            let mut state = ChunkedState::new();
            let rdr = &mut s.as_bytes();
            let mut size = 0;
            let mut ext_cnt = 0;
            let mut trailers_cnt = 0;
            loop {
                let result = std::future::poll_fn(|cx| {
                    state.step(
                        cx,
                        rdr,
                        StepArgs {
                            chunk_size: &mut size,
                            extensions_cnt: &mut ext_cnt,
                            chunk_buf: &mut None,
                            trailers_buf: &mut None,
                            trailers_cnt: &mut trailers_cnt,
                            max_headers_cnt: DEFAULT_MAX_HEADERS,
                            max_headers_bytes: TRAILER_LIMIT,
                        },
                    )
                })
                .await;
                let desc = format!("read_size failed for {s:?}");
                state = result.expect(&desc);
                if state == ChunkedState::Body || state == ChunkedState::EndCr {
                    break;
                }
            }
            size
        }

        async fn read_err(s: &str, expected_err: io::ErrorKind) {
            let mut state = ChunkedState::new();
            let rdr = &mut s.as_bytes();
            let mut size = 0;
            let mut ext_cnt = 0;
            let mut trailers_cnt = 0;
            loop {
                let result = std::future::poll_fn(|cx| {
                    state.step(
                        cx,
                        rdr,
                        StepArgs {
                            chunk_size: &mut size,
                            extensions_cnt: &mut ext_cnt,
                            chunk_buf: &mut None,
                            trailers_buf: &mut None,
                            trailers_cnt: &mut trailers_cnt,
                            max_headers_cnt: DEFAULT_MAX_HEADERS,
                            max_headers_bytes: TRAILER_LIMIT,
                        },
                    )
                })
                .await;
                state = match result {
                    Ok(s) => s,
                    Err(e) => {
                        assert!(
                            expected_err == e.kind(),
                            "Reading {:?}, expected {:?}, but got {:?}",
                            s,
                            expected_err,
                            e.kind()
                        );
                        return;
                    }
                };
                if state == ChunkedState::Body || state == ChunkedState::End {
                    panic!("Was Ok. Expected Err for {s:?}");
                }
            }
        }

        assert_eq!(1, read("1\r\n").await);
        assert_eq!(1, read("01\r\n").await);
        assert_eq!(0, read("0\r\n").await);
        assert_eq!(0, read("00\r\n").await);
        assert_eq!(10, read("A\r\n").await);
        assert_eq!(10, read("a\r\n").await);
        assert_eq!(255, read("Ff\r\n").await);
        assert_eq!(255, read("Ff \r\n").await);
        // Missing LF or CRLF
        read_err("F\rF", InvalidInput).await;
        read_err("F", UnexpectedEof).await;
        // Missing digit
        read_err("\r\n\r\n", InvalidInput).await;
        read_err("\r\n", InvalidInput).await;
        // Invalid hex digit
        read_err("X\r\n", InvalidInput).await;
        read_err("1X\r\n", InvalidInput).await;
        read_err("-\r\n", InvalidInput).await;
        read_err("-1\r\n", InvalidInput).await;
        // Acceptable (if not fully valid) extensions do not influence the size
        assert_eq!(1, read("1;extension\r\n").await);
        assert_eq!(10, read("a;ext name=value\r\n").await);
        assert_eq!(1, read("1;extension;extension2\r\n").await);
        assert_eq!(1, read("1;;; ;\r\n").await);
        assert_eq!(2, read("2; extension...\r\n").await);
        assert_eq!(3, read("3 ; extension=123\r\n").await);
        assert_eq!(3, read("3 ;\r\n").await);
        assert_eq!(3, read("3 ; \r\n").await);
        // Invalid extensions cause an error
        read_err("1 invalid extension\r\n", InvalidInput).await;
        read_err("1 A\r\n", InvalidInput).await;
        read_err("1;no CRLF", UnexpectedEof).await;
        read_err("1;reject\nnewlines\r\n", InvalidData).await;
        // Overflow
        read_err("f0000000000000003\r\n", InvalidData).await;
    }

    // A length-delimited body that ends early must yield UnexpectedEof.
    #[tokio::test]
    async fn test_read_sized_early_eof() {
        let mut bytes = &b"foo bar"[..];
        let mut decoder = Decoder::length(10);
        assert_eq!(
            decoder
                .decode_fut(&mut bytes)
                .await
                .unwrap()
                .data_ref()
                .unwrap()
                .len(),
            7
        );
        let e = decoder.decode_fut(&mut bytes).await.unwrap_err();
        assert_eq!(e.kind(), io::ErrorKind::UnexpectedEof);
    }

    // A chunked body truncated mid-chunk must yield UnexpectedEof.
    #[tokio::test]
    async fn test_read_chunked_early_eof() {
        let mut bytes = &b"\
            9\r\n\
            foo bar\
        "[..];
        let mut decoder = Decoder::chunked(None, None);
        assert_eq!(
            decoder
                .decode_fut(&mut bytes)
                .await
                .unwrap()
                .data_ref()
                .unwrap()
                .len(),
            7
        );
        let e = decoder.decode_fut(&mut bytes).await.unwrap_err();
        assert_eq!(e.kind(), io::ErrorKind::UnexpectedEof);
    }

    // A whole chunk available in one read is decoded in a single call.
    #[tokio::test]
    async fn test_read_chunked_single_read() {
        let mut mock_buf = &b"10\r\n1234567890abcdef\r\n0\r\n"[..];
        let buf = Decoder::chunked(None, None)
            .decode_fut(&mut mock_buf)
            .await
            .expect("decode")
            .into_data()
            .expect("unknown frame type");
        assert_eq!(16, buf.len());
        let result = String::from_utf8(buf.as_ref().to_vec()).expect("decode String");
        assert_eq!("1234567890abcdef", &result);
    }

    #[tokio::test]
    async fn test_read_chunked_with_missing_zero_digit() {
        // After reading a valid chunk, the ending is missing a zero.
        let mut mock_buf = &b"1\r\nZ\r\n\r\n\r\n"[..];
        let mut decoder = Decoder::chunked(None, None);
        let buf = decoder
            .decode_fut(&mut mock_buf)
            .await
            .expect("decode")
            .into_data()
            .expect("unknown frame type");
        assert_eq!("Z", buf);

        let err = decoder
            .decode_fut(&mut mock_buf)
            .await
            .expect_err("decode 2");
        assert_eq!(err.kind(), io::ErrorKind::InvalidInput);
    }

    // The extension byte limit is cumulative across chunks, not per chunk.
    #[tokio::test]
    async fn test_read_chunked_extensions_over_limit() {
        // construct a chunked body where each individual chunked extension
        // is totally fine, but combined is over the limit.
        let per_chunk = super::CHUNKED_EXTENSIONS_LIMIT * 2 / 3;
        let mut scratch = vec![];
        for _ in 0..2 {
            scratch.extend(b"1;");
            scratch.extend(b"x".repeat(per_chunk as usize));
            scratch.extend(b"\r\nA\r\n");
        }
        scratch.extend(b"0\r\n\r\n");
        let mut mock_buf = Bytes::from(scratch);

        let mut decoder = Decoder::chunked(None, None);
        let buf1 = decoder
            .decode_fut(&mut mock_buf)
            .await
            .expect("decode1")
            .into_data()
            .expect("unknown frame type");
        assert_eq!(&buf1[..], b"A");

        let err = decoder
            .decode_fut(&mut mock_buf)
            .await
            .expect_err("decode2");
        assert_eq!(err.kind(), io::ErrorKind::InvalidData);
        assert_eq!(err.to_string(), "chunk extensions over limit");
    }

    // A trailer line whose CR is not followed by LF is rejected.
    #[tokio::test]
    async fn test_read_chunked_trailer_with_missing_lf() {
        let mut mock_buf = &b"10\r\n1234567890abcdef\r\n0\r\nbad\r\r\n"[..];
        let mut decoder = Decoder::chunked(None, None);
        decoder.decode_fut(&mut mock_buf).await.expect("decode");
        let e = decoder.decode_fut(&mut mock_buf).await.unwrap_err();
        assert_eq!(e.kind(), io::ErrorKind::InvalidInput);
    }

    // Reads after the terminal chunk keep returning empty (EOF) frames.
    #[tokio::test]
    async fn test_read_chunked_after_eof() {
        let mut mock_buf = &b"10\r\n1234567890abcdef\r\n0\r\n\r\n"[..];
        let mut decoder = Decoder::chunked(None, None);

        // normal read
        let buf = decoder
            .decode_fut(&mut mock_buf)
            .await
            .unwrap()
            .into_data()
            .expect("unknown frame type");
        assert_eq!(16, buf.len());
        let result = String::from_utf8(buf.as_ref().to_vec()).expect("decode String");
        assert_eq!("1234567890abcdef", &result);

        // eof read
        let buf = decoder
            .decode_fut(&mut mock_buf)
            .await
            .expect("decode")
            .into_data()
            .expect("unknown frame type");
        assert_eq!(0, buf.len());

        // ensure read after eof also returns eof
        let buf = decoder
            .decode_fut(&mut mock_buf)
            .await
            .expect("decode")
            .into_data()
            .expect("unknown frame type");
        assert_eq!(0, buf.len());
    }

    // perform an async read using a custom buffer size and causing a blocking
    // read at the specified byte
    async fn read_async(mut decoder: Decoder, content: &[u8], block_at: usize) -> String {
        let mut outs =
Vec::new();
        let mut ins = if block_at == 0 {
            tokio_test::io::Builder::new()
                .wait(Duration::from_millis(10))
                .read(content)
                .build()
        } else {
            tokio_test::io::Builder::new()
                .read(&content[..block_at])
                .wait(Duration::from_millis(10))
                .read(&content[block_at..])
                .build()
        };
        let mut ins = &mut ins as &mut (dyn AsyncRead + Unpin);
        loop {
            let buf = decoder
                .decode_fut(&mut ins)
                .await
                .expect("unexpected decode error")
                .into_data()
                .expect("unexpected frame type");
            if buf.is_empty() {
                break; // eof
            }
            outs.extend(buf.as_ref());
        }
        String::from_utf8(outs).expect("decode String")
    }

    // iterate over the different ways that this async read could go.
    // tests blocking a read at each byte along the content - The shotgun approach
    async fn all_async_cases(content: &str, expected: &str, decoder: Decoder) {
        let content_len = content.len();
        for block_at in 0..content_len {
            let actual = read_async(decoder.clone(), content.as_bytes(), block_at).await;
            assert_eq!(expected, &actual) //, "Failed async. Blocking at {}", block_at);
        }
    }

    #[tokio::test]
    async fn test_read_length_async() {
        let content = "foobar";
        all_async_cases(content, content, Decoder::length(content.len() as u64)).await;
    }

    #[tokio::test]
    async fn test_read_chunked_async() {
        let content = "3\r\nfoo\r\n3\r\nbar\r\n0\r\n\r\n";
        let expected = "foobar";
        all_async_cases(content, expected, Decoder::chunked(None, None)).await;
    }

    #[tokio::test]
    async fn test_read_eof_async() {
        let content = "foobar";
        all_async_cases(content, content, Decoder::eof()).await;
    }

    // decode_trailers parses buffered trailer bytes into a HeaderMap.
    #[test]
    fn test_decode_trailers() {
        let mut buf = BytesMut::new();
        buf.extend_from_slice(
            b"Expires: Wed, 21 Oct 2015 07:28:00 GMT\r\nX-Stream-Error: failed to decode\r\n\r\n",
        );
        let headers = decode_trailers(&mut buf, 2).expect("decode_trailers");
        assert_eq!(headers.len(), 2);
        assert_eq!(
            headers.get("Expires").unwrap(),
            "Wed, 21 Oct 2015 07:28:00 GMT"
        );
        assert_eq!(headers.get("X-Stream-Error").unwrap(), "failed to decode");
    }

    // Exactly `h1_max_headers` trailers trips the count limit (the check is
    // `>=` before increment, so the limit-th trailer errors).
    #[tokio::test]
    async fn test_trailer_max_headers_enforced() {
        let h1_max_headers = 10;
        let mut scratch = vec![];
        scratch.extend(b"10\r\n1234567890abcdef\r\n0\r\n");
        for i in 0..h1_max_headers {
            scratch.extend(format!("trailer{i}: {i}\r\n").as_bytes());
        }
        scratch.extend(b"\r\n");
        let mut mock_buf = Bytes::from(scratch);

        let mut decoder = Decoder::chunked(Some(h1_max_headers), None);

        // ready chunked body
        let buf = decoder
            .decode_fut(&mut mock_buf)
            .await
            .unwrap()
            .into_data()
            .expect("unknown frame type");
        assert_eq!(16, buf.len());

        // eof read
        let err = decoder
            .decode_fut(&mut mock_buf)
            .await
            .expect_err("trailer fields over limit");
        assert_eq!(err.kind(), io::ErrorKind::InvalidData);
    }

    // A single trailer larger than the byte limit is rejected.
    #[tokio::test]
    async fn test_trailer_max_header_size_huge_trailer() {
        let max_header_size = 1024;
        let mut scratch = vec![];
        scratch.extend(b"10\r\n1234567890abcdef\r\n0\r\n");
        scratch.extend(format!("huge_trailer: {}\r\n", "x".repeat(max_header_size)).as_bytes());
        scratch.extend(b"\r\n");
        let mut mock_buf = Bytes::from(scratch);

        let mut decoder = Decoder::chunked(None, Some(max_header_size));

        // ready chunked body
        let buf = decoder
            .decode_fut(&mut mock_buf)
            .await
            .unwrap()
            .into_data()
            .expect("unknown frame type");
        assert_eq!(16, buf.len());

        // eof read
        let err = decoder
            .decode_fut(&mut mock_buf)
            .await
            .expect_err("trailers over limit");
        assert_eq!(err.kind(), io::ErrorKind::InvalidData);
    }

    // The byte limit is cumulative: many small trailers whose total exceeds
    // it are rejected even though each one individually fits.
    #[tokio::test]
    async fn test_trailer_max_header_size_many_small_trailers() {
        let max_headers = 10;
        let header_size = 64;

        let mut scratch = vec![];
        scratch.extend(b"10\r\n1234567890abcdef\r\n0\r\n");
        for i in 0..max_headers {
            scratch.extend(format!("trailer{}: {}\r\n", i, "x".repeat(header_size)).as_bytes());
        }
        scratch.extend(b"\r\n");
        let mut mock_buf = Bytes::from(scratch);

        let mut decoder = Decoder::chunked(None, Some(max_headers * header_size));

        // ready chunked body
        let buf = decoder
            .decode_fut(&mut mock_buf)
            .await
            .unwrap()
            .into_data()
            .expect("unknown frame type");
        assert_eq!(16, buf.len());

        // eof read
        let err = decoder
            .decode_fut(&mut mock_buf)
            .await
            .expect_err("trailers over limit");
        assert_eq!(err.kind(), io::ErrorKind::InvalidData);
    }
}

================================================
FILE: src/client/core/proto/http1/dispatch.rs
================================================

use std::{
    convert::Infallible,
    future::Future,
    marker::Unpin,
    pin::Pin,
    task::{Context, Poll, ready},
};

use bytes::{Buf, Bytes};
use http::Request;
use http_body::Body;
use tokio::io::{AsyncRead, AsyncWrite};

use super::{BodyLength, Conn, Http1Transaction, MessageHead, Wants};
use crate::client::core::{
    Error, Result,
    body::{self, DecodedLength, Incoming},
    dispatch::{self, TrySendError},
    error::BoxError,
    proto::{self, Dispatched, RequestHead},
    upgrade::OnUpgrade,
};

// NOTE(review): generic parameter lists in this file appear stripped by the
// text extraction (e.g. `pub(crate) struct Dispatcher {` was presumably
// `Dispatcher<D, Bs, I, T>` with typed fields); restore from the original
// file before compiling.

// Drives a single HTTP/1 connection: couples the wire-level `Conn` with a
// `Dispatch` implementation that supplies outgoing messages and receives
// incoming ones, shuttling request/response bodies in both directions.
pub(crate) struct Dispatcher {
    conn: Conn,
    dispatch: D,
    // Sender for the currently-streaming incoming body, wrapped so a dropped
    // Dispatcher reports an incomplete body instead of a clean EOF.
    body_tx: SenderGuard,
    // Outgoing body currently being written, pinned on the heap.
    body_rx: Pin>>,
    is_closing: bool,
}

// Abstraction over the "user side" of the connection: where outgoing
// messages come from and where incoming messages are delivered.
pub(crate) trait Dispatch {
    type PollItem;
    type PollBody;
    type PollError;
    type RecvItem;

    // Poll for the next outgoing message head + body, if any.
    #[allow(clippy::type_complexity)]
    fn poll_msg(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll>>;

    // Deliver an incoming message (or connection error) to the user.
    fn recv_msg(&mut self, msg: Result<(Self::RecvItem, Incoming)>) -> Result<()>;

    // Ready to receive a message? Err(()) means the receiver is gone.
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll>;

    // Whether poll_msg should be polled for a new outgoing message.
    fn should_poll(&self) -> bool;

}

pin_project_lite::pin_project!
{
    // Client-side Dispatch implementation: pulls queued requests from the
    // dispatch channel and resolves their one-shot response callbacks.
    // NOTE(review): the `callback` field's type was mangled by extraction;
    // presumably `Option<Callback<Request<B>, http::Response<Incoming>>>`.
    pub(crate) struct Client {
        callback: Option, http::Response>>,
        #[pin]
        rx: ClientRx,
        rx_closed: bool,
    }
}

type ClientRx = dispatch::Receiver, http::Response>;

impl Dispatcher
where
    D: Dispatch<
            PollItem = MessageHead,
            PollBody = Bs,
            RecvItem = MessageHead,
        > + Unpin,
    D::PollError: Into,
    I: AsyncRead + AsyncWrite + Unpin,
    T: Http1Transaction + Unpin,
    Bs: Body + 'static,
    Bs::Error: Into,
{
    // Builds a dispatcher with no in-flight bodies and the connection open.
    #[inline]
    pub(crate) fn new(dispatch: D, conn: Conn) -> Self {
        Dispatcher {
            conn,
            dispatch,
            body_tx: SenderGuard(None),
            body_rx: Box::pin(None),
            is_closing: false,
        }
    }

    // Tears down the dispatcher, returning the IO, any buffered-but-unparsed
    // read bytes, and the Dispatch half (used for upgrades).
    #[inline]
    pub(crate) fn into_inner(self) -> (I, Bytes, D) {
        let (io, buf) = self.conn.into_inner();
        (io, buf, self.dispatch)
    }

    // Wraps poll_inner so that any connection error is (1) forwarded to a
    // streaming body sender and (2) handed to the user via recv_msg, turning
    // the error into a clean Shutdown when the user accepts it.
    fn poll_catch(
        &mut self,
        cx: &mut Context<'_>,
        should_shutdown: bool,
    ) -> Poll> {
        Poll::Ready(ready!(self.poll_inner(cx, should_shutdown)).or_else(|e| {
            // Be sure to alert a streaming body of the failure.
            if let Some(mut body) = self.body_tx.take() {
                body.send_error(Error::new_body("connection error"))
            }
            // An error means we're shutting down either way.
            // We just try to give the error to the user,
            // and close the connection with an Ok. If we
            // cannot give it to the user, then return the Err.
            self.dispatch.recv_msg(Err(e))?;
            Ok(Dispatched::Shutdown)
        }))
    }

    // One full turn of the dispatcher: run the read/write loop, then decide
    // whether the connection is finished (upgrade pending vs. shutdown).
    fn poll_inner(
        &mut self,
        cx: &mut Context<'_>,
        should_shutdown: bool,
    ) -> Poll> {
        T::update_date();

        ready!(self.poll_loop(cx))?;

        if self.is_done() {
            if let Some(pending) = self.conn.pending_upgrade() {
                self.conn.take_error()?;
                return Poll::Ready(Ok(Dispatched::Upgrade(pending)));
            } else if should_shutdown {
                ready!(self.conn.poll_shutdown(cx)).map_err(Error::new_shutdown)?;
            }
            self.conn.take_error()?;
            Poll::Ready(Ok(Dispatched::Shutdown))
        } else {
            Poll::Pending
        }
    }

    // Interleaves reads, writes, and flushes until neither side can make
    // progress, bounded to 16 iterations to avoid starving other tasks.
    fn poll_loop(&mut self, cx: &mut Context<'_>) -> Poll> {
        // Limit the looping on this connection, in case it is ready far too
        // often, so that other futures don't starve.
        //
        // 16 was chosen arbitrarily, as that is number of pipelined requests
        // benchmarks often use. Perhaps it should be a config option instead.
        for _ in 0..16 {
            let _ = self.poll_read(cx)?;
            let write_ready = self.poll_write(cx)?.is_ready();
            let flush_ready = self.poll_flush(cx)?.is_ready();

            // If we can write more body and the connection is ready, we should
            // write again. If we return `Ready(Ok(())` here, we will yield
            // without a guaranteed wake-up from the write side of the connection.
            // This would lead to a deadlock if we also don't expect reads.
            let wants_write_again = self.can_write_again() && (write_ready || flush_ready);

            // This could happen if reading paused before blocking on IO,
            // such as getting to the end of a framed message, but then
            // writing/flushing set the state back to Init. In that case,
            // if the read buffer still had bytes, we'd want to try poll_read
            // again, or else we wouldn't ever be woken up again.
            //
            // Using this instead of task::current() and notify() inside
            // the Conn is noticeably faster in pipelined benchmarks.
            let wants_read_again = self.conn.wants_read_again();

            // If we cannot write or read again, we yield and rely on the
            // wake-up from the connection futures.
            if !(wants_write_again || wants_read_again) {
                return Poll::Ready(Ok(()));
            }

            // If we are continuing only because "wants_write_again", check if write is ready.
            if !wants_read_again && wants_write_again {
                // If write was ready, just proceed with the loop
                if write_ready {
                    continue;
                }
                // Write was previously pending, but may have become ready since polling flush, so
                // we need to check it again. If we simply proceeded, the case of an unbuffered
                // writer where flush is always ready would cause us to hot loop.
                if self.poll_write(cx)?.is_pending() {
                    // write is pending, so it is safe to yield and rely on wake-up from connection
                    // futures.
                    return Poll::Ready(Ok(()));
                }
            }
        }

        trace!("poll_loop yielding (self = {:p})", self);

        // Now we yield to allow other tasks to run.
        cx.waker().wake_by_ref();
        Poll::Pending
    }

    // Read side: parse a new head when possible, otherwise forward body
    // frames (data/trailers) into the body sender, handling a dropped
    // receiver by draining or closing the read side.
    fn poll_read(&mut self, cx: &mut Context<'_>) -> Poll> {
        loop {
            if self.is_closing {
                return Poll::Ready(Ok(()));
            } else if self.conn.can_read_head() {
                ready!(self.poll_read_head(cx))?;
            } else if let Some(mut body) = self.body_tx.take() {
                if self.conn.can_read_body() {
                    match body.poll_ready(cx) {
                        Poll::Ready(Ok(())) => (),
                        Poll::Pending => {
                            self.body_tx.set(body);
                            return Poll::Pending;
                        }
                        Poll::Ready(Err(_canceled)) => {
                            // user doesn't care about the body
                            // so we should stop reading
                            trace!("body receiver dropped before eof, draining or closing");
                            self.conn.poll_drain_or_close_read(cx);
                            continue;
                        }
                    }
                    match self.conn.poll_read_body(cx) {
                        Poll::Ready(Some(Ok(frame))) => {
                            if frame.is_data() {
                                let chunk = frame.into_data().unwrap_or_else(|_| unreachable!());
                                match body.send_data(chunk) {
                                    Ok(()) => {
                                        self.body_tx.set(body);
                                    }
                                    Err(_canceled) => {
                                        if self.conn.can_read_body() {
                                            trace!("body receiver dropped before eof, closing");
                                            self.conn.close_read();
                                        }
                                    }
                                }
                            } else if frame.is_trailers() {
                                let trailers =
                                    frame.into_trailers().unwrap_or_else(|_| unreachable!());
                                match body.send_trailers(trailers) {
                                    Ok(()) => {
                                        self.body_tx.set(body);
                                    }
                                    Err(_canceled) => {
                                        if self.conn.can_read_body() {
                                            trace!("body receiver dropped before eof, closing");
                                            self.conn.close_read();
                                        }
                                    }
                                }
                            } else {
                                // we should have dropped all unknown frames in poll_read_body
                                error!("unexpected frame");
                            }
                        }
                        Poll::Ready(None) => {
                            // just drop, the body will close automatically
                        }
                        Poll::Pending => {
                            self.body_tx.set(body);
                            return Poll::Pending;
                        }
                        Poll::Ready(Some(Err(e))) => {
                            body.send_error(Error::new_body(e));
                        }
                    }
                } else {
                    // just drop, the body will close automatically
                }
            } else {
                return self.conn.poll_read_keep_alive(cx);
            }
        }
    }

    // Parses the next message head, wiring up the incoming body channel and
    // any pending upgrade before delivering the head to the dispatch.
    fn poll_read_head(&mut self, cx: &mut Context<'_>) -> Poll> {
        // can dispatch receive, or does it still care about other incoming message?
match ready!(self.dispatch.poll_ready(cx)) {
            Ok(()) => (),
            Err(()) => {
                trace!("dispatch no longer receiving messages");
                self.close();
                return Poll::Ready(Ok(()));
            }
        }

        // dispatch is ready for a message, try to read one
        match ready!(self.conn.poll_read_head(cx)) {
            Some(Ok((mut head, body_len, wants))) => {
                let body = match body_len {
                    DecodedLength::ZERO => Incoming::empty(),
                    other => {
                        // Non-empty body: create the h1 body channel and hold
                        // the sender so poll_read can stream frames into it.
                        let (tx, rx) = Incoming::h1(other, wants.contains(Wants::EXPECT));
                        self.body_tx.set(tx);
                        rx
                    }
                };
                if wants.contains(Wants::UPGRADE) {
                    let upgrade = self.conn.on_upgrade();
                    debug_assert!(!upgrade.is_none(), "empty upgrade");
                    debug_assert!(
                        head.extensions.get::().is_none(),
                        "OnUpgrade already set"
                    );
                    head.extensions.insert(upgrade);
                }
                self.dispatch.recv_msg(Ok((head, body)))?;
                Poll::Ready(Ok(()))
            }
            Some(Err(err)) => {
                debug!("read_head error: {}", err);
                self.dispatch.recv_msg(Err(err))?;
                // if here, the dispatcher gave the user the error
                // somewhere else. we still need to shutdown, but
                // not as a second error.
                self.close();
                Poll::Ready(Ok(()))
            }
            None => {
                // read eof, the write side will have been closed too unless
                // allow_read_close was set to true, in which case just do
                // nothing...
                debug_assert!(self.conn.is_read_closed());
                if self.conn.is_write_closed() {
                    self.close();
                }
                Poll::Ready(Ok(()))
            }
        }
    }

    // Write side: pull the next outgoing message when idle, otherwise stream
    // frames of the current outgoing body, flushing when the buffer is full.
    fn poll_write(&mut self, cx: &mut Context<'_>) -> Poll> {
        loop {
            if self.is_closing {
                return Poll::Ready(Ok(()));
            } else if self.body_rx.is_none()
                && self.conn.can_write_head()
                && self.dispatch.should_poll()
            {
                if let Some(msg) = ready!(Pin::new(&mut self.dispatch).poll_msg(cx)) {
                    let (head, body) = msg.map_err(Error::new_user_service)?;

                    // Determine framing from the body's size hint: an
                    // end-of-stream body writes the head with no body at all.
                    let body_type = if body.is_end_stream() {
                        self.body_rx.set(None);
                        None
                    } else {
                        let btype = body
                            .size_hint()
                            .exact()
                            .map(BodyLength::Known)
                            .or(Some(BodyLength::Unknown));
                        self.body_rx.set(Some(body));
                        btype
                    };
                    self.conn.write_head(head, body_type);
                } else {
                    self.close();
                    return Poll::Ready(Ok(()));
                }
            } else if !self.conn.can_buffer_body() {
                ready!(self.poll_flush(cx))?;
            } else {
                // A new scope is needed :(
                if let (Some(mut body), clear_body) =
                    OptGuard::new(self.body_rx.as_mut()).guard_mut()
                {
                    debug_assert!(!*clear_body, "opt guard defaults to keeping body");
                    if !self.conn.can_write_body() {
                        trace!(
                            "no more write body allowed, user body is_end_stream = {}",
                            body.is_end_stream(),
                        );
                        *clear_body = true;
                        continue;
                    }

                    let item = ready!(body.as_mut().poll_frame(cx));
                    if let Some(item) = item {
                        let frame = item.map_err(|e| {
                            *clear_body = true;
                            Error::new_user_body(e)
                        })?;

                        if frame.is_data() {
                            let chunk = frame.into_data().unwrap_or_else(|_| unreachable!());
                            let eos = body.is_end_stream();
                            if eos {
                                *clear_body = true;
                                if chunk.remaining() == 0 {
                                    trace!("discarding empty chunk");
                                    self.conn.end_body()?;
                                } else {
                                    self.conn.write_body_and_end(chunk);
                                }
                            } else {
                                if chunk.remaining() == 0 {
                                    trace!("discarding empty chunk");
                                    continue;
                                }
                                self.conn.write_body(chunk);
                            }
                        } else if frame.is_trailers() {
                            *clear_body = true;
                            self.conn.write_trailers(
                                frame.into_trailers().unwrap_or_else(|_| unreachable!()),
                            );
                        } else {
                            trace!("discarding unknown frame");
                            continue;
                        }
                    } else {
                        *clear_body = true;
                        self.conn.end_body()?;
                    }
                } else {
                    // If there's no body_rx, end the body
                    if self.conn.can_write_body() {
                        self.conn.end_body()?;
                    } else {
                        return Poll::Pending;
                    }
                }
            }
        }
    }

    // Flushes buffered output, converting IO errors into body-write errors.
    #[inline]
    fn poll_flush(&mut self, cx: &mut Context<'_>) -> Poll> {
        self.conn.poll_flush(cx).map_err(|err| {
            debug!("error writing: {}", err);
            Error::new_body_write(err)
        })
    }

    // Marks the dispatcher closing and shuts both directions of the Conn.
    #[inline]
    fn close(&mut self) {
        self.is_closing = true;
        self.conn.close_read();
        self.conn.close_write();
    }

    // There is an outgoing body still being streamed.
    #[inline]
    fn can_write_again(&mut self) -> bool {
        self.body_rx.is_some()
    }

    // Connection is finished when closing, or when the read side is closed.
    // NOTE(review): in the `else` branch `read_done` is false, so
    // `read_done && write_done` is always false and `write_done` is
    // effectively unused — confirm this simplification is intentional.
    fn is_done(&self) -> bool {
        if self.is_closing {
            return true;
        }

        let read_done = self.conn.is_read_closed();

        if read_done {
            // a client that cannot read may was well be done.
            true
        } else {
            let write_done = self.conn.is_write_closed()
                || (!self.dispatch.should_poll() && self.body_rx.is_none());
            read_done && write_done
        }
    }
}

impl Future for Dispatcher
where
    D: Dispatch<
            PollItem = MessageHead,
            PollBody = Bs,
            RecvItem = MessageHead,
        > + Unpin,
    D::PollError: Into,
    I: AsyncRead + AsyncWrite + Unpin,
    T: Http1Transaction + Unpin,
    Bs: Body + 'static,
    Bs::Error: Into,
{
    type Output = Result;

    #[inline]
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll {
        self.poll_catch(cx, true)
    }
}

// ===== impl OptGuard =====

/// A drop guard to allow a mutable borrow of an Option while being able to
/// set whether the `Option` should be cleared on drop.
struct OptGuard<'a, T>(Pin<&'a mut Option>, bool);

impl<'a, T> OptGuard<'a, T> {
    #[inline]
    fn new(pin: Pin<&'a mut Option>) -> Self {
        OptGuard(pin, false)
    }

    #[inline]
    fn guard_mut(&mut self) -> (Option>, &mut bool) {
        (self.0.as_mut().as_pin_mut(), &mut self.1)
    }
}

impl Drop for OptGuard<'_, T> {
    #[inline]
    fn drop(&mut self) {
        if self.1 {
            self.0.set(None);
        }
    }
}

// ===== impl SenderGuard =====

/// A guard for the body `Sender`.
///
/// If the `Dispatcher` future is dropped (e.g. the runtime driving the
/// connection is shut down) while it still owns a body `Sender`, the guard
/// sends an incomplete-message error so the receiver sees an error instead
/// of a silent, clean end-of-stream.
struct SenderGuard(Option);

impl SenderGuard {
    #[inline]
    fn set(&mut self, sender: body::Sender) {
        self.0 = Some(sender);
    }

    #[inline]
    fn take(&mut self) -> Option {
        self.0.take()
    }
}

impl Drop for SenderGuard {
    #[inline]
    fn drop(&mut self) {
        if let Some(mut sender) = self.0.take() {
            sender.send_error(Error::new_incomplete());
        }
    }
}

// ===== impl Client =====

impl Client {
    #[inline]
    pub(crate) fn new(rx: ClientRx) -> Client {
        Client {
            callback: None,
            rx,
            rx_closed: false,
        }
    }
}

impl Dispatch for Client
where
    B: Body,
{
    type PollItem = RequestHead;
    type PollBody = B;
    type PollError = Infallible;
    type RecvItem = proto::ResponseHead;

    // Pulls the next queued request off the channel; if its callback was
    // already canceled, the request is dropped without being sent.
    fn poll_msg(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll>> {
        let mut this = self.as_mut();
        debug_assert!(!this.rx_closed);
        match this.rx.poll_recv(cx) {
            Poll::Ready(Some((req, mut cb))) => {
                // check that future hasn't been canceled already
                match cb.poll_canceled(cx) {
                    Poll::Ready(()) => {
                        trace!("request canceled");
                        Poll::Ready(None)
                    }
                    Poll::Pending => {
                        let (parts, body) = req.into_parts();
                        let head = RequestHead {
                            version: parts.version,
                            subject: proto::RequestLine(parts.method, parts.uri),
                            headers: parts.headers,
                            extensions: parts.extensions,
                        };
                        this.callback = Some(cb);
                        Poll::Ready(Some(Ok((head, body))))
                    }
                }
            }
            Poll::Ready(None) => {
                // user has dropped sender handle
                trace!("client tx closed");
                this.rx_closed = true;
                Poll::Ready(None)
            }
            Poll::Pending => Poll::Pending,
        }
    }

    // Delivers an incoming response (or connection error) to the pending
    // request's callback; with no callback pending, an error may instead be
    // attached to the next queued request.
    fn recv_msg(&mut self, msg: Result<(Self::RecvItem, Incoming)>) -> Result<()> {
        match msg {
            Ok((msg, body)) => {
                if let Some(cb) = self.callback.take() {
                    let res = msg.into_response(body);
                    cb.send(Ok(res));
                    Ok(())
                } else {
                    // Getting here is likely a bug!
An error should have happened // in Conn::require_empty_read() before ever parsing a // full message! Err(Error::new_unexpected_message()) } } Err(err) => { if let Some(cb) = self.callback.take() { cb.send(Err(TrySendError { error: err, message: None, })); Ok(()) } else if !self.rx_closed { self.rx.close(); if let Some((req, cb)) = self.rx.try_recv() { trace!("canceling queued request with connection error: {}", err); // in this case, the message was never even started, so it's safe to tell // the user that the request was completely canceled cb.send(Err(TrySendError { error: Error::new_canceled().with(err), message: Some(req), })); Ok(()) } else { Err(err) } } else { Err(err) } } } } fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { match self.callback { Some(ref mut cb) => match cb.poll_canceled(cx) { Poll::Ready(()) => { trace!("callback receiver has dropped"); Poll::Ready(Err(())) } Poll::Pending => Poll::Ready(Ok(())), }, None => Poll::Ready(Err(())), } } #[inline] fn should_poll(&self) -> bool { self.callback.is_none() } } #[cfg(test)] mod tests { use std::time::Duration; use super::{proto::http1, *}; #[test] fn client_read_bytes_before_writing_request() { let _ = pretty_env_logger::try_init(); tokio_test::task::spawn(()).enter(|cx, _| { let (io, mut handle) = tokio_test::io::Builder::new().build_with_handle(); // Block at 0 for now, but we will release this response before // the request is ready to write later... let (mut tx, rx) = dispatch::channel(); let conn = Conn::<_, bytes::Bytes, http1::role::Client>::new(io); let mut dispatcher = Dispatcher::new(Client::new(rx), conn); // First poll is needed to allow tx to send... assert!(Pin::new(&mut dispatcher).poll(cx).is_pending()); // Unblock our IO, which has a response before we've sent request! 
// NOTE(review): reconstructed from the collapsed extract; test text was
// largely intact, only formatting restored.
#[cfg(test)]
mod tests {
    use std::time::Duration;

    use super::{proto::http1, *};

    #[test]
    fn client_read_bytes_before_writing_request() {
        let _ = pretty_env_logger::try_init();

        tokio_test::task::spawn(()).enter(|cx, _| {
            let (io, mut handle) = tokio_test::io::Builder::new().build_with_handle();

            // Block at 0 for now, but we will release this response before
            // the request is ready to write later...
            let (mut tx, rx) = dispatch::channel();
            let conn = Conn::<_, bytes::Bytes, http1::role::Client>::new(io);
            let mut dispatcher = Dispatcher::new(Client::new(rx), conn);

            // First poll is needed to allow tx to send...
            assert!(Pin::new(&mut dispatcher).poll(cx).is_pending());

            // Unblock our IO, which has a response before we've sent request!
            // handle.read(b"HTTP/1.1 200 OK\r\n\r\n");

            let mut res_rx = tx.try_send(http::Request::new(Incoming::empty())).unwrap();

            tokio_test::assert_ready_ok!(Pin::new(&mut dispatcher).poll(cx));
            let err = tokio_test::assert_ready_ok!(Pin::new(&mut res_rx).poll(cx))
                .expect_err("callback should send error");
            match (err.error.is_canceled(), err.message.as_ref()) {
                (true, Some(_)) => (),
                _ => panic!("expected Canceled, got {err:?}"),
            }
        });
    }

    #[tokio::test]
    async fn client_flushing_is_not_ready_for_next_request() {
        let _ = pretty_env_logger::try_init();

        let (io, _handle) = tokio_test::io::Builder::new()
            .write(b"POST / HTTP/1.1\r\ncontent-length: 4\r\n\r\n")
            .read(b"HTTP/1.1 200 OK\r\ncontent-length: 0\r\n\r\n")
            .wait(std::time::Duration::from_secs(2))
            .build_with_handle();

        let (mut tx, rx) = dispatch::channel();
        let mut conn = Conn::<_, bytes::Bytes, http1::role::Client>::new(io);
        conn.set_write_strategy_queue();

        let dispatcher = Dispatcher::new(Client::new(rx), conn);
        let _dispatcher = tokio::spawn(dispatcher);

        let body = {
            let (mut tx, body) = Incoming::h1(DecodedLength::new(4), false);
            std::future::poll_fn(|cx| tx.poll_ready(cx))
                .await
                .expect("ready");
            tx.send_data("reee".into()).unwrap();
            body
        };

        let req = http::Request::builder().method("POST").body(body).unwrap();

        let res = tx.try_send(req).unwrap().await.expect("response");
        drop(res);

        // While the request body is still flushing, the connection must not
        // advertise readiness for another request.
        assert!(!tx.is_ready());
    }

    #[tokio::test]
    async fn body_empty_chunks_ignored() {
        let _ = pretty_env_logger::try_init();

        let io = tokio_test::io::Builder::new()
            // no reading or writing, just be blocked for the test...
            .wait(Duration::from_secs(5))
            .build();

        let (mut tx, rx) = dispatch::channel();
        let conn = Conn::<_, bytes::Bytes, http1::role::Client>::new(io);
        let mut dispatcher = tokio_test::task::spawn(Dispatcher::new(Client::new(rx), conn));

        // First poll is needed to allow tx to send...
        assert!(dispatcher.poll().is_pending());

        let body = {
            let (mut tx, body) = Incoming::channel();
            std::future::poll_fn(|cx| tx.poll_ready(cx))
                .await
                .expect("ready");
            tx.send_data("".into()).unwrap();
            body
        };

        let _res_rx = tx.try_send(http::Request::new(body)).unwrap();

        // Ensure conn.write_body wasn't called with the empty chunk.
        // If it is, it will trigger an assertion.
        assert!(dispatcher.poll().is_pending());
    }
}
Length(u64), } #[derive(Debug)] enum BufKind { Exact(B), Limited(Take), Chunked(Chain, StaticBuf>), ChunkedEnd(StaticBuf), Trailers(Chain, StaticBuf>), } impl Encoder { #[inline] fn new(kind: Kind) -> Encoder { Encoder { kind, is_last: false, } } #[inline] pub(crate) fn chunked() -> Encoder { Encoder::new(Kind::Chunked(None)) } #[inline] pub(crate) fn length(len: u64) -> Encoder { Encoder::new(Kind::Length(len)) } #[inline] pub(crate) fn into_chunked_with_trailing_fields(self, trailers: Vec) -> Encoder { match self.kind { Kind::Chunked(_) => Encoder { kind: Kind::Chunked(Some(trailers)), is_last: self.is_last, }, _ => self, } } #[inline] pub(crate) fn is_eof(&self) -> bool { matches!(self.kind, Kind::Length(0)) } #[inline] pub(crate) fn is_last(&self) -> bool { self.is_last } #[inline] pub(crate) fn is_close_delimited(&self) -> bool { false } #[inline] pub(crate) fn is_chunked(&self) -> bool { matches!(self.kind, Kind::Chunked(_)) } pub(crate) fn end(&self) -> Result>, NotEof> { match self.kind { Kind::Length(0) => Ok(None), Kind::Chunked(_) => Ok(Some(EncodedBuf { kind: BufKind::ChunkedEnd(b"0\r\n\r\n"), })), Kind::Length(n) => Err(NotEof(n)), } } pub(crate) fn encode(&mut self, msg: B) -> EncodedBuf where B: Buf, { let len = msg.remaining(); debug_assert!(len > 0, "encode() called with empty buf"); let kind = match self.kind { Kind::Chunked(_) => { trace!("encoding chunked {}B", len); let buf = ChunkSize::new(len) .chain(msg) .chain(b"\r\n" as &'static [u8]); BufKind::Chunked(buf) } Kind::Length(ref mut remaining) => { trace!("sized write, len = {}", len); if len as u64 > *remaining { let limit = *remaining as usize; *remaining = 0; BufKind::Limited(msg.take(limit)) } else { *remaining -= len as u64; BufKind::Exact(msg) } } }; EncodedBuf { kind } } pub(crate) fn encode_trailers(&self, trailers: HeaderMap) -> Option> { trace!("encoding trailers"); match &self.kind { Kind::Chunked(Some(allowed_trailer_fields)) => { let allowed_set: HashSet<&HeaderName> = 
allowed_trailer_fields.iter().collect(); let mut cur_name = None; let mut allowed_trailers = HeaderMap::new(); for (opt_name, value) in trailers { if let Some(n) = opt_name { cur_name = Some(n); } let name = cur_name.as_ref().expect("current header name"); if allowed_set.contains(name) { if is_valid_trailer_field(name) { allowed_trailers.insert(name, value); } else { debug!("trailer field is not valid: {}", &name); } } else { debug!("trailer header name not found in trailer header: {}", &name); } } let mut buf = Vec::new(); write_headers(&allowed_trailers, &mut buf); if buf.is_empty() { return None; } Some(EncodedBuf { kind: BufKind::Trailers(b"0\r\n".chain(Bytes::from(buf)).chain(b"\r\n")), }) } Kind::Chunked(None) => { debug!("attempted to encode trailers, but the trailer header is not set"); None } _ => { debug!("attempted to encode trailers for non-chunked response"); None } } } pub(super) fn encode_and_end(&self, msg: B, dst: &mut WriteBuf>) -> bool where B: Buf, { let len = msg.remaining(); debug_assert!(len > 0, "encode() called with empty buf"); match self.kind { Kind::Chunked(_) => { trace!("encoding chunked {}B", len); let buf = ChunkSize::new(len) .chain(msg) .chain(b"\r\n0\r\n\r\n" as &'static [u8]); dst.buffer(buf); !self.is_last } Kind::Length(remaining) => { use std::cmp::Ordering; trace!("sized write, len = {}", len); match (len as u64).cmp(&remaining) { Ordering::Equal => { dst.buffer(msg); !self.is_last } Ordering::Greater => { dst.buffer(msg.take(remaining as usize)); !self.is_last } Ordering::Less => { dst.buffer(msg); false } } } } } } fn is_valid_trailer_field(name: &HeaderName) -> bool { !matches!( *name, AUTHORIZATION | CACHE_CONTROL | CONTENT_ENCODING | CONTENT_LENGTH | CONTENT_RANGE | CONTENT_TYPE | HOST | MAX_FORWARDS | SET_COOKIE | TRAILER | TRANSFER_ENCODING | TE ) } impl Buf for EncodedBuf where B: Buf, { #[inline] fn remaining(&self) -> usize { match self.kind { BufKind::Exact(ref b) => b.remaining(), BufKind::Limited(ref b) => 
b.remaining(), BufKind::Chunked(ref b) => b.remaining(), BufKind::ChunkedEnd(ref b) => b.remaining(), BufKind::Trailers(ref b) => b.remaining(), } } #[inline] fn chunk(&self) -> &[u8] { match self.kind { BufKind::Exact(ref b) => b.chunk(), BufKind::Limited(ref b) => b.chunk(), BufKind::Chunked(ref b) => b.chunk(), BufKind::ChunkedEnd(ref b) => b.chunk(), BufKind::Trailers(ref b) => b.chunk(), } } #[inline] fn advance(&mut self, cnt: usize) { match self.kind { BufKind::Exact(ref mut b) => b.advance(cnt), BufKind::Limited(ref mut b) => b.advance(cnt), BufKind::Chunked(ref mut b) => b.advance(cnt), BufKind::ChunkedEnd(ref mut b) => b.advance(cnt), BufKind::Trailers(ref mut b) => b.advance(cnt), } } #[inline] fn chunks_vectored<'t>(&'t self, dst: &mut [IoSlice<'t>]) -> usize { match self.kind { BufKind::Exact(ref b) => b.chunks_vectored(dst), BufKind::Limited(ref b) => b.chunks_vectored(dst), BufKind::Chunked(ref b) => b.chunks_vectored(dst), BufKind::ChunkedEnd(ref b) => b.chunks_vectored(dst), BufKind::Trailers(ref b) => b.chunks_vectored(dst), } } } #[cfg(target_pointer_width = "32")] const USIZE_BYTES: usize = 4; #[cfg(target_pointer_width = "64")] const USIZE_BYTES: usize = 8; // each byte will become 2 hex const CHUNK_SIZE_MAX_BYTES: usize = USIZE_BYTES * 2; #[derive(Clone, Copy)] struct ChunkSize { bytes: [u8; CHUNK_SIZE_MAX_BYTES + 2], pos: u8, len: u8, } impl ChunkSize { fn new(len: usize) -> ChunkSize { use std::fmt::Write; let mut size = ChunkSize { bytes: [0; CHUNK_SIZE_MAX_BYTES + 2], pos: 0, len: 0, }; write!(&mut size, "{len:X}\r\n").expect("CHUNK_SIZE_MAX_BYTES should fit any usize"); size } } impl Buf for ChunkSize { #[inline] fn remaining(&self) -> usize { (self.len - self.pos).into() } #[inline] fn chunk(&self) -> &[u8] { &self.bytes[self.pos.into()..self.len.into()] } #[inline] fn advance(&mut self, cnt: usize) { assert!(cnt <= self.remaining()); // just asserted cnt fits in u8 self.pos += cnt as u8; } } impl fmt::Debug for ChunkSize { fn fmt(&self, 
f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("ChunkSize") .field("bytes", &&self.bytes[..self.len.into()]) .field("pos", &self.pos) .finish() } } impl fmt::Write for ChunkSize { fn write_str(&mut self, num: &str) -> fmt::Result { use std::io::Write; (&mut self.bytes[self.len.into()..]) .write_all(num.as_bytes()) .expect("&mut [u8].write() cannot error"); self.len += num.len() as u8; // safe because bytes is never bigger than 256 Ok(()) } } impl From for EncodedBuf { fn from(buf: B) -> Self { EncodedBuf { kind: BufKind::Exact(buf), } } } impl From> for EncodedBuf { fn from(buf: Take) -> Self { EncodedBuf { kind: BufKind::Limited(buf), } } } impl From, StaticBuf>> for EncodedBuf { fn from(buf: Chain, StaticBuf>) -> Self { EncodedBuf { kind: BufKind::Chunked(buf), } } } impl fmt::Display for NotEof { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "early end, expected {} more bytes", self.0) } } impl std::error::Error for NotEof {} #[cfg(test)] mod tests { use bytes::BufMut; use http::{ HeaderMap, HeaderName, HeaderValue, header::{ AUTHORIZATION, CACHE_CONTROL, CONTENT_ENCODING, CONTENT_LENGTH, CONTENT_RANGE, CONTENT_TYPE, HOST, MAX_FORWARDS, SET_COOKIE, TE, TRAILER, TRANSFER_ENCODING, }, }; use super::{super::io::Cursor, Encoder}; #[test] fn chunked() { let mut encoder = Encoder::chunked(); let mut dst = Vec::new(); let msg1 = b"foo bar".as_ref(); let buf1 = encoder.encode(msg1); dst.put(buf1); assert_eq!(dst, b"7\r\nfoo bar\r\n"); let msg2 = b"baz quux herp".as_ref(); let buf2 = encoder.encode(msg2); dst.put(buf2); assert_eq!(dst, b"7\r\nfoo bar\r\nD\r\nbaz quux herp\r\n"); let end = encoder.end::>>().unwrap().unwrap(); dst.put(end); assert_eq!( dst, b"7\r\nfoo bar\r\nD\r\nbaz quux herp\r\n0\r\n\r\n".as_ref() ); } #[test] fn length() { let max_len = 8; let mut encoder = Encoder::length(max_len as u64); let mut dst = Vec::new(); let msg1 = b"foo bar".as_ref(); let buf1 = encoder.encode(msg1); dst.put(buf1); assert_eq!(dst, b"foo 
// NOTE(review): test module reconstructed; the stripped turbofish was
// restored as `end::<Cursor<Vec<u8>>>()` (consistent with the `Cursor`
// import) — confirm against upstream.
#[cfg(test)]
mod tests {
    use bytes::BufMut;
    use http::{
        HeaderMap, HeaderName, HeaderValue,
        header::{
            AUTHORIZATION, CACHE_CONTROL, CONTENT_ENCODING, CONTENT_LENGTH, CONTENT_RANGE,
            CONTENT_TYPE, HOST, MAX_FORWARDS, SET_COOKIE, TE, TRAILER, TRANSFER_ENCODING,
        },
    };

    use super::{super::io::Cursor, Encoder};

    #[test]
    fn chunked() {
        let mut encoder = Encoder::chunked();
        let mut dst = Vec::new();

        let msg1 = b"foo bar".as_ref();
        let buf1 = encoder.encode(msg1);
        dst.put(buf1);
        assert_eq!(dst, b"7\r\nfoo bar\r\n");

        let msg2 = b"baz quux herp".as_ref();
        let buf2 = encoder.encode(msg2);
        dst.put(buf2);
        assert_eq!(dst, b"7\r\nfoo bar\r\nD\r\nbaz quux herp\r\n");

        let end = encoder.end::<Cursor<Vec<u8>>>().unwrap().unwrap();
        dst.put(end);
        assert_eq!(
            dst,
            b"7\r\nfoo bar\r\nD\r\nbaz quux herp\r\n0\r\n\r\n".as_ref()
        );
    }

    #[test]
    fn length() {
        let max_len = 8;
        let mut encoder = Encoder::length(max_len as u64);
        let mut dst = Vec::new();

        let msg1 = b"foo bar".as_ref();
        let buf1 = encoder.encode(msg1);
        dst.put(buf1);
        assert_eq!(dst, b"foo bar");
        assert!(!encoder.is_eof());
        encoder.end::<()>().unwrap_err();

        let msg2 = b"baz".as_ref();
        let buf2 = encoder.encode(msg2);
        dst.put(buf2);
        // Only one byte of "baz" fits within the declared length.
        assert_eq!(dst.len(), max_len);
        assert_eq!(dst, b"foo barb");
        assert!(encoder.is_eof());
        assert!(encoder.end::<()>().unwrap().is_none());
    }

    #[test]
    fn chunked_with_valid_trailers() {
        let encoder = Encoder::chunked();
        let trailers = vec![HeaderName::from_static("chunky-trailer")];
        let encoder = encoder.into_chunked_with_trailing_fields(trailers);

        let headers = HeaderMap::from_iter(vec![
            (
                HeaderName::from_static("chunky-trailer"),
                HeaderValue::from_static("header data"),
            ),
            (
                HeaderName::from_static("should-not-be-included"),
                HeaderValue::from_static("oops"),
            ),
        ]);

        let buf1 = encoder.encode_trailers::<&[u8]>(headers).unwrap();

        let mut dst = Vec::new();
        dst.put(buf1);
        assert_eq!(dst, b"0\r\nchunky-trailer: header data\r\n\r\n");
    }

    #[test]
    fn chunked_with_multiple_trailer_headers() {
        let encoder = Encoder::chunked();
        let trailers = vec![
            HeaderName::from_static("chunky-trailer"),
            HeaderName::from_static("chunky-trailer-2"),
        ];
        let encoder = encoder.into_chunked_with_trailing_fields(trailers);

        let headers = HeaderMap::from_iter(vec![
            (
                HeaderName::from_static("chunky-trailer"),
                HeaderValue::from_static("header data"),
            ),
            (
                HeaderName::from_static("chunky-trailer-2"),
                HeaderValue::from_static("more header data"),
            ),
        ]);

        let buf1 = encoder.encode_trailers::<&[u8]>(headers).unwrap();

        let mut dst = Vec::new();
        dst.put(buf1);
        assert_eq!(
            dst,
            b"0\r\nchunky-trailer: header data\r\nchunky-trailer-2: more header data\r\n\r\n"
        );
    }

    #[test]
    fn chunked_with_no_trailer_header() {
        let encoder = Encoder::chunked();

        let headers = HeaderMap::from_iter(vec![(
            HeaderName::from_static("chunky-trailer"),
            HeaderValue::from_static("header data"),
        )]);

        assert!(encoder.encode_trailers::<&[u8]>(headers.clone()).is_none());

        let trailers = vec![];
        let encoder = encoder.into_chunked_with_trailing_fields(trailers);
        assert!(encoder.encode_trailers::<&[u8]>(headers).is_none());
    }

    #[test]
    fn chunked_with_invalid_trailers() {
        let encoder = Encoder::chunked();

        let trailers = vec![
            AUTHORIZATION,
            CACHE_CONTROL,
            CONTENT_ENCODING,
            TRAILER,
            TRANSFER_ENCODING,
            TE,
        ];
        let encoder = encoder.into_chunked_with_trailing_fields(trailers);

        let mut headers = HeaderMap::new();
        headers.insert(AUTHORIZATION, HeaderValue::from_static("header data"));
        headers.insert(CACHE_CONTROL, HeaderValue::from_static("header data"));
        headers.insert(CONTENT_ENCODING, HeaderValue::from_static("header data"));
        headers.insert(CONTENT_LENGTH, HeaderValue::from_static("header data"));
        headers.insert(CONTENT_RANGE, HeaderValue::from_static("header data"));
        headers.insert(CONTENT_TYPE, HeaderValue::from_static("header data"));
        headers.insert(HOST, HeaderValue::from_static("header data"));
        headers.insert(MAX_FORWARDS, HeaderValue::from_static("header data"));
        headers.insert(SET_COOKIE, HeaderValue::from_static("header data"));
        headers.insert(TRAILER, HeaderValue::from_static("header data"));
        headers.insert(TRANSFER_ENCODING, HeaderValue::from_static("header data"));
        headers.insert(TE, HeaderValue::from_static("header data"));

        assert!(encoder.encode_trailers::<&[u8]>(headers).is_none());
    }

    #[test]
    fn chunked_trailers_case_insensitive_matching() {
        // Regression test for issue #4010: HTTP/1.1 trailers are case-sensitive
        //
        // Previously, the Trailer header values were stored as HeaderValue (preserving case)
        // and compared against HeaderName (which is always lowercase). This caused trailers
        // declared as "Chunky-Trailer" to not match actual trailers sent as "chunky-trailer".
        //
        // The fix converts Trailer header values to HeaderName during parsing, which
        // normalizes the case and enables proper case-insensitive matching.
        //
        // Note: HeaderName::from_static() requires lowercase input. In real usage,
        // HeaderName::from_bytes() is used to parse the Trailer header value, which
        // normalizes mixed-case input like "Chunky-Trailer" to "chunky-trailer".
        let encoder = Encoder::chunked();
        let trailers = vec![HeaderName::from_static("chunky-trailer")];
        let encoder = encoder.into_chunked_with_trailing_fields(trailers);

        // The actual trailer being sent
        let headers = HeaderMap::from_iter(vec![(
            HeaderName::from_static("chunky-trailer"),
            HeaderValue::from_static("trailer value"),
        )]);

        let buf = encoder.encode_trailers::<&[u8]>(headers).unwrap();

        let mut dst = Vec::new();
        dst.put(buf);
        assert_eq!(dst, b"0\r\nchunky-trailer: trailer value\r\n\r\n");
    }
}
In real usage, // HeaderName::from_bytes() is used to parse the Trailer header value, which // normalizes mixed-case input like "Chunky-Trailer" to "chunky-trailer". let encoder = Encoder::chunked(); let trailers = vec![HeaderName::from_static("chunky-trailer")]; let encoder = encoder.into_chunked_with_trailing_fields(trailers); // The actual trailer being sent let headers = HeaderMap::from_iter(vec![( HeaderName::from_static("chunky-trailer"), HeaderValue::from_static("trailer value"), )]); let buf = encoder.encode_trailers::<&[u8]>(headers).unwrap(); let mut dst = Vec::new(); dst.put(buf); assert_eq!(dst, b"0\r\nchunky-trailer: trailer value\r\n\r\n"); } } ================================================ FILE: src/client/core/proto/http1/ext.rs ================================================ //! HTTP extensions. use bytes::Bytes; /// A reason phrase in an HTTP/1 response. /// /// # Clients /// /// For clients, a `ReasonPhrase` will be present in the extensions of the `http::Response` returned /// for a request if the reason phrase is different from the canonical reason phrase for the /// response's status code. For example, if a server returns `HTTP/1.1 200 Awesome`, the /// `ReasonPhrase` will be present and contain `Awesome`, but if a server returns `HTTP/1.1 200 OK`, /// the response will not contain a `ReasonPhrase`. #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct ReasonPhrase(Bytes); impl ReasonPhrase { // Not public on purpose. /// Converts a `Bytes` directly into a `ReasonPhrase` without validating. /// /// Use with care; invalid bytes in a reason phrase can cause serious security problems if /// emitted in a response. #[inline] pub(crate) fn from_bytes_unchecked(reason: Bytes) -> Self { Self(reason) } } impl AsRef<[u8]> for ReasonPhrase { /// Gets the reason phrase as bytes. 
#[inline] fn as_ref(&self) -> &[u8] { &self.0 } } ================================================ FILE: src/client/core/proto/http1/io.rs ================================================ use std::{ cmp, fmt::{self, Debug}, io::{self, IoSlice}, pin::Pin, task::{Context, Poll, ready}, }; use bytes::{Buf, Bytes, BytesMut}; use tokio::io::{AsyncRead, AsyncWrite}; use super::{Http1Transaction, ParseContext, ParsedMessage, buf::BufList}; use crate::client::core::{Error, Result}; /// The initial buffer size allocated before trying to read from IO. pub(crate) const INIT_BUFFER_SIZE: usize = 8192; /// The minimum value that can be set to max buffer size. pub(crate) const MINIMUM_MAX_BUFFER_SIZE: usize = INIT_BUFFER_SIZE; /// The default maximum read buffer size. If the buffer gets this big and /// a message is still not complete, a `TooLarge` error is triggered. // Note: if this changes, update server::conn::Http::max_buf_size docs. pub(crate) const DEFAULT_MAX_BUFFER_SIZE: usize = 8192 + 4096 * 100; /// The maximum number of distinct `Buf`s to hold in a list before requiring /// a flush. Only affects when the buffer strategy is to queue buffers. /// /// Note that a flush can happen before reaching the maximum. This simply /// forces a flush if the queue gets this big. 
const MAX_BUF_LIST_BUFFERS: usize = 16; pub(crate) struct Buffered { flush_pipeline: bool, io: T, partial_len: Option, read_blocked: bool, read_buf: BytesMut, read_buf_strategy: ReadStrategy, write_buf: WriteBuf, } impl fmt::Debug for Buffered where B: Buf, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Buffered") .field("read_buf", &self.read_buf) .field("write_buf", &self.write_buf) .finish() } } impl Buffered where T: AsyncRead + AsyncWrite + Unpin, B: Buf, { pub(crate) fn new(io: T) -> Buffered { let strategy = if io.is_write_vectored() { WriteStrategy::Queue } else { WriteStrategy::Flatten }; let write_buf = WriteBuf::new(strategy); Buffered { flush_pipeline: false, io, partial_len: None, read_blocked: false, read_buf: BytesMut::with_capacity(0), read_buf_strategy: ReadStrategy::default(), write_buf, } } #[inline] pub(crate) fn set_max_buf_size(&mut self, max: usize) { assert!( max >= MINIMUM_MAX_BUFFER_SIZE, "The max_buf_size cannot be smaller than {MINIMUM_MAX_BUFFER_SIZE}.", ); self.read_buf_strategy = ReadStrategy::with_max(max); self.write_buf.max_buf_size = max; } #[inline] pub(crate) fn set_read_buf_exact_size(&mut self, sz: usize) { self.read_buf_strategy = ReadStrategy::Exact(sz); } #[inline] pub(crate) fn set_write_strategy_flatten(&mut self) { // this should always be called only at construction time, // so this assert is here to catch myself debug_assert!(self.write_buf.queue.bufs_cnt() == 0); self.write_buf.set_strategy(WriteStrategy::Flatten); } #[inline] pub(crate) fn set_write_strategy_queue(&mut self) { // this should always be called only at construction time, // so this assert is here to catch myself debug_assert!(self.write_buf.queue.bufs_cnt() == 0); self.write_buf.set_strategy(WriteStrategy::Queue); } #[inline] pub(crate) fn read_buf(&self) -> &[u8] { self.read_buf.as_ref() } /// Return the "allocated" available space, not the potential space /// that could be allocated in the future. 
#[inline] fn read_buf_remaining_mut(&self) -> usize { self.read_buf.capacity() - self.read_buf.len() } /// Return whether we can append to the headers buffer. /// /// Reasons we can't: /// - The write buf is in queue mode, and some of the past body is still needing to be flushed. #[inline] pub(crate) fn can_headers_buf(&self) -> bool { !self.write_buf.queue.has_remaining() } #[inline] pub(crate) fn headers_buf(&mut self) -> &mut Vec { let buf = self.write_buf.headers_mut(); &mut buf.bytes } #[inline] pub(super) fn write_buf(&mut self) -> &mut WriteBuf { &mut self.write_buf } #[inline] pub(crate) fn buffer>(&mut self, buf: BB) { self.write_buf.buffer(buf) } #[inline] pub(crate) fn can_buffer(&self) -> bool { self.flush_pipeline || self.write_buf.can_buffer() } pub(crate) fn consume_leading_lines(&mut self) { if !self.read_buf.is_empty() { let mut i = 0; while i < self.read_buf.len() { match self.read_buf[i] { b'\r' | b'\n' => i += 1, _ => break, } } self.read_buf.advance(i); } } pub(super) fn parse( &mut self, cx: &mut Context<'_>, parse_ctx: ParseContext<'_>, ) -> Poll>> where S: Http1Transaction, { loop { match super::role::parse_headers::( &mut self.read_buf, self.partial_len, ParseContext { cached_headers: parse_ctx.cached_headers, req_method: parse_ctx.req_method, h1_parser_config: parse_ctx.h1_parser_config, h1_max_headers: parse_ctx.h1_max_headers, h09_responses: parse_ctx.h09_responses, }, )? { Some(msg) => { debug!("parsed {} headers", msg.head.headers.len()); self.partial_len = None; return Poll::Ready(Ok(msg)); } None => { let max = self.read_buf_strategy.max(); let curr_len = self.read_buf.len(); if curr_len >= max { debug!("max_buf_size ({}) reached, closing", max); return Poll::Ready(Err(Error::new_too_large())); } if curr_len > 0 { trace!("partial headers; {} bytes so far", curr_len); self.partial_len = Some(curr_len); } else { // 1xx gobled some bytes self.partial_len = None; } } } if ready!(self.poll_read_from_io(cx)).map_err(Error::new_io)? 
== 0 { trace!("parse eof"); return Poll::Ready(Err(Error::new_incomplete())); } } } pub(crate) fn poll_read_from_io(&mut self, cx: &mut Context<'_>) -> Poll> { self.read_blocked = false; let next = self.read_buf_strategy.next(); if self.read_buf_remaining_mut() < next { self.read_buf.reserve(next); } match tokio_util::io::poll_read_buf(Pin::new(&mut self.io), cx, &mut self.read_buf) { Poll::Ready(Ok(n)) => { trace!("received {} bytes", n); self.read_buf_strategy.record(n); Poll::Ready(Ok(n)) } Poll::Pending => { self.read_blocked = true; Poll::Pending } Poll::Ready(Err(e)) => Poll::Ready(Err(e)), } } #[inline] pub(crate) fn into_inner(self) -> (T, Bytes) { (self.io, self.read_buf.freeze()) } #[inline] pub(crate) fn io_mut(&mut self) -> &mut T { &mut self.io } #[inline] pub(crate) fn is_read_blocked(&self) -> bool { self.read_blocked } pub(crate) fn poll_flush(&mut self, cx: &mut Context<'_>) -> Poll> { if self.flush_pipeline && !self.read_buf.is_empty() { Poll::Ready(Ok(())) } else if self.write_buf.remaining() == 0 { Pin::new(&mut self.io).poll_flush(cx) } else { if let WriteStrategy::Flatten = self.write_buf.strategy { return self.poll_flush_flattened(cx); } loop { // Let Tokio pick the write path. // With `tokio-btls` this currently falls back to plain writes; // if we later support vectored TLS writes like `tokio-rustls`, // `poll_write_buf` will pick that up automatically. let n = ready!(tokio_util::io::poll_write_buf( Pin::new(&mut self.io), cx, &mut self.write_buf, )?); debug!("flushed {} bytes", n); if self.write_buf.remaining() == 0 { break; } else if n == 0 { trace!( "write returned zero, but {} bytes remaining", self.write_buf.remaining() ); return Poll::Ready(Err(io::ErrorKind::WriteZero.into())); } } Pin::new(&mut self.io).poll_flush(cx) } } /// Specialized version of `flush` when strategy is Flatten. /// /// Since all buffered bytes are flattened into the single headers buffer, /// that skips some bookkeeping around using multiple buffers. 
fn poll_flush_flattened(&mut self, cx: &mut Context<'_>) -> Poll> { loop { let n = ready!(Pin::new(&mut self.io).poll_write(cx, self.write_buf.headers.chunk()))?; debug!("flushed {} bytes", n); self.write_buf.headers.advance(n); if self.write_buf.headers.remaining() == 0 { self.write_buf.headers.reset(); break; } else if n == 0 { trace!( "write returned zero, but {} bytes remaining", self.write_buf.remaining() ); return Poll::Ready(Err(io::ErrorKind::WriteZero.into())); } } Pin::new(&mut self.io).poll_flush(cx) } } // The `B` is a `Buf`, we never project a pin to it impl Unpin for Buffered {} // TODO: This trait is old... at least rename to PollBytes or something... pub(crate) trait MemRead { fn read_mem(&mut self, cx: &mut Context<'_>, len: usize) -> Poll>; } impl MemRead for Buffered where T: AsyncRead + AsyncWrite + Unpin, B: Buf, { fn read_mem(&mut self, cx: &mut Context<'_>, len: usize) -> Poll> { if !self.read_buf.is_empty() { let n = std::cmp::min(len, self.read_buf.len()); Poll::Ready(Ok(self.read_buf.split_to(n).freeze())) } else { let n = ready!(self.poll_read_from_io(cx))?; Poll::Ready(Ok(self.read_buf.split_to(::std::cmp::min(len, n)).freeze())) } } } #[derive(Clone, Copy, Debug)] enum ReadStrategy { Adaptive { decrease_now: bool, next: usize, max: usize, }, Exact(usize), } impl ReadStrategy { fn with_max(max: usize) -> ReadStrategy { ReadStrategy::Adaptive { decrease_now: false, next: INIT_BUFFER_SIZE, max, } } fn next(&self) -> usize { match *self { ReadStrategy::Adaptive { next, .. } => next, ReadStrategy::Exact(exact) => exact, } } fn max(&self) -> usize { match *self { ReadStrategy::Adaptive { max, .. } => max, ReadStrategy::Exact(exact) => exact, } } fn record(&mut self, bytes_read: usize) { match *self { ReadStrategy::Adaptive { ref mut decrease_now, ref mut next, max, .. 
} => { if bytes_read >= *next { *next = cmp::min(incr_power_of_two(*next), max); *decrease_now = false; } else { let decr_to = prev_power_of_two(*next); if bytes_read < decr_to { if *decrease_now { *next = cmp::max(decr_to, INIT_BUFFER_SIZE); *decrease_now = false; } else { // Decreasing is a two "record" process. *decrease_now = true; } } else { // A read within the current range should cancel // a potential decrease, since we just saw proof // that we still need this size. *decrease_now = false; } } } ReadStrategy::Exact(_) => (), } } } fn incr_power_of_two(n: usize) -> usize { n.saturating_mul(2) } fn prev_power_of_two(n: usize) -> usize { // Only way this shift can underflow is if n is less than 4. // (Which would means `usize::MAX >> 64` and underflowed!) debug_assert!(n >= 4); (usize::MAX >> (n.leading_zeros() + 2)) + 1 } impl Default for ReadStrategy { fn default() -> ReadStrategy { ReadStrategy::with_max(DEFAULT_MAX_BUFFER_SIZE) } } #[derive(Clone)] pub(crate) struct Cursor { bytes: T, pos: usize, } impl> Cursor { #[inline] pub(crate) fn new(bytes: T) -> Cursor { Cursor { bytes, pos: 0 } } } impl Cursor> { /// If we've advanced the position a bit in this cursor, and wish to /// extend the underlying vector, we may wish to unshift the "read" bytes /// off, and move everything else over. fn maybe_unshift(&mut self, additional: usize) { if self.pos == 0 { // nothing to do return; } if self.bytes.capacity() - self.bytes.len() >= additional { // there's room! return; } self.bytes.drain(0..self.pos); self.pos = 0; } fn reset(&mut self) { self.pos = 0; self.bytes.clear(); } } impl> fmt::Debug for Cursor { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Cursor") .field("pos", &self.pos) .field("len", &self.bytes.as_ref().len()) .finish() } } impl> Buf for Cursor { #[inline] fn remaining(&self) -> usize { self.bytes.as_ref().len() - self.pos } #[inline] fn chunk(&self) -> &[u8] { &self.bytes.as_ref()[self.pos..] 
} #[inline] fn advance(&mut self, cnt: usize) { debug_assert!(self.pos + cnt <= self.bytes.as_ref().len()); self.pos += cnt; } } // an internal buffer to collect writes before flushes pub(super) struct WriteBuf { /// Re-usable buffer that holds message headers headers: Cursor>, max_buf_size: usize, /// Deque of user buffers if strategy is Queue queue: BufList, strategy: WriteStrategy, } impl WriteBuf { fn new(strategy: WriteStrategy) -> WriteBuf { WriteBuf { headers: Cursor::new(Vec::with_capacity(INIT_BUFFER_SIZE)), max_buf_size: DEFAULT_MAX_BUFFER_SIZE, queue: BufList::new(), strategy, } } } impl WriteBuf where B: Buf, { #[inline] fn set_strategy(&mut self, strategy: WriteStrategy) { self.strategy = strategy; } pub(super) fn buffer>(&mut self, mut buf: BB) { debug_assert!(buf.has_remaining()); match self.strategy { WriteStrategy::Flatten => { let head = self.headers_mut(); head.maybe_unshift(buf.remaining()); trace!( self.len = head.remaining(), buf.len = buf.remaining(), "buffer.flatten" ); //perf: This is a little faster than >::put, //but accomplishes the same result. 
loop { let adv = { let slice = buf.chunk(); if slice.is_empty() { return; } head.bytes.extend_from_slice(slice); slice.len() }; buf.advance(adv); } } WriteStrategy::Queue => { trace!( self.len = self.remaining(), buf.len = buf.remaining(), "buffer.queue" ); self.queue.push(buf.into()); } } } #[inline] fn can_buffer(&self) -> bool { match self.strategy { WriteStrategy::Flatten => self.remaining() < self.max_buf_size, WriteStrategy::Queue => { self.queue.bufs_cnt() < MAX_BUF_LIST_BUFFERS && self.remaining() < self.max_buf_size } } } #[inline] fn headers_mut(&mut self) -> &mut Cursor> { debug_assert!(!self.queue.has_remaining()); &mut self.headers } } impl fmt::Debug for WriteBuf { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("WriteBuf") .field("remaining", &self.remaining()) .field("strategy", &self.strategy) .finish() } } impl Buf for WriteBuf { #[inline] fn remaining(&self) -> usize { self.headers.remaining() + self.queue.remaining() } #[inline] fn chunk(&self) -> &[u8] { let headers = self.headers.chunk(); if !headers.is_empty() { headers } else { self.queue.chunk() } } #[inline] fn advance(&mut self, cnt: usize) { let hrem = self.headers.remaining(); match hrem.cmp(&cnt) { cmp::Ordering::Equal => self.headers.reset(), cmp::Ordering::Greater => self.headers.advance(cnt), cmp::Ordering::Less => { let qcnt = cnt - hrem; self.headers.reset(); self.queue.advance(qcnt); } } } #[inline] fn chunks_vectored<'t>(&'t self, dst: &mut [IoSlice<'t>]) -> usize { let n = self.headers.chunks_vectored(dst); self.queue.chunks_vectored(&mut dst[n..]) + n } } #[derive(Debug)] enum WriteStrategy { Flatten, Queue, } #[cfg(test)] mod tests { use std::time::Duration; use tokio_test::io::Builder as Mock; use super::*; impl Buffered where T: AsyncRead + AsyncWrite + Unpin, B: Buf, { fn flush(&mut self) -> impl std::future::Future> + '_ { std::future::poll_fn(move |cx| self.poll_flush(cx)) } } #[tokio::test] async fn parse_reads_until_blocked() { use 
crate::client::core::proto::http1; let _ = pretty_env_logger::try_init(); let mock = Mock::new() // Split over multiple reads will read all of it .read(b"HTTP/1.1 200 OK\r\n") .read(b"Server: crate::core:\r\n") // missing last line ending .wait(Duration::from_secs(1)) .build(); let mut buffered = Buffered::<_, Cursor>>::new(mock); // We expect a `parse` to be not ready, and so can't await it directly. // Rather, this `poll_fn` will wrap the `Poll` result. std::future::poll_fn(|cx| { let parse_ctx = ParseContext { cached_headers: &mut None, req_method: &mut None, h1_parser_config: &Default::default(), h1_max_headers: None, h09_responses: false, }; assert!( buffered .parse::(cx, parse_ctx) .is_pending() ); Poll::Ready(()) }) .await; assert_eq!( buffered.read_buf, b"HTTP/1.1 200 OK\r\nServer: crate::core:\r\n"[..] ); } #[test] fn read_strategy_adaptive_increments() { let mut strategy = ReadStrategy::default(); assert_eq!(strategy.next(), 8192); // Grows if record == next strategy.record(8192); assert_eq!(strategy.next(), 16384); strategy.record(16384); assert_eq!(strategy.next(), 32768); // Enormous records still increment at same rate strategy.record(usize::MAX); assert_eq!(strategy.next(), 65536); let max = strategy.max(); while strategy.next() < max { strategy.record(max); } assert_eq!(strategy.next(), max, "never goes over max"); strategy.record(max + 1); assert_eq!(strategy.next(), max, "never goes over max"); } #[test] fn read_strategy_adaptive_decrements() { let mut strategy = ReadStrategy::default(); strategy.record(8192); assert_eq!(strategy.next(), 16384); strategy.record(1); assert_eq!( strategy.next(), 16384, "first smaller record doesn't decrement yet" ); strategy.record(8192); assert_eq!(strategy.next(), 16384, "record was with range"); strategy.record(1); assert_eq!( strategy.next(), 16384, "in-range record should make this the 'first' again" ); strategy.record(1); assert_eq!(strategy.next(), 8192, "second smaller record decrements"); 
strategy.record(1); assert_eq!(strategy.next(), 8192, "first doesn't decrement"); strategy.record(1); assert_eq!(strategy.next(), 8192, "doesn't decrement under minimum"); } #[test] fn read_strategy_adaptive_stays_the_same() { let mut strategy = ReadStrategy::default(); strategy.record(8192); assert_eq!(strategy.next(), 16384); strategy.record(8193); assert_eq!( strategy.next(), 16384, "first smaller record doesn't decrement yet" ); strategy.record(8193); assert_eq!( strategy.next(), 16384, "with current step does not decrement" ); } #[test] fn read_strategy_adaptive_max_fuzz() { fn fuzz(max: usize) { let mut strategy = ReadStrategy::with_max(max); while strategy.next() < max { strategy.record(usize::MAX); } let mut next = strategy.next(); while next > 8192 { strategy.record(1); strategy.record(1); next = strategy.next(); assert!( next.is_power_of_two(), "decrement should be powers of two: {next} (max = {max})", ); } } let mut max = 8192; while max < usize::MAX { fuzz(max); max = (max / 2).saturating_mul(3); } fuzz(usize::MAX); } #[test] #[should_panic] #[cfg(debug_assertions)] // needs to trigger a debug_assert fn write_buf_requires_non_empty_bufs() { let mock = Mock::new().build(); let mut buffered = Buffered::<_, Cursor>>::new(mock); buffered.buffer(Cursor::new(Vec::new())); } #[tokio::test] async fn write_buf_flatten() { let _ = pretty_env_logger::try_init(); let mock = Mock::new() .write(b"hello world, it's crate::core:!") .build(); let mut buffered = Buffered::<_, Cursor>>::new(mock); buffered.write_buf.set_strategy(WriteStrategy::Flatten); buffered.headers_buf().extend(b"hello "); buffered.buffer(Cursor::new(b"world, ".to_vec())); buffered.buffer(Cursor::new(b"it's ".to_vec())); buffered.buffer(Cursor::new(b"crate::core:!".to_vec())); assert_eq!(buffered.write_buf.queue.bufs_cnt(), 0); buffered.flush().await.expect("flush"); } #[test] fn write_buf_flatten_partially_flushed() { let _ = pretty_env_logger::try_init(); let b = |s: &str| 
Cursor::new(s.as_bytes().to_vec()); let mut write_buf = WriteBuf::>>::new(WriteStrategy::Flatten); write_buf.buffer(b("hello ")); write_buf.buffer(b("world, ")); assert_eq!(write_buf.chunk(), b"hello world, "); // advance most of the way, but not all write_buf.advance(11); assert_eq!(write_buf.chunk(), b", "); assert_eq!(write_buf.headers.pos, 11); assert_eq!(write_buf.headers.bytes.capacity(), INIT_BUFFER_SIZE); // there's still room in the headers buffer, so just push on the end write_buf.buffer(b("it's crate::core:!")); assert_eq!(write_buf.chunk(), b", it's crate::core:!"); assert_eq!(write_buf.headers.pos, 11); let rem1 = write_buf.remaining(); let cap = write_buf.headers.bytes.capacity(); // but when this would go over capacity, don't copy the old bytes write_buf.buffer(Cursor::new(vec![b'X'; cap])); assert_eq!(write_buf.remaining(), cap + rem1); assert_eq!(write_buf.headers.pos, 0); } #[tokio::test] async fn write_buf_queue_disable_auto() { let _ = pretty_env_logger::try_init(); let mock = Mock::new() .write(b"hello ") .write(b"world, ") .write(b"it's ") .write(b"crate::core:!") .build(); let mut buffered = Buffered::<_, Cursor>>::new(mock); buffered.write_buf.set_strategy(WriteStrategy::Queue); // we have 4 buffers, and vec IO disabled, but explicitly said // don't try to auto detect (via setting strategy above) buffered.headers_buf().extend(b"hello "); buffered.buffer(Cursor::new(b"world, ".to_vec())); buffered.buffer(Cursor::new(b"it's ".to_vec())); buffered.buffer(Cursor::new(b"crate::core:!".to_vec())); assert_eq!(buffered.write_buf.queue.bufs_cnt(), 3); buffered.flush().await.expect("flush"); assert_eq!(buffered.write_buf.queue.bufs_cnt(), 0); } // #[cfg(feature = "nightly")] // #[bench] // fn bench_write_buf_flatten_buffer_chunk(b: &mut Bencher) { // let s = "Hello, World!"; // b.bytes = s.len() as u64; // let mut write_buf = WriteBuf::::new(); // write_buf.set_strategy(WriteStrategy::Flatten); // b.iter(|| { // let chunk = bytes::Bytes::from(s); // 
write_buf.buffer(chunk); // ::test::black_box(&write_buf); // write_buf.headers.bytes.clear(); // }) // } } ================================================ FILE: src/client/core/proto/http1/role.rs ================================================ use std::{ fmt::{self, Write as _}, mem::MaybeUninit, }; use bytes::{Bytes, BytesMut}; use http::{ Method, StatusCode, Version, header::{self, Entry, HeaderMap, HeaderName, HeaderValue}, }; use smallvec::{SmallVec, smallvec, smallvec_inline}; use super::{Encode, Encoder, Http1Transaction, ParseContext, ParsedMessage, ext::ReasonPhrase}; use crate::{ client::core::{ Error, Result, body::DecodedLength, error::Parse, proto::{BodyLength, MessageHead, RequestHead, RequestLine, headers}, }, config::RequestConfig, header::OrigHeaderMap, }; /// totally scientific const AVERAGE_HEADER_SIZE: usize = 30; pub(crate) const DEFAULT_MAX_HEADERS: usize = 100; macro_rules! header_name { ($bytes:expr) => {{ { match HeaderName::from_bytes($bytes) { Ok(name) => name, Err(e) => maybe_panic!(e), } } }}; } macro_rules! header_value { ($bytes:expr) => {{ { #[allow(unsafe_code)] unsafe { HeaderValue::from_maybe_shared_unchecked($bytes) } } }}; } macro_rules! maybe_panic { ($($arg:tt)*) => ({ let _err = ($($arg)*); if cfg!(debug_assertions) { panic!("{:?}", _err); } else { error!("Internal core error, please report {:?}", _err); return Err(Parse::Internal) } }) } pub(super) fn parse_headers( bytes: &mut BytesMut, prev_len: Option, ctx: ParseContext<'_>, ) -> Result>, Parse> where T: Http1Transaction, { // If the buffer is empty, don't bother entering the span, it's just noise. if bytes.is_empty() { return Ok(None); } trace_span!("parse_headers"); if let Some(prev_len) = prev_len { if !is_complete_fast(bytes, prev_len) { return Ok(None); } } T::parse(bytes, ctx) } /// A fast scan for the end of a message. /// Used when there was a partial read, to skip full parsing on a /// a slow connection. 
fn is_complete_fast(bytes: &[u8], prev_len: usize) -> bool { let start = prev_len.saturating_sub(3); let bytes = &bytes[start..]; for (i, b) in bytes.iter().copied().enumerate() { if b == b'\r' { if bytes[i + 1..].chunks(3).next() == Some(&b"\n\r\n"[..]) { return true; } } else if b == b'\n' && bytes.get(i + 1) == Some(&b'\n') { return true; } } false } pub(crate) enum Client {} impl Http1Transaction for Client { type Incoming = StatusCode; type Outgoing = RequestLine; #[cfg(feature = "tracing")] const LOG: &'static str = "{role=client}"; fn parse( buf: &mut BytesMut, ctx: ParseContext<'_>, ) -> Result>, Parse> { debug_assert!(!buf.is_empty(), "parse called with empty buf"); // Loop to skip information status code headers (100 Continue, etc). loop { let mut headers_indices: SmallVec<[MaybeUninit; DEFAULT_MAX_HEADERS]> = match ctx.h1_max_headers { Some(cap) => smallvec![MaybeUninit::uninit(); cap], None => smallvec_inline![MaybeUninit::uninit(); DEFAULT_MAX_HEADERS], }; let (len, status, reason, version, headers_len) = { let mut headers: SmallVec< [MaybeUninit>; DEFAULT_MAX_HEADERS], > = match ctx.h1_max_headers { Some(cap) => smallvec![MaybeUninit::uninit(); cap], None => smallvec_inline![MaybeUninit::uninit(); DEFAULT_MAX_HEADERS], }; trace!(bytes = buf.len(), "Response.parse"); let mut res = httparse::Response::new(&mut []); let bytes = buf.as_ref(); match ctx.h1_parser_config.parse_response_with_uninit_headers( &mut res, bytes, &mut headers, ) { Ok(httparse::Status::Complete(len)) => { trace!("Response.parse Complete({})", len); let status = StatusCode::from_u16(res.code.unwrap())?; let reason = { let reason = res.reason.unwrap(); // Only save the reason phrase if it isn't the canonical reason if Some(reason) != status.canonical_reason() { Some(Bytes::copy_from_slice(reason.as_bytes())) } else { None } }; let version = if res.version.unwrap() == 1 { Version::HTTP_11 } else { Version::HTTP_10 }; record_header_indices(bytes, res.headers, &mut headers_indices)?; 
let headers_len = res.headers.len(); (len, status, reason, version, headers_len) } Ok(httparse::Status::Partial) => return Ok(None), Err(httparse::Error::Version) if ctx.h09_responses => { trace!("Response.parse accepted HTTP/0.9 response"); (0, StatusCode::OK, None, Version::HTTP_09, 0) } Err(e) => return Err(e.into()), } }; let mut slice = buf.split_to(len); if ctx .h1_parser_config .obsolete_multiline_headers_in_responses_are_allowed() { for header in &mut headers_indices[..headers_len] { // SAFETY: array is valid up to `headers_len` #[allow(unsafe_code)] let header = unsafe { header.assume_init_mut() }; Client::obs_fold_line(&mut slice, header); } } let slice = slice.freeze(); let mut headers = ctx.cached_headers.take().unwrap_or_default(); let mut keep_alive = version == Version::HTTP_11; headers.reserve(headers_len); for header in &headers_indices[..headers_len] { // SAFETY: array is valid up to `headers_len` #[allow(unsafe_code)] let header = unsafe { header.assume_init_ref() }; let name = header_name!(&slice[header.name.0..header.name.1]); let value = header_value!(slice.slice(header.value.0..header.value.1)); if let header::CONNECTION = name { // keep_alive was previously set to default for Version if keep_alive { // HTTP/1.1 keep_alive = !headers::connection_close(&value); } else { // HTTP/1.0 keep_alive = headers::connection_keep_alive(&value); } } headers.append(name, value); } let mut extensions = http::Extensions::default(); if let Some(reason) = reason { // Safety: httparse ensures that only valid reason phrase bytes are present in this // field. let reason = ReasonPhrase::from_bytes_unchecked(reason); extensions.insert(reason); } let head = MessageHead { version, subject: status, headers, extensions, }; if let Some((decode, is_upgrade)) = Client::decoder(&head, ctx.req_method)? { return Ok(Some(ParsedMessage { head, decode, expect_continue: false, // a client upgrade means the connection can't be used // again, as it is definitely upgrading. 
keep_alive: keep_alive && !is_upgrade, wants_upgrade: is_upgrade, })); } // Parsing a 1xx response could have consumed the buffer, check if // it is empty now... if buf.is_empty() { return Ok(None); } } } fn encode(msg: Encode<'_, Self::Outgoing>, dst: &mut Vec) -> Result { trace!( "Client::encode method={:?}, body={:?}", msg.head.subject.0, msg.body ); *msg.req_method = Some(msg.head.subject.0.clone()); let body = Client::set_length(msg.head, msg.body); let init_cap = 30 + msg.head.headers.len() * AVERAGE_HEADER_SIZE; dst.reserve(init_cap); extend(dst, msg.head.subject.0.as_str().as_bytes()); extend(dst, b" "); //TODO: add API to http::Uri to encode without std::fmt let _ = write!(FastWrite(dst), "{} ", msg.head.subject.1); match msg.head.version { Version::HTTP_10 => extend(dst, b"HTTP/1.0"), Version::HTTP_11 => extend(dst, b"HTTP/1.1"), Version::HTTP_2 => { debug!("request with HTTP2 version coerced to HTTP/1.1"); extend(dst, b"HTTP/1.1"); } other => panic!("unexpected request version: {other:?}"), } extend(dst, b"\r\n"); if let Some(orig_headers) = RequestConfig::::get(&msg.head.extensions) { write_headers_original_case(&mut msg.head.headers, orig_headers, dst); } else { write_headers(&msg.head.headers, dst); } extend(dst, b"\r\n"); msg.head.headers.clear(); //TODO: remove when switching to drain() Ok(body) } fn on_error(_err: &Error) -> Option> { // we can't tell the server about any errors it creates None } } impl Client { /// Returns Some(length, wants_upgrade) if successful. /// /// Returns None if this message head should be skipped (like a 100 status). fn decoder( inc: &MessageHead, method: &mut Option, ) -> Result, Parse> { // According to https://tools.ietf.org/html/rfc7230#section-3.3.3 // 1. HEAD responses, and Status 1xx, 204, and 304 cannot have a body. // 2. Status 2xx to a CONNECT cannot have a body. // 3. Transfer-Encoding: chunked has a chunked body. // 4. If multiple differing Content-Length headers or invalid, close connection. // 5. 
Content-Length header has a sized body. // 6. (irrelevant to Response) // 7. Read till EOF. match inc.subject.as_u16() { 101 => { return Ok(Some((DecodedLength::ZERO, true))); } 100 | 102..=199 => { trace!("ignoring informational response: {}", inc.subject.as_u16()); return Ok(None); } 204 | 304 => return Ok(Some((DecodedLength::ZERO, false))), _ => (), } match *method { Some(Method::HEAD) => { return Ok(Some((DecodedLength::ZERO, false))); } Some(Method::CONNECT) => { if let 200..=299 = inc.subject.as_u16() { return Ok(Some((DecodedLength::ZERO, true))); } } Some(_) => {} None => { trace!("Client::decoder is missing the Method"); } } if inc.headers.contains_key(header::TRANSFER_ENCODING) { // https://tools.ietf.org/html/rfc7230#section-3.3.3 // If Transfer-Encoding header is present, and 'chunked' is // not the final encoding, and this is a Request, then it is // malformed. A server should respond with 400 Bad Request. return if inc.version == Version::HTTP_10 { debug!("HTTP/1.0 cannot have Transfer-Encoding header"); Err(Parse::transfer_encoding_unexpected()) } else if headers::transfer_encoding_is_chunked(&inc.headers) { Ok(Some((DecodedLength::CHUNKED, false))) } else { trace!("not chunked, read till eof"); Ok(Some((DecodedLength::CLOSE_DELIMITED, false))) }; } if let Some(len) = headers::content_length_parse_all(&inc.headers) { return Ok(Some((DecodedLength::checked_new(len)?, false))); } if inc.headers.contains_key(header::CONTENT_LENGTH) { debug!("illegal Content-Length header"); return Err(Parse::content_length_invalid()); } trace!("neither Transfer-Encoding nor Content-Length"); Ok(Some((DecodedLength::CLOSE_DELIMITED, false))) } fn set_length(head: &mut RequestHead, body: Option) -> Encoder { let body = if let Some(body) = body { body } else { head.headers.remove(header::TRANSFER_ENCODING); return Encoder::length(0); }; // HTTP/1.0 doesn't know about chunked let can_chunked = head.version == Version::HTTP_11; let headers = &mut head.headers; // If the 
user already set specific headers, we should respect them, regardless // of what the Body knows about itself. They set them for a reason. // Because of the borrow checker, we can't check the for an existing // Content-Length header while holding an `Entry` for the Transfer-Encoding // header, so unfortunately, we must do the check here, first. let existing_con_len = headers::content_length_parse_all(headers); let mut should_remove_con_len = false; if !can_chunked { // Chunked isn't legal, so if it is set, we need to remove it. if headers.remove(header::TRANSFER_ENCODING).is_some() { trace!("removing illegal transfer-encoding header"); } return if let Some(len) = existing_con_len { Encoder::length(len) } else if let BodyLength::Known(len) = body { set_content_length(headers, len) } else { // HTTP/1.0 client requests without a content-length // cannot have any body at all. Encoder::length(0) }; } // If the user set a transfer-encoding, respect that. Let's just // make sure `chunked` is the final encoding. let encoder = match headers.entry(header::TRANSFER_ENCODING) { Entry::Occupied(te) => { should_remove_con_len = true; if headers::is_chunked(te.iter()) { Some(Encoder::chunked()) } else { warn!("user provided transfer-encoding does not end in 'chunked'"); // There's a Transfer-Encoding, but it doesn't end in 'chunked'! // An example that could trigger this: // // Transfer-Encoding: gzip // // This can be bad, depending on if this is a request or a // response. // // - A request is illegal if there is a `Transfer-Encoding` but it doesn't end // in `chunked`. // - A response that has `Transfer-Encoding` but doesn't end in `chunked` isn't // illegal, it just forces this to be close-delimited. // // We can try to repair this, by adding `chunked` ourselves. 
headers::add_chunked(te); Some(Encoder::chunked()) } } Entry::Vacant(te) => { if let Some(len) = existing_con_len { Some(Encoder::length(len)) } else if let BodyLength::Unknown = body { // GET, HEAD, and CONNECT almost never have bodies. // // So instead of sending a "chunked" body with a 0-chunk, // assume no body here. If you *must* send a body, // set the headers explicitly. match head.subject.0 { Method::GET | Method::HEAD | Method::CONNECT => Some(Encoder::length(0)), _ => { te.insert(HeaderValue::from_static("chunked")); Some(Encoder::chunked()) } } } else { None } } }; let encoder = encoder.map(|enc| { if enc.is_chunked() { // Parse Trailer header values into HeaderNames. // Each Trailer header value may contain comma-separated names. // HeaderName normalizes to lowercase, enabling case-insensitive matching. let allowed_trailer_fields: Vec = headers .get_all(header::TRAILER) .iter() .filter_map(|hv| hv.to_str().ok()) .flat_map(|s| s.split(',')) .filter_map(|s| HeaderName::from_bytes(s.trim().as_bytes()).ok()) .collect(); if !allowed_trailer_fields.is_empty() { return enc.into_chunked_with_trailing_fields(allowed_trailer_fields); } } enc }); // This is because we need a second mutable borrow to remove // content-length header. if let Some(encoder) = encoder { if should_remove_con_len && existing_con_len.is_some() { headers.remove(header::CONTENT_LENGTH); } return encoder; } // User didn't set transfer-encoding, AND we know body length, // so we can just set the Content-Length automatically. let len = if let BodyLength::Known(len) = body { len } else { unreachable!("BodyLength::Unknown would set chunked"); }; set_content_length(headers, len) } fn obs_fold_line(all: &mut [u8], idx: &mut HeaderIndices) { // If the value has obs-folded text, then in-place shift the bytes out // of here. 
// // https://httpwg.org/specs/rfc9112.html#line.folding // // > A user agent that receives an obs-fold MUST replace each received // > obs-fold with one or more SP octets prior to interpreting the // > field value. // // This means strings like "\r\n\t foo" must replace the "\r\n\t " with // a single space. let buf = &mut all[idx.value.0..idx.value.1]; // look for a newline, otherwise bail out let first_nl = match buf.iter().position(|b| *b == b'\n') { Some(i) => i, None => return, }; // not on standard slices because whatever, sigh fn trim_start(mut s: &[u8]) -> &[u8] { while let [first, rest @ ..] = s { if first.is_ascii_whitespace() { s = rest; } else { break; } } s } fn trim_end(mut s: &[u8]) -> &[u8] { while let [rest @ .., last] = s { if last.is_ascii_whitespace() { s = rest; } else { break; } } s } fn trim(s: &[u8]) -> &[u8] { trim_start(trim_end(s)) } // TODO(perf): we could do the moves in-place, but this is so uncommon // that it shouldn't matter. let mut unfolded = trim_end(&buf[..first_nl]).to_vec(); for line in buf[first_nl + 1..].split(|b| *b == b'\n') { unfolded.push(b' '); unfolded.extend_from_slice(trim(line)); } buf[..unfolded.len()].copy_from_slice(&unfolded); idx.value.1 = idx.value.0 + unfolded.len(); } } fn set_content_length(headers: &mut HeaderMap, len: u64) -> Encoder { // At this point, there should not be a valid Content-Length // header. However, since we'll be indexing in anyways, we can // warn the user if there was an existing illegal header. // // Or at least, we can in theory. It's actually a little bit slower, // so perhaps only do that while the user is developing/testing. if cfg!(debug_assertions) { match headers.entry(header::CONTENT_LENGTH) { Entry::Occupied(mut cl) => { // Internal sanity check, we should have already determined // that the header was illegal before calling this function. 
debug_assert!(headers::content_length_parse_all_values(cl.iter()).is_none()); // Uh oh, the user set `Content-Length` headers, but set bad ones. // This would be an illegal message anyways, so let's try to repair // with our known good length. error!("user provided content-length header was invalid"); cl.insert(HeaderValue::from(len)); Encoder::length(len) } Entry::Vacant(cl) => { cl.insert(HeaderValue::from(len)); Encoder::length(len) } } } else { headers.insert(header::CONTENT_LENGTH, HeaderValue::from(len)); Encoder::length(len) } } #[derive(Clone, Copy)] struct HeaderIndices { name: (usize, usize), value: (usize, usize), } fn record_header_indices( bytes: &[u8], headers: &[httparse::Header<'_>], indices: &mut [MaybeUninit], ) -> Result<(), Parse> { let bytes_ptr = bytes.as_ptr() as usize; for (header, indices) in headers.iter().zip(indices.iter_mut()) { if header.name.len() >= (1 << 16) { debug!("header name larger than 64kb: {:?}", header.name); return Err(Parse::TooLarge); } let name_start = header.name.as_ptr() as usize - bytes_ptr; let name_end = name_start + header.name.len(); let value_start = header.value.as_ptr() as usize - bytes_ptr; let value_end = value_start + header.value.len(); indices.write(HeaderIndices { name: (name_start, name_end), value: (value_start, value_end), }); } Ok(()) } pub(crate) fn write_headers(headers: &HeaderMap, dst: &mut Vec) { for (name, value) in headers { extend(dst, name.as_ref()); extend(dst, b": "); extend(dst, value.as_bytes()); extend(dst, b"\r\n"); } } fn write_headers_original_case( headers: &mut HeaderMap, orig_headers: &OrigHeaderMap, dst: &mut Vec, ) { orig_headers.sort_headers_for_each(headers, |orig_name, value| { extend(dst, orig_name); // Wanted for curl test cases that send `X-Custom-Header:\r\n` if value.is_empty() { extend(dst, b":\r\n"); } else { extend(dst, b": "); extend(dst, value.as_bytes()); extend(dst, b"\r\n"); } }); } struct FastWrite<'a>(&'a mut Vec); impl fmt::Write for FastWrite<'_> { #[inline] 
fn write_str(&mut self, s: &str) -> fmt::Result { extend(self.0, s.as_bytes()); Ok(()) } #[inline] fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> fmt::Result { fmt::write(self, args) } } #[inline] fn extend(dst: &mut Vec, data: &[u8]) { dst.extend_from_slice(data); } ================================================ FILE: src/client/core/proto/http1.rs ================================================ //! HTTP/1 protocol implementation and utilities. mod buf; mod decode; mod encode; mod io; pub(crate) mod conn; pub(crate) mod dispatch; pub(crate) mod ext; pub(crate) mod role; use bytes::BytesMut; use http::{HeaderMap, Method}; use httparse::ParserConfig; use self::{conn::Conn, decode::Decoder, encode::Encoder, io::MINIMUM_MAX_BUFFER_SIZE}; use super::{BodyLength, MessageHead}; use crate::client::core::{ body::DecodedLength, error::{Error, Parse, Result}, }; pub(crate) trait Http1Transaction { type Incoming; type Outgoing: Default; #[cfg(feature = "tracing")] const LOG: &'static str; fn parse( bytes: &mut BytesMut, ctx: ParseContext<'_>, ) -> Result>, Parse>; fn encode(enc: Encode<'_, Self::Outgoing>, dst: &mut Vec) -> Result; fn on_error(err: &Error) -> Option>; fn update_date() {} } #[derive(Debug)] pub(crate) struct ParsedMessage { head: MessageHead, decode: DecodedLength, expect_continue: bool, keep_alive: bool, wants_upgrade: bool, } pub(crate) struct ParseContext<'a> { cached_headers: &'a mut Option, req_method: &'a mut Option, h1_parser_config: &'a ParserConfig, h1_max_headers: Option, h09_responses: bool, } /// Passed to Http1Transaction::encode pub(crate) struct Encode<'a, T> { head: &'a mut MessageHead, body: Option, req_method: &'a mut Option, } /// Extra flags that a request "wants", like expect-continue or upgrades. 
#[derive(Clone, Copy, Debug)] struct Wants(u8); impl Wants { const EMPTY: Wants = Wants(0b00); const EXPECT: Wants = Wants(0b01); const UPGRADE: Wants = Wants(0b10); #[inline] #[must_use] fn add(self, other: Wants) -> Wants { Wants(self.0 | other.0) } #[inline] fn contains(&self, other: Wants) -> bool { (self.0 & other.0) == other.0 } } /// Builder for `Http1Options`. #[must_use] #[derive(Debug)] pub struct Http1OptionsBuilder { opts: Http1Options, } /// Options for tweaking HTTP/1 connection behavior. /// /// Use `Http1Options` to adjust how HTTP/1 connections work—things like allowing HTTP/0.9 /// responses, controlling header handling, buffer sizes, and more. Most settings are optional and /// have reasonable defaults. #[non_exhaustive] #[derive(Debug, Default, Clone)] pub struct Http1Options { /// Enable support for HTTP/0.9 responses. pub h09_responses: bool, /// Whether to use vectored writes for HTTP/1 connections. pub h1_writev: Option, /// Maximum number of headers allowed in HTTP/1 responses. pub h1_max_headers: Option, /// Exact size of the read buffer to use for HTTP/1 connections. pub h1_read_buf_exact_size: Option, /// Maximum buffer size for HTTP/1 connections. pub h1_max_buf_size: Option, /// Whether to ignore invalid headers in HTTP/1 responses. pub ignore_invalid_headers_in_responses: bool, /// Whether to allow spaces after header names in HTTP/1 responses. pub allow_spaces_after_header_name_in_responses: bool, /// Whether to allow obsolete multiline headers in HTTP/1 responses. pub allow_obsolete_multiline_headers_in_responses: bool, } impl Http1OptionsBuilder { /// Set the `http09_responses` field. #[inline] pub fn http09_responses(mut self, enabled: bool) -> Self { self.opts.h09_responses = enabled; self } /// Set whether HTTP/1 connections should try to use vectored writes, /// or always flatten into a single buffer. 
/// /// Note that setting this to false may mean more copies of body data, /// but may also improve performance when an IO transport doesn't /// support vectored writes well, such as most TLS implementations. /// /// Setting this to true will force crate::core: to use queued strategy /// which may eliminate unnecessary cloning on some TLS backends /// /// Default is `auto`. In this mode crate::core: will try to guess which /// mode to use #[inline] pub fn writev(mut self, writev: Option) -> Self { self.opts.h1_writev = writev; self } /// Set the maximum number of headers. /// /// When a response is received, the parser will reserve a buffer to store headers for optimal /// performance. /// /// If client receives more headers than the buffer size, the error "message header too large" /// is returned. /// /// Note that headers is allocated on the stack by default, which has higher performance. After /// setting this value, headers will be allocated in heap memory, that is, heap memory /// allocation will occur for each response, and there will be a performance drop of about 5%. /// /// Default is 100. #[inline] pub fn max_headers(mut self, max_headers: usize) -> Self { self.opts.h1_max_headers = Some(max_headers); self } /// Sets the exact size of the read buffer to *always* use. /// /// Note that setting this option unsets the `max_buf_size` option. /// /// Default is an adaptive read buffer. #[inline] pub fn read_buf_exact_size(mut self, sz: Option) -> Self { self.opts.h1_read_buf_exact_size = sz; self.opts.h1_max_buf_size = None; self } /// Set the maximum buffer size for the connection. /// /// Default is ~400kb. /// /// Note that setting this option unsets the `read_exact_buf_size` option. /// /// # Panics /// /// The minimum value allowed is 8192. This method panics if the passed `max` is less than the /// minimum. 
#[inline] pub fn max_buf_size(mut self, max: usize) -> Self { assert!( max >= MINIMUM_MAX_BUFFER_SIZE, "the max_buf_size cannot be smaller than the minimum that h1 specifies." ); self.opts.h1_max_buf_size = Some(max); self.opts.h1_read_buf_exact_size = None; self } /// Set whether HTTP/1 connections will accept spaces between header names /// and the colon that follow them in responses. /// /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] has /// to say about it: /// /// > No whitespace is allowed between the header field-name and colon. In /// > the past, differences in the handling of such whitespace have led to /// > security vulnerabilities in request routing and response handling. A /// > server MUST reject any received request message that contains /// > whitespace between a header field-name and colon with a response code /// > of 400 (Bad Request). A proxy MUST remove any such whitespace from a /// > response message before forwarding the message downstream. /// /// Default is false. /// /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4 #[inline] pub fn allow_spaces_after_header_name_in_responses(mut self, enabled: bool) -> Self { self.opts.allow_spaces_after_header_name_in_responses = enabled; self } /// Set whether HTTP/1 connections will silently ignored malformed header lines. /// /// If this is enabled and a header line does not start with a valid header /// name, or does not include a colon at all, the line will be silently ignored /// and no error will be reported. /// /// Default is false. #[inline] pub fn ignore_invalid_headers_in_responses(mut self, enabled: bool) -> Self { self.opts.ignore_invalid_headers_in_responses = enabled; self } /// Set the `allow_obsolete_multiline_headers_in_responses` field. 
#[inline] pub fn allow_obsolete_multiline_headers_in_responses(mut self, value: bool) -> Self { self.opts.allow_obsolete_multiline_headers_in_responses = value; self } /// Build the [`Http1Options`] instance. #[inline] pub fn build(self) -> Http1Options { self.opts } } impl Http1Options { /// Create a new [`Http1OptionsBuilder`]. pub fn builder() -> Http1OptionsBuilder { Http1OptionsBuilder { opts: Http1Options::default(), } } } ================================================ FILE: src/client/core/proto/http2/client.rs ================================================ use std::{ convert::Infallible, future::Future, marker::PhantomData, pin::Pin, task::{Context, Poll, ready}, }; use bytes::Bytes; use futures_util::future::{Either, FusedFuture}; use http::{Method, Request, Response, StatusCode}; use http_body::Body; use http2::{ SendStream, client::{Builder, Connection, ResponseFuture, SendRequest}, }; use pin_project_lite::pin_project; use tokio::{ io::{AsyncRead, AsyncWrite}, sync::{ mpsc, mpsc::{Receiver, Sender}, oneshot, }, }; use super::{ H2Upgraded, PipeToSendStream, SendBuf, ping, ping::{Ponger, Recorder}, }; use crate::{ client::core::{ Error, Result, body::{self, Incoming}, dispatch::{self, Callback, SendWhen, TrySendError}, error::BoxError, proto::{Dispatched, headers}, rt::{Time, bounds::Http2ClientConnExec}, upgrade::{self, Upgraded}, }, config::RequestConfig, header::OrigHeaderMap, }; /// Receiver for HTTP/2 client requests type ClientRx = dispatch::Receiver, Response>; ///// An mpsc channel is used to help notify the `Connection` task when *all* ///// other handles to it have been dropped, so that it can shutdown. type ConnDropRef = mpsc::Sender; ///// A oneshot channel watches the `Connection` task, and when it completes, ///// the "dispatch" task will be notified and can shutdown sooner. 
type ConnEof = oneshot::Receiver; pub(crate) async fn handshake( io: T, req_rx: ClientRx, builder: Builder, ping_config: ping::Config, mut exec: E, timer: Time, ) -> Result> where T: AsyncRead + AsyncWrite + Unpin, B: Body + 'static, B::Data: Send + 'static, E: Http2ClientConnExec + Unpin, B::Error: Into, { let (h2_tx, mut conn) = builder .handshake::<_, SendBuf>(io) .await .map_err(Error::new_h2)?; // An mpsc channel is used entirely to detect when the // 'Client' has been dropped. This is to get around a bug // in h2 where dropping all SendRequests won't notify a // parked Connection. let (conn_drop_ref, conn_drop_rx) = mpsc::channel(1); let (cancel_tx, conn_eof) = oneshot::channel(); let (conn, ping) = if ping_config.is_enabled() { let pp = conn.ping_pong().expect("conn.ping_pong"); let (recorder, ponger) = ping::channel(pp, ping_config, timer); let conn: Conn<_, B> = Conn { ponger, conn }; (Either::Left(conn), recorder) } else { (Either::Right(conn), ping::Recorder::disabled()) }; let conn: ConnMapErr = ConnMapErr { conn, is_terminated: false, }; exec.execute_h2_future(H2ClientFuture::Task { task: ConnTask::new(conn, conn_drop_rx, cancel_tx), }); Ok(ClientTask { ping, conn_drop_ref, conn_eof, executor: exec, h2_tx, req_rx, fut_ctx: None, marker: PhantomData, }) } pin_project! { struct Conn where B: Body, { #[pin] ponger: Ponger, #[pin] conn: Connection::Data>>, } } impl Future for Conn where B: Body, T: AsyncRead + AsyncWrite + Unpin, { type Output = Result<(), http2::Error>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let mut this = self.project(); match this.ponger.poll(cx) { Poll::Ready(ping::Ponged::SizeUpdate(wnd)) => { this.conn.set_target_window_size(wnd); this.conn.set_initial_window_size(wnd)?; } Poll::Ready(ping::Ponged::KeepAliveTimedOut) => { debug!("connection keep-alive timed out"); return Poll::Ready(Ok(())); } Poll::Pending => {} } Pin::new(&mut this.conn).poll(cx) } } pin_project! 
{ struct ConnMapErr where B: Body, T: AsyncRead, T: AsyncWrite, T: Unpin, { #[pin] conn: Either, Connection::Data>>>, #[pin] is_terminated: bool, } } impl Future for ConnMapErr where B: Body, T: AsyncRead + AsyncWrite + Unpin, { type Output = Result<(), ()>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let mut this = self.project(); if *this.is_terminated { return Poll::Pending; } let polled = this.conn.poll(cx); if polled.is_ready() { *this.is_terminated = true; } polled.map_err(|_e| { debug!(error = %_e, "connection error"); }) } } impl FusedFuture for ConnMapErr where B: Body, T: AsyncRead + AsyncWrite + Unpin, { #[inline] fn is_terminated(&self) -> bool { self.is_terminated } } pin_project! { pub struct ConnTask where B: Body, T: AsyncRead, T: AsyncWrite, T: Unpin, { #[pin] drop_rx: Receiver, #[pin] cancel_tx: Option>, #[pin] conn: ConnMapErr, } } impl ConnTask where B: Body, T: AsyncRead + AsyncWrite + Unpin, { #[inline] fn new( conn: ConnMapErr, drop_rx: Receiver, cancel_tx: oneshot::Sender, ) -> Self { Self { drop_rx, cancel_tx: Some(cancel_tx), conn, } } } impl Future for ConnTask where B: Body, T: AsyncRead + AsyncWrite + Unpin, { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let mut this = self.project(); if !this.conn.is_terminated() && Pin::new(&mut this.conn).poll(cx).is_ready() { // ok or err, the `conn` has finished. return Poll::Ready(()); } if this.cancel_tx.is_some() && Pin::new(&mut this.drop_rx).poll_recv(cx).is_ready() { // mpsc has been dropped, hopefully polling // the connection some more should start shutdown // and then close. trace!("send_request dropped, starting conn shutdown"); drop(this.cancel_tx.take().expect("ConnTask Future polled twice")); } Poll::Pending } } pin_project! 
{ #[project = H2ClientFutureProject] pub enum H2ClientFuture where B: http_body::Body, B: 'static, B::Error: Into, T: AsyncRead, T: AsyncWrite, T: Unpin, { Pipe { #[pin] pipe: PipeMap, }, Send { #[pin] send_when: SendWhen, }, Task { #[pin] task: ConnTask, }, } } impl Future for H2ClientFuture where B: Body + 'static, B::Data: Send, B::Error: Into, T: AsyncRead + AsyncWrite + Unpin, { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> std::task::Poll { let this = self.project(); match this { H2ClientFutureProject::Pipe { pipe } => pipe.poll(cx), H2ClientFutureProject::Send { send_when } => send_when.poll(cx), H2ClientFutureProject::Task { task } => task.poll(cx), } } } struct FutCtx where B: Body, { is_connect: bool, eos: bool, fut: ResponseFuture, body_tx: SendStream>, body: B, cb: Callback, Response>, } impl Unpin for FutCtx {} pub(crate) struct ClientTask where B: Body, E: Unpin, { ping: ping::Recorder, conn_drop_ref: ConnDropRef, conn_eof: ConnEof, executor: E, h2_tx: SendRequest>, req_rx: ClientRx, fut_ctx: Option>, marker: PhantomData, } pin_project! { pub struct PipeMap where S: Body, { #[pin] pipe: PipeToSendStream, #[pin] conn_drop_ref: Option>, #[pin] ping: Option, cancel_rx: Option>, } } impl Future for PipeMap where B: http_body::Body, B::Error: Into, { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> std::task::Poll { const EXPECT_TAKEN_ONCE_MSG: &str = "Future polled twice"; let mut this = self.project(); // Check if the client cancelled the request (e.g. dropped the // response future due to a timeout). If so, reset the h2 stream // so that a RST_STREAM is sent and flow-control capacity is freed. 
match this.cancel_rx.as_mut().map(|rx| Pin::new(rx).poll(cx)) { Some(Poll::Ready(Ok(()))) => { debug!("client request body send cancelled, resetting stream"); this.pipe.as_mut().send_reset(http2::Reason::CANCEL); this.conn_drop_ref.take().expect(EXPECT_TAKEN_ONCE_MSG); this.ping.take().expect(EXPECT_TAKEN_ONCE_MSG); return Poll::Ready(()); } Some(Poll::Ready(Err(_))) => { // Sender dropped without cancelling (normal response or error). // Stop polling the receiver. *this.cancel_rx = None; } Some(Poll::Pending) | None => {} } match Pin::new(&mut this.pipe).poll(cx) { Poll::Ready(result) => { if let Err(_e) = result { debug!("client request body error: {}", _e); } drop(this.conn_drop_ref.take().expect(EXPECT_TAKEN_ONCE_MSG)); drop(this.ping.take().expect(EXPECT_TAKEN_ONCE_MSG)); return Poll::Ready(()); } Poll::Pending => (), }; Poll::Pending } } impl ClientTask where B: Body + 'static + Unpin, B::Data: Send, E: Http2ClientConnExec + Unpin, B::Error: Into, T: AsyncRead + AsyncWrite + Unpin, { fn poll_pipe(&mut self, f: FutCtx, cx: &mut Context<'_>) { let ping = self.ping.clone(); // A one-shot channel so that send_task can tell pipe_task to // reset the stream when the client cancels the request. let (cancel_tx, cancel_rx) = oneshot::channel::<()>(); let send_stream = if !f.is_connect { if !f.eos { let mut pipe = PipeToSendStream::new(f.body, f.body_tx); // eagerly see if the body pipe is ready and // can thus skip allocating in the executor match Pin::new(&mut pipe).poll(cx) { Poll::Ready(_) => (), Poll::Pending => { let conn_drop_ref = self.conn_drop_ref.clone(); // keep the ping recorder's knowledge of an // "open stream" alive while this body is // still sending... 
let ping = ping.clone(); let pipe = PipeMap { pipe, conn_drop_ref: Some(conn_drop_ref), ping: Some(ping), cancel_rx: Some(cancel_rx), }; // Clear send task self.executor .execute_h2_future(H2ClientFuture::Pipe { pipe }); } } } None } else { Some(f.body_tx) }; self.executor.execute_h2_future(H2ClientFuture::Send { send_when: SendWhen { when: ResponseFutMap { fut: f.fut, ping: Some(ping), send_stream: Some(send_stream), cancel_tx: Some(cancel_tx), }, call_back: Some(f.cb), }, }); } } pin_project! { pub(crate) struct ResponseFutMap where B: Body, B: 'static, { #[pin] fut: ResponseFuture, #[pin] ping: Option, #[pin] send_stream: Option::Data>>>>, cancel_tx: Option>, } } impl ResponseFutMap { /// Signal the pipe_task to reset the stream (e.g. on client cancellation). pub(crate) fn cancel(self: Pin<&mut Self>) { if let Some(cancel_tx) = self.project().cancel_tx.take() { let _ = cancel_tx.send(()); } } } impl Future for ResponseFutMap where B: Body + 'static, B::Data: Send, { type Output = Result, (Error, Option>)>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let mut this = self.project(); let result = ready!(this.fut.poll(cx)); let ping = this.ping.take().expect("Future polled twice"); let send_stream = this.send_stream.take().expect("Future polled twice"); match result { Ok(res) => { // record that we got the response headers ping.record_non_data(); let content_length = headers::content_length_parse_all(res.headers()); if let (Some(mut send_stream), StatusCode::OK) = (send_stream, res.status()) { if content_length.is_some_and(|len| len != 0) { warn!("h2 connect response with non-zero body not supported"); send_stream.send_reset(http2::Reason::INTERNAL_ERROR); return Poll::Ready(Err(( Error::new_h2(http2::Reason::INTERNAL_ERROR.into()), None::>, ))); } let (parts, recv_stream) = res.into_parts(); let mut res = Response::from_parts(parts, Incoming::empty()); let (pending, on_upgrade) = upgrade::pending(); let io = H2Upgraded { ping, send_stream, 
recv_stream, buf: Bytes::new(), }; let upgraded = Upgraded::new(io, Bytes::new()); pending.fulfill(upgraded); res.extensions_mut().insert(on_upgrade); Poll::Ready(Ok(res)) } else { let res = res.map(|stream| { let ping = ping.for_stream(&stream); Incoming::h2(stream, content_length.into(), ping) }); Poll::Ready(Ok(res)) } } Err(err) => { ping.ensure_not_timed_out().map_err(|e| (e, None))?; debug!("client response error: {}", err); Poll::Ready(Err((Error::new_h2(err), None::>))) } } } } impl Future for ClientTask where B: Body + 'static + Unpin, B::Data: Send, B::Error: Into, E: Http2ClientConnExec + Unpin, T: AsyncRead + AsyncWrite + Unpin, { type Output = Result; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { loop { match ready!(self.h2_tx.poll_ready(cx)) { Ok(()) => (), Err(err) => { self.ping.ensure_not_timed_out()?; return if err.reason() == Some(::http2::Reason::NO_ERROR) { trace!("connection gracefully shutdown"); Poll::Ready(Ok(Dispatched::Shutdown)) } else { Poll::Ready(Err(Error::new_h2(err))) }; } }; // If we were waiting on pending open // continue where we left off. 
if let Some(f) = self.fut_ctx.take() { self.poll_pipe(f, cx); continue; } match self.req_rx.poll_recv(cx) { Poll::Ready(Some((req, cb))) => { // Check that future hasn't been canceled already if cb.is_canceled() { trace!("request callback is canceled"); continue; } let (head, body) = req.into_parts(); let mut req = ::http::Request::from_parts(head, ()); super::strip_connection_headers(req.headers_mut(), true); if let Some(len) = body.size_hint().exact() { if len != 0 || headers::method_has_defined_payload_semantics(req.method()) { headers::set_content_length_if_missing(req.headers_mut(), len); } } // Sort headers if we have the original headers if let Some(orig_headers) = RequestConfig::::remove(req.extensions_mut()) { orig_headers.sort_headers(req.headers_mut()); } let is_connect = req.method() == Method::CONNECT; let eos = body.is_end_stream(); if is_connect && headers::content_length_parse_all(req.headers()) .is_some_and(|len| len != 0) { debug!("h2 connect request with non-zero body not supported"); cb.send(Err(TrySendError { error: Error::new_user_invalid_connect(), message: None, })); continue; } let (fut, body_tx) = match self.h2_tx.send_request(req, !is_connect && eos) { Ok(ok) => ok, Err(err) => { debug!("client send request error: {}", err); cb.send(Err(TrySendError { error: Error::new_h2(err), message: None, })); continue; } }; let f = FutCtx { is_connect, eos, fut, body_tx, body, cb, }; // Check poll_ready() again. // If the call to send_request() resulted in the new stream being pending open // we have to wait for the open to complete before accepting new requests. 
match self.h2_tx.poll_ready(cx) { Poll::Pending => { // Save Context self.fut_ctx = Some(f); return Poll::Pending; } Poll::Ready(Ok(())) => (), Poll::Ready(Err(err)) => { f.cb.send(Err(TrySendError { error: Error::new_h2(err), message: None, })); continue; } } self.poll_pipe(f, cx); continue; } Poll::Ready(None) => { trace!("client::dispatch::Sender dropped"); return Poll::Ready(Ok(Dispatched::Shutdown)); } Poll::Pending => match ready!(Pin::new(&mut self.conn_eof).poll(cx)) { // As of Rust 1.82, this pattern is no longer needed, and emits a warning. // But we cannot remove it as long as MSRV is less than that. Ok(never) => match never {}, Err(_conn_is_eof) => { trace!("connection task is closed, closing dispatch task"); return Poll::Ready(Ok(Dispatched::Shutdown)); } }, } } } } ================================================ FILE: src/client/core/proto/http2/ping.rs ================================================ //! HTTP2 Ping usage //! //! core uses HTTP2 pings for two purposes: //! //! 1. Adaptive flow control using BDP //! 2. Connection keep-alive //! //! Both cases are optional. //! //! # BDP Algorithm //! //! 1. When receiving a DATA frame, if a BDP ping isn't outstanding: 1a. Record current time. 1b. //! Send a BDP ping. //! 2. Increment the number of received bytes. //! 3. When the BDP ping ack is received: 3a. Record duration from sent time. 3b. Merge RTT with a //! running average. 3c. Calculate bdp as bytes/rtt. 3d. If bdp is over 2/3 max, set new max to //! bdp and update windows. 
use std::{ fmt, future::Future, pin::Pin, sync::Arc, task::{self, Poll}, time::{Duration, Instant}, }; use http2::{Ping, PingPong}; use crate::{ client::core::{ Result, error::{Error, Kind, TimedOut}, rt::{Sleep, Time, Timer}, }, sync::Mutex, }; type WindowSize = u32; pub(super) fn channel(ping_pong: PingPong, config: Config, timer: Time) -> (Recorder, Ponger) { debug_assert!( config.is_enabled(), "ping channel requires bdp or keep-alive config", ); let bdp = config.bdp_initial_window.map(|wnd| Bdp { bdp: wnd, max_bandwidth: 0.0, rtt: 0.0, ping_delay: Duration::from_millis(100), stable_count: 0, }); let now = timer.now(); let (bytes, next_bdp_at) = if bdp.is_some() { (Some(0), Some(now)) } else { (None, None) }; let keep_alive = config.keep_alive_interval.map(|interval| KeepAlive { interval, timeout: config.keep_alive_timeout, while_idle: config.keep_alive_while_idle, sleep: timer.sleep(interval), state: KeepAliveState::Init, timer: timer.clone(), }); let last_read_at = keep_alive.as_ref().map(|_| now); let shared = Arc::new(Mutex::new(Shared { bytes, last_read_at, is_keep_alive_timed_out: false, ping_pong, ping_sent_at: None, next_bdp_at, timer, })); ( Recorder { shared: Some(shared.clone()), }, Ponger { bdp, keep_alive, shared, }, ) } #[derive(Debug, Clone)] pub(crate) struct Config { bdp_initial_window: Option, /// If no frames are received in this amount of time, a PING frame is sent. keep_alive_interval: Option, /// After sending a keepalive PING, the connection will be closed if /// a pong is not received in this amount of time. keep_alive_timeout: Duration, /// If true, sends pings even when there are no active streams. 
keep_alive_while_idle: bool, } #[derive(Clone)] pub(crate) struct Recorder { shared: Option>>, } pub(super) struct Ponger { bdp: Option, keep_alive: Option, shared: Arc>, } struct Shared { ping_pong: PingPong, ping_sent_at: Option, // bdp /// If `Some`, bdp is enabled, and this tracks how many bytes have been /// read during the current sample. bytes: Option, /// We delay a variable amount of time between BDP pings. This allows us /// to send less pings as the bandwidth stabilizes. next_bdp_at: Option, // keep-alive /// If `Some`, keep-alive is enabled, and the Instant is how long ago /// the connection read the last frame. last_read_at: Option, is_keep_alive_timed_out: bool, timer: Time, } struct Bdp { /// Current BDP in bytes bdp: u32, /// Largest bandwidth we've seen so far. max_bandwidth: f64, /// Round trip time in seconds rtt: f64, /// Delay the next ping by this amount. /// /// This will change depending on how stable the current bandwidth is. ping_delay: Duration, /// The count of ping round trips where BDP has stayed the same. stable_count: u32, } struct KeepAlive { /// If no frames are received in this amount of time, a PING frame is sent. interval: Duration, /// After sending a keepalive PING, the connection will be closed if /// a pong is not received in this amount of time. timeout: Duration, /// If true, sends pings even when there are no active streams. while_idle: bool, state: KeepAliveState, sleep: Pin>, timer: Time, } enum KeepAliveState { Init, Scheduled(Instant), PingSent, } pub(super) enum Ponged { SizeUpdate(WindowSize), KeepAliveTimedOut, } #[derive(Debug)] pub(super) struct KeepAliveTimedOut; // ===== impl Config ===== impl Config { /// Creates a new `Config` with the specified parameters. 
pub(crate) fn new( adaptive_window: bool, initial_window_size: u32, keep_alive_interval: Option, keep_alive_timeout: Duration, keep_alive_while_idle: bool, ) -> Self { Config { bdp_initial_window: if adaptive_window { Some(initial_window_size) } else { None }, keep_alive_interval, keep_alive_timeout, keep_alive_while_idle, } } #[inline] pub(super) fn is_enabled(&self) -> bool { self.bdp_initial_window.is_some() || self.keep_alive_interval.is_some() } } // ===== impl Recorder ===== impl Recorder { #[inline] pub(super) fn disabled() -> Recorder { Recorder { shared: None } } pub(crate) fn record_data(&self, len: usize) { let Some(ref shared) = self.shared else { return; }; let mut locked = shared.lock(); locked.update_last_read_at(); // are we ready to send another bdp ping? // if not, we don't need to record bytes either if let Some(ref next_bdp_at) = locked.next_bdp_at { if Instant::now() < *next_bdp_at { return; } else { locked.next_bdp_at = None; } } if let Some(ref mut bytes) = locked.bytes { *bytes += len; } else { // no need to send bdp ping if bdp is disabled return; } if !locked.is_ping_sent() { locked.send_ping(); } } pub(crate) fn record_non_data(&self) { let Some(ref shared) = self.shared else { return; }; let mut locked = shared.lock(); locked.update_last_read_at(); } /// If the incoming stream is already closed, convert self into /// a disabled reporter. 
pub(super) fn for_stream(self, stream: &http2::RecvStream) -> Self { if stream.is_end_stream() { Recorder::disabled() } else { self } } pub(super) fn ensure_not_timed_out(&self) -> Result<()> { if let Some(ref shared) = self.shared { let locked = shared.lock(); if locked.is_keep_alive_timed_out { return Err(KeepAliveTimedOut.crate_error()); } } Ok(()) } } // ===== impl Ponger ===== impl Future for Ponger { type Output = Ponged; #[inline] fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { let this = self.as_mut().get_mut(); let mut locked = this.shared.lock(); // hoping this is fine to move within the lock let now = locked.timer.now(); let is_idle = this.is_idle(); if let Some(ref mut ka) = this.keep_alive { ka.maybe_schedule(is_idle, &locked); ka.maybe_ping(cx, is_idle, &mut locked); } if !locked.is_ping_sent() { // XXX: this doesn't register a waker...? return Poll::Pending; } match locked.ping_pong.poll_pong(cx) { Poll::Ready(Ok(_pong)) => { let start = locked .ping_sent_at .expect("pong received implies ping_sent_at"); locked.ping_sent_at = None; let rtt = now - start; trace!("recv pong"); if let Some(ref mut ka) = this.keep_alive { locked.update_last_read_at(); ka.maybe_schedule(is_idle, &locked); ka.maybe_ping(cx, is_idle, &mut locked); } if let Some(ref mut bdp) = this.bdp { let bytes = locked.bytes.expect("bdp enabled implies bytes"); locked.bytes = Some(0); // reset trace!("received BDP ack; bytes = {}, rtt = {:?}", bytes, rtt); let update = bdp.calculate(bytes, rtt); locked.next_bdp_at = Some(now + bdp.ping_delay); if let Some(update) = update { return Poll::Ready(Ponged::SizeUpdate(update)); } } } Poll::Ready(Err(_e)) => { debug!("pong error: {}", _e); } Poll::Pending => { if let Some(ref mut ka) = this.keep_alive { if let Err(KeepAliveTimedOut) = ka.maybe_timeout(cx) { this.keep_alive = None; locked.is_keep_alive_timed_out = true; return Poll::Ready(Ponged::KeepAliveTimedOut); } } } } // XXX: this doesn't register a waker...? 
Poll::Pending } } impl Ponger { #[inline] fn is_idle(&self) -> bool { Arc::strong_count(&self.shared) <= 2 } } // ===== impl Shared ===== impl Shared { fn send_ping(&mut self) { match self.ping_pong.send_ping(Ping::opaque()) { Ok(()) => { self.ping_sent_at = Some(self.timer.now()); trace!("sent ping"); } Err(_err) => { debug!("error sending ping: {}", _err); } } } #[inline] fn is_ping_sent(&self) -> bool { self.ping_sent_at.is_some() } #[inline] fn update_last_read_at(&mut self) { if self.last_read_at.is_some() { self.last_read_at = Some(self.timer.now()); } } #[inline] fn last_read_at(&self) -> Instant { self.last_read_at.expect("keep_alive expects last_read_at") } } // ===== impl Bdp ===== /// Any higher than this likely will be hitting the TCP flow control. const BDP_LIMIT: usize = 1024 * 1024 * 16; impl Bdp { fn calculate(&mut self, bytes: usize, rtt: Duration) -> Option { // No need to do any math if we're at the limit. if self.bdp as usize == BDP_LIMIT { self.stabilize_delay(); return None; } // average the rtt let rtt = seconds(rtt); if self.rtt == 0.0 { // First sample means rtt is first rtt. self.rtt = rtt; } else { // Weigh this rtt as 1/8 for a moving average. self.rtt += (rtt - self.rtt) * 0.125; } // calculate the current bandwidth let bw = (bytes as f64) / (self.rtt * 1.5); trace!("current bandwidth = {:.1}B/s", bw); if bw < self.max_bandwidth { // not a faster bandwidth, so don't update self.stabilize_delay(); return None; } else { self.max_bandwidth = bw; } // if the current `bytes` sample is at least 2/3 the previous // bdp, increase to double the current sample. 
if bytes >= self.bdp as usize * 2 / 3 { self.bdp = (bytes * 2).min(BDP_LIMIT) as WindowSize; trace!("BDP increased to {}", self.bdp); self.stable_count = 0; self.ping_delay /= 2; Some(self.bdp) } else { self.stabilize_delay(); None } } fn stabilize_delay(&mut self) { if self.ping_delay < Duration::from_secs(10) { self.stable_count += 1; if self.stable_count >= 2 { self.ping_delay *= 4; self.stable_count = 0; } } } } #[inline] fn seconds(dur: Duration) -> f64 { const NANOS_PER_SEC: f64 = 1_000_000_000.0; let secs = dur.as_secs() as f64; secs + (dur.subsec_nanos() as f64) / NANOS_PER_SEC } // ===== impl KeepAlive ===== impl KeepAlive { fn maybe_schedule(&mut self, is_idle: bool, shared: &Shared) { match self.state { KeepAliveState::Init => { if !self.while_idle && is_idle { return; } self.schedule(shared); } KeepAliveState::PingSent => { if shared.is_ping_sent() { return; } self.schedule(shared); } KeepAliveState::Scheduled(..) => (), } } fn schedule(&mut self, shared: &Shared) { let interval = shared.last_read_at() + self.interval; self.state = KeepAliveState::Scheduled(interval); self.timer.reset(&mut self.sleep, interval); } fn maybe_ping(&mut self, cx: &mut task::Context<'_>, is_idle: bool, shared: &mut Shared) { match self.state { KeepAliveState::Scheduled(at) => { if Pin::new(&mut self.sleep).poll(cx).is_pending() { return; } // check if we've received a frame while we were scheduled if shared.last_read_at() + self.interval > at { self.state = KeepAliveState::Init; cx.waker().wake_by_ref(); // schedule us again return; } if !self.while_idle && is_idle { trace!("keep-alive no need to ping when idle and while_idle=false"); return; } trace!("keep-alive interval ({:?}) reached", self.interval); shared.send_ping(); self.state = KeepAliveState::PingSent; let timeout = self.timer.now() + self.timeout; self.timer.reset(&mut self.sleep, timeout); } KeepAliveState::Init | KeepAliveState::PingSent => (), } } fn maybe_timeout(&mut self, cx: &mut task::Context<'_>) -> 
Result<(), KeepAliveTimedOut> { match self.state { KeepAliveState::PingSent => { if Pin::new(&mut self.sleep).poll(cx).is_pending() { return Ok(()); } trace!("keep-alive timeout ({:?}) reached", self.timeout); Err(KeepAliveTimedOut) } KeepAliveState::Init | KeepAliveState::Scheduled(..) => Ok(()), } } } // ===== impl KeepAliveTimedOut ===== impl KeepAliveTimedOut { pub(super) fn crate_error(self) -> Error { Error::new(Kind::Http2).with(self) } } impl fmt::Display for KeepAliveTimedOut { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str("keep-alive timed out") } } impl std::error::Error for KeepAliveTimedOut { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { Some(&TimedOut) } } ================================================ FILE: src/client/core/proto/http2.rs ================================================ //! HTTP/2 protocol implementation and utilities. pub(crate) mod client; pub(crate) mod ping; use std::{ future::Future, io::{self, Cursor, IoSlice}, pin::Pin, task::{Context, Poll, ready}, time::Duration, }; use bytes::{Buf, Bytes}; use http::{ HeaderMap, header::{CONNECTION, HeaderName, TE, TRANSFER_ENCODING, UPGRADE}, }; use http_body::Body; use http2::{ Reason, RecvStream, SendStream, frame::{Priorities, PseudoOrder, SettingsOrder, StreamDependency}, }; use pin_project_lite::pin_project; use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; use crate::client::core::{Error, Result, error::BoxError}; /// Default initial stream window size defined in HTTP2 spec. const SPEC_WINDOW_SIZE: u32 = 65_535; // Our defaults are chosen for the "majority" case, which usually are not // resource constrained, and so the spec default of 64kb can be too limiting // for performance. 
const DEFAULT_CONN_WINDOW: u32 = 1024 * 1024 * 5; // 5mb
const DEFAULT_STREAM_WINDOW: u32 = 1024 * 1024 * 2; // 2mb
const DEFAULT_MAX_FRAME_SIZE: u32 = 1024 * 16; // 16kb
const DEFAULT_MAX_SEND_BUF_SIZE: usize = 1024 * 1024; // 1mb
const DEFAULT_MAX_HEADER_LIST_SIZE: u32 = 1024 * 16; // 16kb

// The maximum number of concurrent streams that the client is allowed to open
// before it receives the initial SETTINGS frame from the server.
// This default value is derived from what the HTTP/2 spec recommends as the
// minimum value that endpoints advertise to their peers. It means that using
// this value will minimize the chance of the failure where the local endpoint
// attempts to open too many streams and gets rejected by the remote peer with
// the `REFUSED_STREAM` error.
const DEFAULT_INITIAL_MAX_SEND_STREAMS: usize = 100;

// List of connection headers from RFC 9110 Section 7.6.1
//
// TE headers are allowed in HTTP/2 requests as long as the value is "trailers", so they're
// tested separately.
static CONNECTION_HEADERS: [HeaderName; 4] = [
    HeaderName::from_static("keep-alive"),
    HeaderName::from_static("proxy-connection"),
    TRANSFER_ENCODING,
    UPGRADE,
];

/// Remove connection-specific headers, which are forbidden in HTTP/2
/// (warning for each one removed).
fn strip_connection_headers(headers: &mut HeaderMap, is_request: bool) {
    for header in &CONNECTION_HEADERS {
        if headers.remove(header).is_some() {
            warn!("Connection header illegal in HTTP/2: {}", header.as_str());
        }
    }

    if is_request {
        // TE is allowed in requests only with the exact value "trailers".
        if headers
            .get(TE)
            .is_some_and(|te_header| te_header != "trailers")
        {
            warn!("TE headers not set to \"trailers\" are illegal in HTTP/2 requests");
            headers.remove(TE);
        }
    } else if headers.remove(TE).is_some() {
        warn!("TE headers illegal in HTTP/2 responses");
    }

    if let Some(header) = headers.remove(CONNECTION) {
        warn!(
            "Connection header illegal in HTTP/2: {}",
            CONNECTION.as_str()
        );
        if let Ok(header_contents) = header.to_str() {
            // A `Connection` header may have a comma-separated list of names of other headers that
            // are meant for only this specific connection.
            //
            // Iterate these names and remove them as headers. Connection-specific headers are
            // forbidden in HTTP2, as that information has been moved into frame types of the h2
            // protocol.
            for name in header_contents.split(',') {
                let name = name.trim();
                headers.remove(name);
            }
        }
    }
}

// body adapters used by both Client

pin_project! {
    /// Pipes an `http_body::Body` into an h2 `SendStream`.
    pub(crate) struct PipeToSendStream<S>
    where
        S: Body,
    {
        #[pin]
        stream: S,
        body_tx: SendStream<SendBuf<S::Data>>,
        data_done: bool,
    }
}

impl<S> PipeToSendStream<S>
where
    S: Body,
{
    #[inline]
    fn new(stream: S, body_tx: SendStream<SendBuf<S::Data>>) -> PipeToSendStream<S> {
        PipeToSendStream {
            stream,
            body_tx,
            data_done: false,
        }
    }

    /// Reset the underlying h2 stream with `reason` (sends RST_STREAM).
    #[inline]
    fn send_reset(self: Pin<&mut Self>, reason: http2::Reason) {
        self.project().body_tx.send_reset(reason);
    }
}

impl<S> Future for PipeToSendStream<S>
where
    S: Body,
    S::Error: Into<BoxError>,
{
    type Output = Result<()>;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let mut me = self.project();
        loop {
            // we don't have the next chunk of data yet, so just reserve 1 byte to make
            // sure there's some capacity available. h2 will handle the capacity management
            // for the actual body chunk.
            me.body_tx.reserve_capacity(1);

            if me.body_tx.capacity() == 0 {
                loop {
                    match ready!(me.body_tx.poll_capacity(cx)) {
                        Some(Ok(0)) => {}
                        Some(Ok(_)) => break,
                        Some(Err(e)) => {
                            return Poll::Ready(Err(Error::new_body_write(e)));
                        }
                        None => {
                            // None means the stream is no longer in a
                            // streaming state, we either finished it
                            // somehow, or the remote reset us.
                            return Poll::Ready(Err(Error::new_body_write(
                                "send stream capacity unexpectedly closed",
                            )));
                        }
                    }
                }
            } else if let Poll::Ready(reason) =
                me.body_tx.poll_reset(cx).map_err(Error::new_body_write)?
            {
                debug!("stream received RST_STREAM: {:?}", reason);
                return Poll::Ready(Err(Error::new_body_write(::http2::Error::from(reason))));
            }

            match ready!(me.stream.as_mut().poll_frame(cx)) {
                Some(Ok(frame)) => {
                    if frame.is_data() {
                        let chunk = frame.into_data().unwrap_or_else(|_| unreachable!());
                        let is_eos = me.stream.is_end_stream();
                        trace!(
                            "send body chunk: {} bytes, eos={}",
                            chunk.remaining(),
                            is_eos,
                        );

                        let buf = SendBuf::Buf(chunk);
                        me.body_tx
                            .send_data(buf, is_eos)
                            .map_err(Error::new_body_write)?;

                        if is_eos {
                            return Poll::Ready(Ok(()));
                        }
                    } else if frame.is_trailers() {
                        // no more DATA, so give any capacity back
                        me.body_tx.reserve_capacity(0);
                        me.body_tx
                            .send_trailers(frame.into_trailers().unwrap_or_else(|_| unreachable!()))
                            .map_err(Error::new_body_write)?;
                        return Poll::Ready(Ok(()));
                    } else {
                        trace!("discarding unknown frame");
                        // loop again
                    }
                }
                Some(Err(e)) => return Poll::Ready(Err(me.body_tx.on_user_err(e))),
                None => {
                    // no more frames means we're done here
                    // but at this point, we haven't sent an EOS DATA, or
                    // any trailers, so send an empty EOS DATA.
return Poll::Ready(me.body_tx.send_eos_frame()); } } } } } trait SendStreamExt { fn on_user_err(&mut self, err: E) -> Error where E: Into; fn send_eos_frame(&mut self) -> Result<()>; } impl SendStreamExt for SendStream> { fn on_user_err(&mut self, err: E) -> Error where E: Into, { let err = Error::new_user_body(err); debug!("send body user stream error: {}", err); self.send_reset(err.h2_reason()); err } fn send_eos_frame(&mut self) -> Result<()> { trace!("send body eos"); self.send_data(SendBuf::None, true) .map_err(Error::new_body_write) } } #[repr(usize)] enum SendBuf { Buf(B), Cursor(Cursor>), None, } impl Buf for SendBuf { #[inline] fn remaining(&self) -> usize { match *self { Self::Buf(ref b) => b.remaining(), Self::Cursor(ref c) => Buf::remaining(c), Self::None => 0, } } #[inline] fn chunk(&self) -> &[u8] { match *self { Self::Buf(ref b) => b.chunk(), Self::Cursor(ref c) => c.chunk(), Self::None => &[], } } #[inline] fn advance(&mut self, cnt: usize) { match *self { Self::Buf(ref mut b) => b.advance(cnt), Self::Cursor(ref mut c) => c.advance(cnt), Self::None => {} } } fn chunks_vectored<'a>(&'a self, dst: &mut [IoSlice<'a>]) -> usize { match *self { Self::Buf(ref b) => b.chunks_vectored(dst), Self::Cursor(ref c) => c.chunks_vectored(dst), Self::None => 0, } } } struct H2Upgraded where B: Buf, { ping: ping::Recorder, send_stream: SendStream>, recv_stream: RecvStream, buf: Bytes, } impl AsyncRead for H2Upgraded where B: Buf, { fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, read_buf: &mut ReadBuf<'_>, ) -> Poll> { if self.buf.is_empty() { self.buf = loop { match ready!(self.recv_stream.poll_data(cx)) { None => return Poll::Ready(Ok(())), Some(Ok(buf)) if buf.is_empty() && !self.recv_stream.is_end_stream() => { continue; } Some(Ok(buf)) => { self.ping.record_data(buf.len()); break buf; } Some(Err(e)) => { return Poll::Ready(match e.reason() { Some(Reason::NO_ERROR) | Some(Reason::CANCEL) => Ok(()), Some(Reason::STREAM_CLOSED) => { 
Err(io::Error::new(io::ErrorKind::BrokenPipe, e)) } _ => Err(h2_to_io_error(e)), }); } } }; } let cnt = std::cmp::min(self.buf.len(), read_buf.remaining()); read_buf.put_slice(&self.buf[..cnt]); self.buf.advance(cnt); let _ = self.recv_stream.flow_control().release_capacity(cnt); Poll::Ready(Ok(())) } } impl AsyncWrite for H2Upgraded where B: Buf, { fn poll_write( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { if buf.is_empty() { return Poll::Ready(Ok(0)); } self.send_stream.reserve_capacity(buf.len()); // We ignore all errors returned by `poll_capacity` and `write`, as we // will get the correct from `poll_reset` anyway. let cnt = match ready!(self.send_stream.poll_capacity(cx)) { None => Some(0), Some(Ok(cnt)) => self .send_stream .send_data(SendBuf::Cursor(Cursor::new(buf[..cnt].into())), false) .ok() .map(|()| cnt), Some(Err(_)) => None, }; if let Some(cnt) = cnt { return Poll::Ready(Ok(cnt)); } Poll::Ready(Err(h2_to_io_error( match ready!(self.send_stream.poll_reset(cx)) { Ok(Reason::NO_ERROR) | Ok(Reason::CANCEL) | Ok(Reason::STREAM_CLOSED) => { return Poll::Ready(Err(io::ErrorKind::BrokenPipe.into())); } Ok(reason) => reason.into(), Err(e) => e, }, ))) } #[inline] fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) } fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { if self .send_stream .send_data(SendBuf::Cursor(Cursor::new([].into())), true) .is_ok() { return Poll::Ready(Ok(())); } Poll::Ready(Err(h2_to_io_error( match ready!(self.send_stream.poll_reset(cx)) { Ok(Reason::NO_ERROR) => return Poll::Ready(Ok(())), Ok(Reason::CANCEL) | Ok(Reason::STREAM_CLOSED) => { return Poll::Ready(Err(io::ErrorKind::BrokenPipe.into())); } Ok(reason) => reason.into(), Err(e) => e, }, ))) } } fn h2_to_io_error(e: http2::Error) -> std::io::Error { if e.is_io() { e.into_io() .expect("[BUG] http2::Error::is_io() is true, but into_io() failed") } else { std::io::Error::other(e) } } /// 
Builder for `Http2Options`. #[must_use] #[derive(Debug)] pub struct Http2OptionsBuilder { opts: Http2Options, } /// Options for tuning HTTP/2 connections. /// /// `Http2Options` lets you adjust how HTTP/2 works—stream limits, window sizes, frame and header /// settings, and more. Most fields are optional and have sensible defaults. See each field for /// details. #[non_exhaustive] #[derive(Debug, Clone)] pub struct Http2Options { /// Whether to use adaptive flow control. pub adaptive_window: bool, /// The initial stream ID for the connection. pub initial_stream_id: Option, /// The initial window size for HTTP/2 connection-level flow control. pub initial_conn_window_size: u32, /// The initial window size for HTTP/2 streams. pub initial_window_size: u32, /// The initial maximum number of locally initiated (send) streams. pub initial_max_send_streams: usize, /// The maximum frame size to use for HTTP/2. pub max_frame_size: Option, /// The interval for HTTP/2 keep-alive ping frames. pub keep_alive_interval: Option, /// The timeout for receiving an acknowledgement of the keep-alive ping. pub keep_alive_timeout: Duration, /// Whether HTTP/2 keep-alive should apply while the connection is idle. pub keep_alive_while_idle: bool, /// The maximum number of concurrent locally reset streams. pub max_concurrent_reset_streams: Option, /// The maximum size of the send buffer for HTTP/2 streams. pub max_send_buffer_size: usize, /// The maximum number of concurrent streams initiated by the remote peer. pub max_concurrent_streams: Option, /// The maximum size of the header list. pub max_header_list_size: Option, /// The maximum number of pending accept reset streams. pub max_pending_accept_reset_streams: Option, /// Whether to enable push promises. pub enable_push: Option, /// The header table size for HPACK compression. pub header_table_size: Option, /// Whether to enable the CONNECT protocol. pub enable_connect_protocol: Option, /// Whether to disable RFC 7540 Stream Priorities. 
pub no_rfc7540_priorities: Option, /// The HTTP/2 pseudo-header field order for outgoing HEADERS frames. pub headers_pseudo_order: Option, /// The stream dependency for the outgoing HEADERS frame. pub headers_stream_dependency: Option, /// The order of settings parameters in the initial SETTINGS frame. pub settings_order: Option, /// The list of PRIORITY frames to be sent after connection establishment. pub priorities: Option, } impl Http2OptionsBuilder { /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2 /// stream-level flow control. /// /// Passing `None` will do nothing. /// /// If not set, crate::core: will use a default. /// /// [spec]: https://httpwg.org/specs/rfc9113.html#SETTINGS_INITIAL_WINDOW_SIZE #[inline] pub fn initial_window_size(mut self, sz: impl Into>) -> Self { if let Some(sz) = sz.into() { self.opts.adaptive_window = false; self.opts.initial_window_size = sz; } self } /// Sets the max connection-level flow control for HTTP2 /// /// Passing `None` will do nothing. /// /// If not set, crate::core: will use a default. #[inline] pub fn initial_connection_window_size(mut self, sz: impl Into>) -> Self { if let Some(sz) = sz.into() { self.opts.adaptive_window = false; self.opts.initial_conn_window_size = sz; } self } /// Sets the initial maximum of locally initiated (send) streams. /// /// This value will be overwritten by the value included in the initial /// SETTINGS frame received from the peer as part of a [connection preface]. /// /// Passing `None` will do nothing. /// /// If not set, crate::core: will use a default. /// /// [connection preface]: https://httpwg.org/specs/rfc9113.html#preface #[inline] pub fn initial_max_send_streams(mut self, initial: impl Into>) -> Self { if let Some(initial) = initial.into() { self.opts.initial_max_send_streams = initial; } self } /// Sets the initial stream id for the connection. 
#[inline] pub fn initial_stream_id(mut self, id: impl Into>) -> Self { if let Some(id) = id.into() { self.opts.initial_stream_id = Some(id); } self } /// Sets whether to use an adaptive flow control. /// /// Enabling this will override the limits set in /// `initial_stream_window_size` and /// `initial_connection_window_size`. #[inline] pub fn adaptive_window(mut self, enabled: bool) -> Self { self.opts.adaptive_window = enabled; if enabled { self.opts.initial_window_size = SPEC_WINDOW_SIZE; self.opts.initial_conn_window_size = SPEC_WINDOW_SIZE; } self } /// Sets the maximum frame size to use for HTTP2. /// /// Default is currently 16KB, but can change. #[inline] pub fn max_frame_size(mut self, sz: impl Into>) -> Self { if let Some(sz) = sz.into() { self.opts.max_frame_size = Some(sz); } self } /// Sets the max size of received header frames. /// /// Default is currently 16KB, but can change. #[inline] pub fn max_header_list_size(mut self, max: u32) -> Self { self.opts.max_header_list_size = Some(max); self } /// Sets the header table size. /// /// This setting informs the peer of the maximum size of the header compression /// table used to encode header blocks, in octets. The encoder may select any value /// equal to or less than the header table size specified by the sender. /// /// The default value of crate `h2` is 4,096. #[inline] pub fn header_table_size(mut self, size: impl Into>) -> Self { if let Some(size) = size.into() { self.opts.header_table_size = Some(size); } self } /// Sets the maximum number of concurrent streams. /// /// The maximum concurrent streams setting only controls the maximum number /// of streams that can be initiated by the remote peer. In other words, /// when this setting is set to 100, this does not limit the number of /// concurrent streams that can be created by the caller. /// /// It is recommended that this value be no smaller than 100, so as to not /// unnecessarily limit parallelism. 
However, any value is legal, including /// 0. If `max` is set to 0, then the remote will not be permitted to /// initiate streams. /// /// Note that streams in the reserved state, i.e., push promises that have /// been reserved but the stream has not started, do not count against this /// setting. /// /// Also note that if the remote *does* exceed the value set here, it is not /// a protocol level error. Instead, the `h2` library will immediately reset /// the stream. /// /// See [Section 5.1.2] in the HTTP/2 spec for more details. /// /// [Section 5.1.2]: https://http2.github.io/http2-spec/#rfc.section.5.1.2 #[inline] pub fn max_concurrent_streams(mut self, max: impl Into>) -> Self { if let Some(max) = max.into() { self.opts.max_concurrent_streams = Some(max); } self } /// Sets an interval for HTTP2 Ping frames should be sent to keep a /// connection alive. /// /// Pass `None` to disable HTTP2 keep-alive. /// /// Default is currently disabled. #[inline] pub fn keep_alive_interval(mut self, interval: impl Into>) -> Self { self.opts.keep_alive_interval = interval.into(); self } /// Sets a timeout for receiving an acknowledgement of the keep-alive ping. /// /// If the ping is not acknowledged within the timeout, the connection will /// be closed. Does nothing if `keep_alive_interval` is disabled. /// /// Default is 20 seconds. #[inline] pub fn keep_alive_timeout(mut self, timeout: Duration) -> Self { self.opts.keep_alive_timeout = timeout; self } /// Sets whether HTTP2 keep-alive should apply while the connection is idle. /// /// If disabled, keep-alive pings are only sent while there are open /// request/responses streams. If enabled, pings are also sent when no /// streams are active. Does nothing if `keep_alive_interval` is /// disabled. /// /// Default is `false`. #[inline] pub fn keep_alive_while_idle(mut self, enabled: bool) -> Self { self.opts.keep_alive_while_idle = enabled; self } /// Enables and disables the push feature for HTTP2. 
/// /// Passing `None` will do nothing. #[inline] pub fn enable_push(mut self, opt: bool) -> Self { self.opts.enable_push = Some(opt); self } /// Sets the enable connect protocol. #[inline] pub fn enable_connect_protocol(mut self, opt: bool) -> Self { self.opts.enable_connect_protocol = Some(opt); self } /// Disable RFC 7540 Stream Priorities (set to `true` to disable). /// [RFC 9218]: #[inline] pub fn no_rfc7540_priorities(mut self, opt: bool) -> Self { self.opts.no_rfc7540_priorities = Some(opt); self } /// Sets the maximum number of HTTP2 concurrent locally reset streams. /// /// See the documentation of [`http2::client::Builder::max_concurrent_reset_streams`] for more /// details. /// /// The default value is determined by the `h2` crate. /// /// [`http2::client::Builder::max_concurrent_reset_streams`]: https://docs.rs/h2/client/struct.Builder.html#method.max_concurrent_reset_streams #[inline] pub fn max_concurrent_reset_streams(mut self, max: usize) -> Self { self.opts.max_concurrent_reset_streams = Some(max); self } /// Set the maximum write buffer size for each HTTP/2 stream. /// /// Default is currently 1MB, but may change. /// /// # Panics /// /// The value must be no larger than `u32::MAX`. #[inline] pub fn max_send_buf_size(mut self, max: usize) -> Self { assert!(max <= u32::MAX as usize); self.opts.max_send_buffer_size = max; self } /// Configures the maximum number of pending reset streams allowed before a GOAWAY will be sent. /// /// See for more information. #[inline] pub fn max_pending_accept_reset_streams(mut self, max: impl Into>) -> Self { if let Some(max) = max.into() { self.opts.max_pending_accept_reset_streams = Some(max); } self } /// Sets the stream dependency and weight for the outgoing HEADERS frame. /// /// This configures the priority of the stream by specifying its dependency and weight, /// as defined by the HTTP/2 priority mechanism. 
This can be used to influence how the /// server allocates resources to this stream relative to others. #[inline] pub fn headers_stream_dependency(mut self, stream_dependency: T) -> Self where T: Into>, { if let Some(stream_dependency) = stream_dependency.into() { self.opts.headers_stream_dependency = Some(stream_dependency); } self } /// Sets the HTTP/2 pseudo-header field order for outgoing HEADERS frames. /// /// This determines the order in which pseudo-header fields (such as `:method`, `:scheme`, etc.) /// are encoded in the HEADERS frame. Customizing the order may be useful for interoperability /// or testing purposes. #[inline] pub fn headers_pseudo_order(mut self, headers_pseudo_order: T) -> Self where T: Into>, { if let Some(headers_pseudo_order) = headers_pseudo_order.into() { self.opts.headers_pseudo_order = Some(headers_pseudo_order); } self } /// Sets the order of settings parameters in the initial SETTINGS frame. /// /// This determines the order in which settings are sent during the HTTP/2 handshake. /// Customizing the order may be useful for testing or protocol compliance. #[inline] pub fn settings_order(mut self, settings_order: T) -> Self where T: Into>, { if let Some(settings_order) = settings_order.into() { self.opts.settings_order = Some(settings_order); } self } /// Sets the list of PRIORITY frames to be sent immediately after the connection is established, /// but before the first request is sent. /// /// This allows you to pre-configure the HTTP/2 stream dependency tree by specifying a set of /// PRIORITY frames that will be sent as part of the connection preface. This can be useful for /// optimizing resource allocation or testing custom stream prioritization strategies. /// /// Each `Priority` in the list must have a valid (non-zero) stream ID. Any priority with a /// stream ID of zero will be ignored. 
#[inline] pub fn priorities(mut self, priorities: T) -> Self where T: Into>, { if let Some(priorities) = priorities.into() { self.opts.priorities = Some(priorities); } self } /// Builds the `Http2Options` instance. #[inline] pub fn build(self) -> Http2Options { self.opts } } impl Http2Options { /// Creates a new `Http2OptionsBuilder` instance. pub fn builder() -> Http2OptionsBuilder { // Reset optional frame size and header size settings to None to allow explicit // customization This ensures users can configure these via builder methods without // being constrained by defaults Http2OptionsBuilder { opts: Http2Options { max_frame_size: None, max_header_list_size: None, ..Default::default() }, } } } impl Default for Http2Options { #[inline] fn default() -> Self { Http2Options { adaptive_window: false, initial_stream_id: None, initial_conn_window_size: DEFAULT_CONN_WINDOW, initial_window_size: DEFAULT_STREAM_WINDOW, initial_max_send_streams: DEFAULT_INITIAL_MAX_SEND_STREAMS, max_frame_size: Some(DEFAULT_MAX_FRAME_SIZE), max_header_list_size: Some(DEFAULT_MAX_HEADER_LIST_SIZE), keep_alive_interval: None, keep_alive_timeout: Duration::from_secs(20), keep_alive_while_idle: false, max_concurrent_reset_streams: None, max_send_buffer_size: DEFAULT_MAX_SEND_BUF_SIZE, max_pending_accept_reset_streams: None, header_table_size: None, max_concurrent_streams: None, enable_push: None, enable_connect_protocol: None, no_rfc7540_priorities: None, settings_order: None, headers_pseudo_order: None, headers_stream_dependency: None, priorities: None, } } } #[cfg(test)] mod tests { use std::time::Duration; use bytes::Bytes; use http_body_util::Full; use tokio::sync::oneshot; use crate::client::core::{conn::http2::Builder, rt::TokioExecutor}; fn setup_duplex_test_server() -> (tokio::io::DuplexStream, tokio::io::DuplexStream) { let (client_io, server_io) = tokio::io::duplex(64); (client_io, server_io) } // https://github.com/hyperium/hyper/issues/4040 #[tokio::test] async fn 
h2_pipe_task_cancelled_on_response_future_drop() { let (client_io, server_io) = setup_duplex_test_server(); let (rst_tx, rst_rx) = oneshot::channel::(); tokio::spawn(async move { let mut builder = http2::server::Builder::new(); builder.initial_window_size(0); let mut h2 = builder.handshake::<_, Bytes>(server_io).await.unwrap(); let (req, _respond) = h2.accept().await.unwrap().unwrap(); tokio::spawn(async move { let _ = std::future::poll_fn(|cx| h2.poll_closed(cx)).await; }); let mut body = req.into_body(); let got_rst = tokio::time::timeout(Duration::from_secs(2), body.data()) .await .is_ok_and(|frame| matches!(frame, Some(Err(_)) | None)); let _ = rst_tx.send(got_rst); }); let (mut client, conn) = Builder::new(TokioExecutor::new()) .handshake(client_io) .await .expect("http handshake"); tokio::spawn(async move { let _ = conn.await; }); let req = http::Request::post("http://localhost/") .body(Full::new(Bytes::from(vec![b'x'; 50]))) .unwrap(); let res = tokio::time::timeout(Duration::from_millis(5), client.try_send_request(req)).await; assert!(res.is_err(), "should timeout waiting for response"); let got_rst = rst_rx.await.expect("server task should complete"); assert!(got_rst, "server should receive RST_STREAM"); } } ================================================ FILE: src/client/core/proto.rs ================================================ //! Pieces pertaining to the HTTP message protocol. mod headers; pub mod http1; pub mod http2; /// A request line of an incoming request message. #[derive(Debug, Default, PartialEq)] pub(crate) struct RequestLine(http::Method, http::Uri); /// An Incoming Message head. Includes request/status line, and headers. #[derive(Debug, Default)] pub(crate) struct MessageHead { /// HTTP version of the message. version: http::Version, /// Subject (request line or status line) of Incoming message. subject: S, /// Headers of the Incoming message. headers: http::HeaderMap, /// Extensions. 
extensions: http::Extensions, } /// An incoming request message. type RequestHead = MessageHead; /// An incoming response message. type ResponseHead = MessageHead; #[derive(Debug)] pub(crate) enum BodyLength { /// Content-Length Known(u64), /// Transfer-Encoding: chunked (if h1) Unknown, } /// Status of when a Dispatcher future completes. pub(crate) enum Dispatched { /// Dispatcher completely shutdown connection. Shutdown, /// Dispatcher has pending upgrade, and so did not shutdown. Upgrade(crate::client::core::upgrade::Pending), } impl MessageHead { fn into_response(self, body: B) -> http::Response { let mut res = http::Response::new(body); *res.status_mut() = self.subject; *res.headers_mut() = self.headers; *res.version_mut() = self.version; *res.extensions_mut() = self.extensions; res } } ================================================ FILE: src/client/core/rt/bounds.rs ================================================ //! Trait aliases //! //! Traits in this module ease setting bounds and usually automatically //! implemented by implementing another trait. pub use self::h2_client::Http2ClientConnExec; mod h2_client { use std::future::Future; use tokio::io::{AsyncRead, AsyncWrite}; use crate::client::core::{ error::BoxError, proto::http2::client::H2ClientFuture, rt::Executor, }; /// An executor to spawn http2 futures for the client. /// /// This trait is implemented for any type that implements [`Executor`] /// trait for any future. /// /// This trait is sealed and cannot be implemented for types outside this crate. 
pub trait Http2ClientConnExec: sealed_client::Sealed<(B, T)> where B: http_body::Body, B::Error: Into, T: AsyncRead + AsyncWrite + Unpin, { #[doc(hidden)] fn execute_h2_future(&mut self, future: H2ClientFuture); } impl Http2ClientConnExec for E where E: Executor>, B: http_body::Body + 'static, B::Error: Into, H2ClientFuture: Future, T: AsyncRead + AsyncWrite + Unpin, { #[inline] fn execute_h2_future(&mut self, future: H2ClientFuture) { self.execute(future) } } impl sealed_client::Sealed<(B, T)> for E where E: Executor>, B: http_body::Body + 'static, B::Error: Into, H2ClientFuture: Future, T: AsyncRead + AsyncWrite + Unpin, { } mod sealed_client { pub trait Sealed {} } } ================================================ FILE: src/client/core/rt/timer.rs ================================================ //! Provides a timer trait with timer-like functions use std::{ any::TypeId, future::Future, pin::Pin, sync::Arc, time::{Duration, Instant}, }; /// A timer which provides timer-like functions. pub trait Timer { /// Return a future that resolves in `duration` time. fn sleep(&self, duration: Duration) -> Pin>; /// Return a future that resolves at `deadline`. fn sleep_until(&self, deadline: Instant) -> Pin>; /// Return an `Instant` representing the current time. /// /// The default implementation returns [`Instant::now()`]. fn now(&self) -> Instant { Instant::now() } /// Reset a future to resolve at `new_deadline` instead. fn reset(&self, sleep: &mut Pin>, new_deadline: Instant) { *sleep = self.sleep_until(new_deadline); } } /// A future returned by a `Timer`. pub trait Sleep: Send + Sync + Future { #[doc(hidden)] /// This method is private and can not be implemented by downstream crate fn __type_id(&self, _: private::Sealed) -> TypeId where Self: 'static, { TypeId::of::() } } /// A user-provided timer to time background tasks. #[derive(Clone)] pub enum Time { Timer(Arc), Empty, } // =====impl Sleep ===== impl dyn Sleep { //! 
This is a re-implementation of downcast methods from std::any::Any /// Check whether the type is the same as `T` pub fn is(&self) -> bool where T: Sleep + 'static, { self.__type_id(private::Sealed {}) == TypeId::of::() } /// Downcast a pinned &mut Sleep object to its original type pub fn downcast_mut_pin(self: Pin<&mut Self>) -> Option> where T: Sleep + 'static, { if self.is::() { #[allow(unsafe_code)] unsafe { let inner = Pin::into_inner_unchecked(self); Some(Pin::new_unchecked( &mut *(&mut *inner as *mut dyn Sleep as *mut T), )) } } else { None } } } // ===== impl Time ===== impl Timer for Time { fn sleep(&self, duration: Duration) -> Pin> { match *self { Time::Empty => { panic!("You must supply a timer.") } Time::Timer(ref t) => t.sleep(duration), } } fn now(&self) -> Instant { match *self { Time::Empty => Instant::now(), Time::Timer(ref t) => t.now(), } } fn sleep_until(&self, deadline: Instant) -> Pin> { match *self { Time::Empty => { panic!("You must supply a timer.") } Time::Timer(ref t) => t.sleep_until(deadline), } } fn reset(&self, sleep: &mut Pin>, new_deadline: Instant) { match *self { Time::Empty => { panic!("You must supply a timer.") } Time::Timer(ref t) => t.reset(sleep, new_deadline), } } } mod private { pub struct Sealed {} } ================================================ FILE: src/client/core/rt/tokio.rs ================================================ //! Tokio IO integration for core. use std::{ future::Future, pin::Pin, task::{Context, Poll}, time::{Duration, Instant}, }; use pin_project_lite::pin_project; use super::{Executor, Sleep, Timer}; /// Future executor that utilises `tokio` threads. #[non_exhaustive] #[derive(Default, Debug, Clone)] pub struct TokioExecutor {} /// A Timer that uses the tokio runtime. #[non_exhaustive] #[derive(Default, Clone, Debug)] pub struct TokioTimer; // Use TokioSleep to get tokio::time::Sleep to implement Unpin. // see https://docs.rs/tokio/latest/tokio/time/struct.Sleep.html pin_project! 
{ #[derive(Debug)] struct TokioSleep { #[pin] inner: tokio::time::Sleep, } } // ===== impl TokioExecutor ===== impl Executor for TokioExecutor where Fut: Future + Send + 'static, Fut::Output: Send + 'static, { fn execute(&self, fut: Fut) { tokio::spawn(fut); } } impl TokioExecutor { /// Create new executor that relies on [`tokio::spawn`] to execute futures. pub fn new() -> Self { Self {} } } // ==== impl TokioTimer ===== impl Timer for TokioTimer { fn sleep(&self, duration: Duration) -> Pin> { Box::pin(TokioSleep { inner: tokio::time::sleep(duration), }) } fn sleep_until(&self, deadline: Instant) -> Pin> { Box::pin(TokioSleep { inner: tokio::time::sleep_until(deadline.into()), }) } fn now(&self) -> Instant { tokio::time::Instant::now().into() } fn reset(&self, sleep: &mut Pin>, new_deadline: Instant) { if let Some(sleep) = sleep.as_mut().downcast_mut_pin::() { sleep.reset(new_deadline) } } } impl TokioTimer { /// Create a new TokioTimer pub fn new() -> Self { Self {} } } // ===== impl TokioSleep ===== impl Future for TokioSleep { type Output = (); #[inline] fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { self.project().inner.poll(cx) } } impl Sleep for TokioSleep {} impl TokioSleep { #[inline] fn reset(self: Pin<&mut Self>, deadline: Instant) { self.project().inner.as_mut().reset(deadline.into()); } } ================================================ FILE: src/client/core/rt.rs ================================================ //! Runtime components //! //! The traits and types within this module are used to allow plugging in //! runtime types. These include: //! //! - Executors //! - Timers //! - IO transports pub mod bounds; mod timer; mod tokio; pub use self::{ timer::{Sleep, Time, Timer}, tokio::{TokioExecutor, TokioTimer}, }; /// An executor of futures. /// /// This trait allows abstract over async runtimes. Implement this trait for your own type. pub trait Executor { /// Place the future into the executor to be run. 
fn execute(&self, fut: Fut); } ================================================ FILE: src/client/core/upgrade.rs ================================================ //! HTTP Upgrades //! //! This module deals with managing [HTTP Upgrades][mdn] in crate::core:. Since //! several concepts in HTTP allow for first talking HTTP, and then converting //! to a different protocol, this module conflates them into a single API. //! Those include: //! //! - HTTP/1.1 Upgrades //! - HTTP `CONNECT` //! //! You are responsible for any other pre-requisites to establish an upgrade, //! such as sending the appropriate headers, methods, and status codes. You can //! then use [`on`][] to grab a `Future` which will resolve to the upgraded //! connection object, or an error if the upgrade fails. //! //! [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Protocol_upgrade_mechanism //! //! Sending an HTTP upgrade from the client involves setting //! either the appropriate method, if wanting to `CONNECT`, or headers such as //! `Upgrade` and `Connection`, on the `http::Request`. Once receiving the //! `http::Response` back, you must check for the specific information that the //! upgrade is agreed upon by the server (such as a `101` status code), and then //! get the `Future` from the `Response`. use std::{ error::Error as StdError, fmt, future::Future, io, pin::Pin, sync::Arc, task::{Context, Poll}, }; use bytes::Bytes; use tokio::{ io::{AsyncRead, AsyncWrite, ReadBuf}, sync::oneshot, }; use self::rewind::Rewind; use super::{Error, Result}; use crate::sync::Mutex; /// An upgraded HTTP connection. /// /// This type holds a trait object internally of the original IO that /// was used to speak HTTP before the upgrade. It can be used directly /// as a [`AsyncRead`] or [`AsyncWrite`] for convenience. /// /// Alternatively, if the exact type is known, this can be deconstructed /// into its parts. pub struct Upgraded { io: Rewind>, } /// A future for a possible HTTP upgrade. 
/// /// If no upgrade was available, or it doesn't succeed, yields an `Error`. #[derive(Clone)] pub struct OnUpgrade { rx: Option>>>>, } /// Gets a pending HTTP upgrade from this message. /// /// This can be called on the following types: /// /// - `http::Request` /// - `http::Response` /// - `&mut http::Request` /// - `&mut http::Response` #[inline] pub fn on(msg: T) -> OnUpgrade { msg.on_upgrade() } pub(crate) struct Pending { tx: oneshot::Sender>, } pub(crate) fn pending() -> (Pending, OnUpgrade) { let (tx, rx) = oneshot::channel(); ( Pending { tx }, OnUpgrade { rx: Some(Arc::new(Mutex::new(rx))), }, ) } // ===== impl Upgraded ===== impl Upgraded { #[inline] pub(crate) fn new(io: T, read_buf: Bytes) -> Self where T: AsyncRead + AsyncWrite + Unpin + Send + 'static, { Upgraded { io: Rewind::new_buffered(Box::new(io), read_buf), } } } impl AsyncRead for Upgraded { #[inline] fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll> { Pin::new(&mut self.io).poll_read(cx, buf) } } impl AsyncWrite for Upgraded { #[inline] fn poll_write( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { Pin::new(&mut self.io).poll_write(cx, buf) } #[inline] fn poll_write_vectored( mut self: Pin<&mut Self>, cx: &mut Context<'_>, bufs: &[io::IoSlice<'_>], ) -> Poll> { Pin::new(&mut self.io).poll_write_vectored(cx, bufs) } #[inline] fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { Pin::new(&mut self.io).poll_flush(cx) } #[inline] fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { Pin::new(&mut self.io).poll_shutdown(cx) } #[inline] fn is_write_vectored(&self) -> bool { self.io.is_write_vectored() } } impl fmt::Debug for Upgraded { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Upgraded").finish() } } // ===== impl OnUpgrade ===== impl OnUpgrade { #[inline] pub(super) fn none() -> Self { OnUpgrade { rx: None } } #[inline] pub(super) fn is_none(&self) -> 
bool { self.rx.is_none() } } impl Future for OnUpgrade { type Output = Result; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { match self.rx { Some(ref rx) => Pin::new(&mut *rx.lock()).poll(cx).map(|res| match res { Ok(Ok(upgraded)) => Ok(upgraded), Ok(Err(err)) => Err(err), Err(_oneshot_canceled) => Err(Error::new_canceled().with(UpgradeExpected)), }), None => Poll::Ready(Err(Error::new_user_no_upgrade())), } } } // ===== impl Pending ===== impl Pending { #[inline] pub(super) fn fulfill(self, upgraded: Upgraded) { trace!("pending upgrade fulfill"); let _ = self.tx.send(Ok(upgraded)); } /// Don't fulfill the pending Upgrade, but instead signal that /// upgrades are handled manually. #[inline] pub(super) fn manual(self) { trace!("pending upgrade handled manually"); let _ = self.tx.send(Err(Error::new_user_manual_upgrade())); } } // ===== impl UpgradeExpected ===== /// Error cause returned when an upgrade was expected but canceled /// for whatever reason. /// /// This likely means the actual `Conn` future wasn't polled and upgraded. 
#[derive(Debug)] struct UpgradeExpected; impl fmt::Display for UpgradeExpected { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str("upgrade expected but not completed") } } impl StdError for UpgradeExpected {} // ===== impl Io ===== trait Io: AsyncRead + AsyncWrite + Unpin + 'static {} impl Io for T {} mod sealed { use super::OnUpgrade; pub trait CanUpgrade { fn on_upgrade(self) -> OnUpgrade; } impl CanUpgrade for http::Request { fn on_upgrade(mut self) -> OnUpgrade { self.extensions_mut() .remove::() .unwrap_or_else(OnUpgrade::none) } } impl CanUpgrade for &'_ mut http::Request { fn on_upgrade(self) -> OnUpgrade { self.extensions_mut() .remove::() .unwrap_or_else(OnUpgrade::none) } } impl CanUpgrade for http::Response { fn on_upgrade(mut self) -> OnUpgrade { self.extensions_mut() .remove::() .unwrap_or_else(OnUpgrade::none) } } impl CanUpgrade for &'_ mut http::Response { fn on_upgrade(self) -> OnUpgrade { self.extensions_mut() .remove::() .unwrap_or_else(OnUpgrade::none) } } } mod rewind { use std::{ cmp, io, pin::Pin, task::{Context, Poll}, }; use bytes::{Buf, Bytes}; use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; /// Combine a buffer with an IO, rewinding reads to use the buffer. #[derive(Debug)] pub(crate) struct Rewind { pre: Option, inner: T, } impl Rewind { #[inline] pub(crate) fn new_buffered(io: T, buf: Bytes) -> Self { Rewind { pre: Some(buf), inner: io, } } #[cfg(test)] pub(crate) fn rewind(&mut self, bs: Bytes) { debug_assert!(self.pre.is_none()); self.pre = Some(bs); } } impl AsyncRead for Rewind where T: AsyncRead + Unpin, { fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll> { if let Some(mut prefix) = self.pre.take() { // If there are no remaining bytes, let the bytes get dropped. if !prefix.is_empty() { let copy_len = cmp::min(prefix.len(), buf.remaining()); // TODO: There should be a way to do following two lines cleaner... 
buf.put_slice(&prefix[..copy_len]); prefix.advance(copy_len); // Put back what's left if !prefix.is_empty() { self.pre = Some(prefix); } return Poll::Ready(Ok(())); } } Pin::new(&mut self.inner).poll_read(cx, buf) } } impl AsyncWrite for Rewind where T: AsyncWrite + Unpin, { #[inline] fn poll_write( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { Pin::new(&mut self.inner).poll_write(cx, buf) } #[inline] fn poll_write_vectored( mut self: Pin<&mut Self>, cx: &mut Context<'_>, bufs: &[io::IoSlice<'_>], ) -> Poll> { Pin::new(&mut self.inner).poll_write_vectored(cx, bufs) } #[inline] fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { Pin::new(&mut self.inner).poll_flush(cx) } #[inline] fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { Pin::new(&mut self.inner).poll_shutdown(cx) } #[inline] fn is_write_vectored(&self) -> bool { self.inner.is_write_vectored() } } #[cfg(test)] mod tests { use bytes::Bytes; use tokio::io::AsyncReadExt; use super::Rewind; #[tokio::test] async fn partial_rewind() { let underlying = [104, 101, 108, 108, 111]; let mock = tokio_test::io::Builder::new().read(&underlying).build(); let mut stream = Rewind::new_buffered(mock, Bytes::new()); // Read off some bytes, ensure we filled o1 let mut buf = [0; 2]; stream.read_exact(&mut buf).await.expect("read1"); // Rewind the stream so that it is as if we never read in the first place. 
stream.rewind(Bytes::copy_from_slice(&buf[..])); let mut buf = [0; 5]; stream.read_exact(&mut buf).await.expect("read1"); // At this point we should have read everything that was in the MockStream assert_eq!(&buf, &underlying); } #[tokio::test] async fn full_rewind() { let underlying = [104, 101, 108, 108, 111]; let mock = tokio_test::io::Builder::new().read(&underlying).build(); let mut stream = Rewind::new_buffered(mock, Bytes::new()); let mut buf = [0; 5]; stream.read_exact(&mut buf).await.expect("read1"); // Rewind the stream so that it is as if we never read in the first place. stream.rewind(Bytes::copy_from_slice(&buf[..])); let mut buf = [0; 5]; stream.read_exact(&mut buf).await.expect("read1"); assert_eq!(&buf, &underlying); } } } ================================================ FILE: src/client/core.rs ================================================ //! Core HTTP client protocol and utilities. //! //! Much of this codebase is adapted and refined from [hyper](https://github.com/hyperium/hyper), //! aiming to match its performance and reliability for asynchronous HTTP/1 and HTTP/2. mod error; mod proto; pub mod body; pub mod conn; pub mod dispatch; pub mod rt; pub mod upgrade; pub use self::{ error::{Error, Result}, proto::{http1, http2}, }; ================================================ FILE: src/client/emulate.rs ================================================ use http::HeaderMap; use super::{ core::{http1::Http1Options, http2::Http2Options}, group::Group, }; use crate::{header::OrigHeaderMap, tls::TlsOptions}; /// Converts a value into an [`Emulation`] configuration. /// /// This trait lets multiple input types provide a unified way to produce /// an emulation profile. Typical inputs include: /// - Predefined browser profiles /// - Transport option sets (e.g. HTTP/1, HTTP/2, TLS) /// - User-defined strategy types pub trait IntoEmulation { /// Converts `self` into an [`Emulation`] configuration. 
fn into_emulation(self) -> Emulation; } /// Builder for creating an [`Emulation`] configuration. #[must_use] #[derive(Debug)] pub struct EmulationBuilder { emulation: Emulation, } /// HTTP emulation settings for a client profile. /// /// Combines protocol options (HTTP/1, HTTP/2, TLS) and default headers. #[non_exhaustive] #[derive(Debug, Clone)] pub struct Emulation { pub(crate) group: Group, /// Default headers applied to outgoing requests. pub headers: HeaderMap, /// Original headers with preserved case and duplicates. pub orig_headers: OrigHeaderMap, /// TLS configuration. pub tls_options: Option, /// HTTP/1 configuration. pub http1_options: Option, /// HTTP/2 configuration. pub http2_options: Option, } // ==== impl EmulationBuilder ==== impl EmulationBuilder { /// Sets the HTTP/1 options configuration. #[inline] pub fn http1_options(mut self, opts: Http1Options) -> Self { self.emulation.http1_options = Some(opts); self } /// Sets the HTTP/2 options configuration. #[inline] pub fn http2_options(mut self, opts: Http2Options) -> Self { self.emulation.http2_options = Some(opts); self } /// Sets the TLS options configuration. #[inline] pub fn tls_options(mut self, opts: TlsOptions) -> Self { self.emulation.tls_options = Some(opts); self } /// Sets the default headers. #[inline] pub fn headers(mut self, src: HeaderMap) -> Self { crate::util::replace_headers(&mut self.emulation.headers, src); self } /// Sets the original headers. #[inline] pub fn orig_headers(mut self, src: OrigHeaderMap) -> Self { self.emulation.orig_headers.extend(src); self } /// Builds the [`Emulation`] instance. #[inline] pub fn build(mut self, group: Group) -> Emulation { self.emulation.group.emulate(group); self.emulation } } // ==== impl Emulation ==== impl Emulation { /// Creates a new [`EmulationBuilder`]. 
#[inline] pub fn builder() -> EmulationBuilder { EmulationBuilder { emulation: Emulation { group: Group::default(), headers: HeaderMap::new(), orig_headers: OrigHeaderMap::new(), tls_options: None, http1_options: None, http2_options: None, }, } } } impl> IntoEmulation for T { #[inline] fn into_emulation(self) -> Emulation { self.into() } } ================================================ FILE: src/client/future.rs ================================================ use std::{ pin::Pin, task::{Context, Poll, ready}, }; use http::{Request, Uri}; use pin_project_lite::pin_project; use tower::util::{Either, Oneshot}; use super::{Body, BoxedClientService, ClientService, Error, Response}; pin_project! { /// [`Pending`] is a future representing the state of an HTTP request, which may be either /// an in-flight request (with its associated future and URI) or an error state. /// Used to drive the HTTP request to completion or report an error. #[project = PendingProj] pub enum Pending { Request { uri: Option, fut: Pin, Request>>>, }, Error { error: Option, }, } } impl Future for Pending { type Output = Result; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let (uri, res) = match self.project() { PendingProj::Request { uri, fut } => (uri, fut.as_mut().poll(cx)), PendingProj::Error { error } => { let err = error .take() .expect("Pending::Error polled after completion"); return Poll::Ready(Err(err)); } }; let res = ready!(res); let uri = uri .take() .expect("Pending::Request polled after completion"); let res = match res { Ok(res) => Ok(Response::new(res, uri)), Err(err) => { let mut err = err .downcast::() .map_or_else(Error::request, |err| *err); if err.uri().is_none() { err = err.with_uri(uri); } Err(err) } }; Poll::Ready(res) } } #[cfg(test)] mod test { #[test] fn test_future_size() { let s = std::mem::size_of::(); assert!(s <= 360, "size_of::() == {s}, too big"); } #[tokio::test] async fn error_has_url() { let u = "http://does.not.exist.local/ever"; let err = 
crate::Client::new().get(u).send().await.unwrap_err(); assert_eq!(err.uri().unwrap(), u, "{err:?}"); } } ================================================ FILE: src/client/group.rs ================================================ //! # Request Grouping Mechanism //! //! This module provides the [`Group`] structure, which defines the logical boundaries //! for categorizing and segregating outbound requests. //! //! ## Concept //! A `Group` acts as a multi-dimensional identity for a request. In complex networking //! stack environments, two requests targeting the same destination may belong to //! distinct logical groups due to different metadata, security contexts, or //! routing requirements. //! //! ## Logical Segregation //! By assigning requests to different groups, the system ensures: //! 1. **Contextual Isolation**: Requests are processed and dispatched within their defined logical //! partitions. //! 2. **Deterministic Identity**: The internal `BTreeMap` ensures that the identity of a group is //! stable and invariant to the order in which grouping criteria are applied. //! 3. **Resource Affinity**: Resource management (such as connection pooling) respects these //! boundaries, ensuring that resources are never leaked across different request groups. use std::{borrow::Cow, collections::BTreeMap, hash::Hash}; use http::{Uri, Version}; use crate::{client::SocketBindOptions, proxy::Matcher}; macro_rules! impl_group_variants { ($($name:ident $(($ty:ty))?,)*) => { /// Unique discriminator for request grouping dimensions. #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq, PartialOrd, Ord)] enum GroupId { $($name,)* } /// Data container for specific grouping criteria. #[derive(Debug, Clone, Hash, PartialEq, Eq)] enum GroupPart { $($name $(($ty))?,)* } } } impl_group_variants! { Request(Group), Emulate(Group), Named(Cow<'static, str>), Number(u64), Uri(Uri), Version(Version), Proxy(Matcher), SocketBind(Option), } /// A logical identifier for request grouping. 
/// /// `Group` encapsulates the criteria that define a request's execution context. /// Requests with non-identical `Group` states are treated as belonging to /// different logical partitions, preventing unintended interaction or /// resource sharing between them. #[derive(Debug, Default, Clone, Hash, PartialEq, Eq)] pub struct Group(BTreeMap); impl Group { /// Creates a new [`Group`] with a custom name. #[inline] pub fn named>>(name: N) -> Self { Group(BTreeMap::from([( GroupId::Named, GroupPart::Named(name.into()), )])) } /// Creates a new [`Group`] with a numeric identifier. pub fn number>(value: V) -> Self { Group(BTreeMap::from([( GroupId::Number, GroupPart::Number(value.into()), )])) } /// Groups the request by a specific target [`Uri`]. #[inline] pub(crate) fn uri(&mut self, uri: Uri) -> &mut Self { self.extend(GroupId::Uri, GroupPart::Uri(uri)) } /// Groups the request by its required HTTP [`Version`]. #[inline] pub(crate) fn version(&mut self, version: Option) -> &mut Self { self.extend(GroupId::Version, version.map(GroupPart::Version)) } /// Groups the request based on its proxy [`Matcher`] criteria. #[inline] pub(crate) fn proxy(&mut self, proxy: Option) -> &mut Self { self.extend(GroupId::Proxy, proxy.map(GroupPart::Proxy)) } /// Groups the request by its resolved socket bind options. #[inline] pub(crate) fn socket_bind(&mut self, opts: Option) -> &mut Self { self.extend(GroupId::SocketBind, GroupPart::SocketBind(opts)) } /// Creates a nested request group. #[inline] pub(crate) fn request(&mut self, group: Group) -> &mut Self { self.extend(GroupId::Request, GroupPart::Request(group)) } /// Groups the request by its emulation-layer characteristics. 
#[inline] pub(crate) fn emulate(&mut self, group: Group) -> &mut Self { self.extend(GroupId::Emulate, GroupPart::Emulate(group)) } #[inline] fn extend>>(&mut self, id: GroupId, entry: T) -> &mut Self { if let Some(entry) = entry.into() { self.0.insert(id, entry); } self } } impl From for Group { #[inline] fn from(value: u64) -> Self { Group::number(value) } } impl From<&'static str> for Group { #[inline] fn from(value: &'static str) -> Self { Group::named(value) } } impl From for Group { #[inline] fn from(value: String) -> Self { Group::named(value) } } impl From> for Group { #[inline] fn from(value: Cow<'static, str>) -> Self { Group::named(value) } } #[cfg(test)] mod tests { use std::hash::{DefaultHasher, Hash, Hasher}; use super::*; #[test] fn test_group_identity_invariance() { let mut g1 = Group::default(); g1.extend(GroupId::Number, GroupPart::Number(42)); g1.extend(GroupId::Named, GroupPart::Named("worker".into())); let mut g2 = Group::default(); g2.extend(GroupId::Named, GroupPart::Named("worker".into())); g2.extend(GroupId::Number, GroupPart::Number(42)); let mut h1 = DefaultHasher::new(); g1.hash(&mut h1); let mut h2 = DefaultHasher::new(); g2.hash(&mut h2); assert_eq!( h1.finish(), h2.finish(), "Request groups must maintain identical hashes regardless of criteria insertion order" ); } } ================================================ FILE: src/client/layer/client/exec.rs ================================================ use std::{future::Future, pin::Pin, sync::Arc}; use crate::client::core::rt::Executor; pub(crate) type BoxSendFuture = Pin + Send>>; // Either the user provides an executor for background tasks, or we use `tokio::spawn`. 
#[derive(Clone)] pub struct Exec(Arc + Send + Sync>); // ===== impl Exec ===== impl Exec { pub(super) fn new(inner: E) -> Self where E: Executor + Send + Sync + 'static, { Exec(Arc::new(inner)) } } impl Executor for Exec where F: Future + Send + 'static, { fn execute(&self, fut: F) { self.0.execute(Box::pin(fut)); } } ================================================ FILE: src/client/layer/client/lazy.rs ================================================ use std::{ future::Future, pin::Pin, task::{self, Poll}, }; use pin_project_lite::pin_project; pub(crate) trait Started: Future { fn started(&self) -> bool; } pub(crate) fn lazy(func: F) -> Lazy where F: FnOnce() -> R, R: Future + Unpin, { Lazy { inner: Inner::Init { func }, } } // FIXME: allow() required due to `impl Trait` leaking types to this lint pin_project! { pub(crate) struct Lazy { #[pin] inner: Inner, } } pin_project! { #[project = InnerProj] #[project_replace = InnerProjReplace] enum Inner { Init { func: F }, Fut { #[pin] fut: R }, Empty, } } impl Started for Lazy where F: FnOnce() -> R, R: Future, { fn started(&self) -> bool { match self.inner { Inner::Init { .. } => false, Inner::Fut { .. 
} | Inner::Empty => true, } } } impl Future for Lazy where F: FnOnce() -> R, R: Future, { type Output = R::Output; fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { let mut this = self.project(); if let InnerProj::Fut { fut } = this.inner.as_mut().project() { return fut.poll(cx); } match this.inner.as_mut().project_replace(Inner::Empty) { InnerProjReplace::Init { func } => { this.inner.set(Inner::Fut { fut: func() }); if let InnerProj::Fut { fut } = this.inner.project() { return fut.poll(cx); } unreachable!() } _ => unreachable!("lazy state wrong"), } } } ================================================ FILE: src/client/layer/client/pool.rs ================================================ use std::{ collections::{HashMap, HashSet, VecDeque}, convert::Infallible, error::Error as StdError, fmt::{self, Debug}, future::Future, hash::Hash, num::NonZeroUsize, ops::{Deref, DerefMut}, pin::Pin, sync::{Arc, Weak}, task::{self, Poll, ready}, time::{Duration, Instant}, }; use lru::LruCache; use tokio::sync::oneshot; use super::exec::{self, Exec}; use crate::{ client::core::rt::{Executor, Time, Timer}, sync::Mutex, }; pub struct Pool { // If the pool is disabled, this is None. inner: Option>>>, } // Before using a pooled connection, make sure the sender is not dead. // // This is a trait to allow the `client::pool::tests` to work for `i32`. // // See https://github.com/hyperium/hyper/issues/1429 pub trait Poolable: Unpin + Send + Sized + 'static { fn is_open(&self) -> bool; /// Reserve this connection. /// /// Allows for HTTP/2 to return a shared reservation. fn reserve(self) -> Reservation; fn can_share(&self) -> bool; } pub trait Key: Eq + Hash + Clone + Debug + Unpin + Send + 'static {} impl Key for T where T: Eq + Hash + Clone + Debug + Unpin + Send + 'static {} /// A marker to identify what version a pooled connection is. 
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] #[repr(u8)] pub enum Ver { Auto, Http2, } /// When checking out a pooled connection, it might be that the connection /// only supports a single reservation, or it might be usable for many. /// /// Specifically, HTTP/1 requires a unique reservation, but HTTP/2 can be /// used for multiple requests. // FIXME: allow() required due to `impl Trait` leaking types to this lint pub enum Reservation { /// This connection could be used multiple times, the first one will be /// reinserted into the `idle` pool, and the second will be given to /// the `Checkout`. Shared(T, T), /// This connection requires unique access. It will be returned after /// use is complete. Unique(T), } /// Simple type alias in case the key type needs to be adjusted. // pub type Key = (http::uri::Scheme, http::uri::Authority); //Arc; struct PoolInner { // A flag that a connection is being established, and the connection // should be shared. This prevents making multiple HTTP/2 connections // to the same host. connecting: HashSet, // These are internal Conns sitting in the event loop in the KeepAlive // state, waiting to receive a new Request to send on the socket. idle: LruCache>>, max_idle_per_host: usize, // These are outstanding Checkouts that are waiting for a socket to be // able to send a Request one. This is used when "racing" for a new // connection. // // The Client starts 2 tasks, 1 to connect a new socket, and 1 to wait // for the Pool to receive an idle Conn. When a Conn becomes idle, // this list is checked for any parked Checkouts, and tries to notify // them that the Conn could be used instead of waiting for a brand new // connection. waiters: HashMap>>, // A oneshot channel is used to allow the interval to be notified when // the Pool completely drops. That way, the interval can cancel immediately. 
idle_interval_ref: Option>, exec: Exec, timer: Time, timeout: Option, } // This is because `Weak::new()` *allocates* space for `T`, even if it // doesn't need it! struct WeakOpt(Option>); #[derive(Clone, Copy, Debug)] pub struct Config { pub idle_timeout: Option, pub max_idle_per_host: usize, pub max_pool_size: Option, } impl Config { pub fn is_enabled(&self) -> bool { self.max_idle_per_host > 0 } } impl Pool { pub fn new(config: Config, executor: E, timer: Time) -> Pool where E: Executor + Send + Sync + Clone + 'static, { let inner = if config.is_enabled() { Some(Arc::new(Mutex::new(PoolInner { connecting: HashSet::default(), idle: config .max_pool_size .map_or_else(LruCache::unbounded, LruCache::new), idle_interval_ref: None, max_idle_per_host: config.max_idle_per_host, waiters: HashMap::default(), exec: Exec::new(executor), timer, timeout: config.idle_timeout, }))) } else { None }; Pool { inner } } pub(crate) fn is_enabled(&self) -> bool { self.inner.is_some() } } impl Pool { /// Returns a `Checkout` which is a future that resolves if an idle /// connection becomes available. pub fn checkout(&self, key: K) -> Checkout { Checkout { key, pool: self.clone(), waiter: None, } } /// Ensure that there is only ever 1 connecting task for HTTP/2 /// connections. This does nothing for HTTP/1. pub fn connecting(&self, key: K, ver: Ver) -> Option> { if ver == Ver::Http2 { if let Some(ref enabled) = self.inner { let mut inner = enabled.lock(); return if inner.connecting.insert(key.clone()) { let connecting = Connecting { key, pool: WeakOpt::downgrade(enabled), }; Some(connecting) } else { trace!("HTTP/2 connecting already in progress for {:?}", key); None }; } } // else Some(Connecting { key, // in HTTP/1's case, there is never a lock, so we don't // need to do anything in Drop. 
pool: WeakOpt::none(), }) } pub fn pooled(&self, mut connecting: Connecting, value: T) -> Pooled { let (value, pool_ref) = if let Some(ref enabled) = self.inner { match value.reserve() { Reservation::Shared(to_insert, to_return) => { let mut inner = enabled.lock(); inner.put(&connecting.key, to_insert, enabled); // Do this here instead of Drop for Connecting because we // already have a lock, no need to lock the mutex twice. inner.connected(&connecting.key); drop(inner); // prevent the Drop of Connecting from repeating inner.connected() connecting.pool = WeakOpt::none(); // Shared reservations don't need a reference to the pool, // since the pool always keeps a copy. (to_return, WeakOpt::none()) } Reservation::Unique(value) => { // Unique reservations must take a reference to the pool // since they hope to reinsert once the reservation is // completed (value, WeakOpt::downgrade(enabled)) } } } else { // If pool is not enabled, skip all the things... // The Connecting should have had no pool ref debug_assert!(connecting.pool.upgrade().is_none()); (value, WeakOpt::none()) }; Pooled { key: connecting.key.clone(), is_reused: false, pool: pool_ref, value: Some(value), } } fn reuse(&self, key: &K, value: T) -> Pooled { debug!("reuse idle connection for {:?}", key); // TODO: unhack this // In Pool::pooled(), which is used for inserting brand new connections, // there's some code that adjusts the pool reference taken depending // on if the Reservation can be shared or is unique. By the time // reuse() is called, the reservation has already been made, and // we just have the final value, without knowledge of if this is // unique or shared. So, the hack is to just assume Ver::Http2 means // shared... 
:( let mut pool_ref = WeakOpt::none(); if !value.can_share() { if let Some(ref enabled) = self.inner { pool_ref = WeakOpt::downgrade(enabled); } } Pooled { is_reused: true, key: key.clone(), pool: pool_ref, value: Some(value), } } } /// Pop off this list, looking for a usable connection that hasn't expired. struct IdlePopper<'a, T, K> { #[allow(dead_code)] key: &'a K, list: &'a mut Vec>, } impl<'a, T: Poolable + 'a, K: Debug> IdlePopper<'a, T, K> { fn pop(self, expiration: &Expiration, now: Instant) -> Option> { while let Some(entry) = self.list.pop() { // If the connection has been closed, or is older than our idle // timeout, simply drop it and keep looking... if !entry.value.is_open() { trace!("removing closed connection for {:?}", self.key); continue; } // TODO: Actually, since the `idle` list is pushed to the end always, // that would imply that if *this* entry is expired, then anything // "earlier" in the list would *have* to be expired also... Right? // // In that case, we could just break out of the loop and drop the // whole list... 
if expiration.expires(entry.idle_at, now) { trace!("removing expired connection for {:?}", self.key); continue; } let value = match entry.value.reserve() { Reservation::Shared(to_reinsert, to_checkout) => { self.list.push(Idle { idle_at: now, value: to_reinsert, }); to_checkout } Reservation::Unique(unique) => unique, }; return Some(Idle { idle_at: entry.idle_at, value, }); } None } } impl PoolInner { fn now(&self) -> Instant { self.timer.now() } fn put(&mut self, key: &K, value: T, __pool_ref: &Arc>>) { if value.can_share() && self.idle.peek(key).is_some() { trace!("put; existing idle HTTP/2 connection for {:?}", key); return; } trace!("put; add idle connection for {:?}", key); let mut remove_waiters = false; let mut value = Some(value); if let Some(waiters) = self.waiters.get_mut(key) { while let Some(tx) = waiters.pop_front() { if !tx.is_closed() { let reserved = value.take().expect("value already sent"); let reserved = match reserved.reserve() { Reservation::Shared(to_keep, to_send) => { value = Some(to_keep); to_send } Reservation::Unique(uniq) => uniq, }; match tx.send(reserved) { Ok(()) => { if value.is_none() { break; } else { continue; } } Err(e) => { value = Some(e); } } } trace!("put; removing canceled waiter for {:?}", key); } remove_waiters = waiters.is_empty(); } if remove_waiters { self.waiters.remove(key); } if let Some(value) = value { // borrow-check scope... { let now = self.now(); let idle_list = self .idle .get_or_insert_mut(key.clone(), Vec::>::default); if self.max_idle_per_host <= idle_list.len() { trace!("max idle per host for {:?}, dropping connection", key); return; } debug!("pooling idle connection for {:?}", key); idle_list.push(Idle { value, idle_at: now, }); } self.spawn_idle_interval(__pool_ref); } else { trace!("put; found waiter for {:?}", key) } } /// A `Connecting` task is complete. Not necessarily successfully, /// but the lock is going away, so clean up. 
fn connected(&mut self, key: &K) { let existed = self.connecting.remove(key); debug_assert!(existed, "Connecting dropped, key not in pool.connecting"); // cancel any waiters. if there are any, it's because // this Connecting task didn't complete successfully. // those waiters would never receive a connection. self.waiters.remove(key); } fn spawn_idle_interval(&mut self, pool_ref: &Arc>>) { if self.idle_interval_ref.is_some() { return; } let dur = if let Some(dur) = self.timeout { dur } else { return; }; if dur == Duration::ZERO { return; } if matches!(self.timer, Time::Empty) { return; } // While someone might want a shorter duration, and it will be respected // at checkout time, there's no need to wake up and proactively evict // faster than this. // // The value of 90ms was chosen as a balance between responsiveness and // efficiency. A shorter interval could lead to unnecessary wake-ups and // increased CPU usage, while a longer interval might delay the eviction // of idle connections. This value has been empirically determined to // work well in typical use cases. const MIN_CHECK: Duration = Duration::from_millis(90); let dur = dur.max(MIN_CHECK); let (tx, rx) = oneshot::channel(); self.idle_interval_ref = Some(tx); let interval = IdleTask { timer: self.timer.clone(), duration: dur, pool: WeakOpt::downgrade(pool_ref), pool_drop_notifier: rx, }; self.exec.execute(interval.run()); } } impl PoolInner { /// Any `FutureResponse`s that were created will have made a `Checkout`, /// and possibly inserted into the pool that it is waiting for an idle /// connection. If a user ever dropped that future, we need to clean out /// those parked senders. 
    fn clean_waiters(&mut self, key: &K) {
        let mut remove_waiters = false;
        if let Some(waiters) = self.waiters.get_mut(key) {
            // Keep only waiters whose receiving end is still alive.
            waiters.retain(|tx| !tx.is_closed());
            remove_waiters = waiters.is_empty();
        }
        if remove_waiters {
            self.waiters.remove(key);
        }
    }
}

impl PoolInner {
    /// This should *only* be called by the IdleTask
    fn clear_expired(&mut self) {
        let dur = self.timeout.expect("interval assumes timeout");
        let now = self.now();
        let mut keys_to_remove = Vec::new();
        for (key, values) in self.idle.iter_mut() {
            // Evict closed and expired entries in place.
            values.retain(|entry| {
                if !entry.value.is_open() {
                    trace!("idle interval evicting closed for {:?}", key);
                    return false;
                }

                // Avoid `Instant::sub` to avoid issues like rust-lang/rust#86470.
                if now.saturating_duration_since(entry.idle_at) > dur {
                    trace!("idle interval evicting expired for {:?}", key);
                    return false;
                }

                // Otherwise, keep this value...
                true
            });

            // If the list is empty, remove the key.
            if values.is_empty() {
                keys_to_remove.push(key.clone());
            }
        }
        for key in keys_to_remove {
            trace!("idle interval removing empty key {:?}", key);
            self.idle.pop(&key);
        }
    }
}

impl Clone for Pool {
    fn clone(&self) -> Pool {
        // Cheap clone: only the shared inner handle is duplicated.
        Pool {
            inner: self.inner.clone(),
        }
    }
}

/// A wrapped poolable value that tries to reinsert to the Pool on Drop.
// Note: The bounds `T: Poolable` is needed for the Drop impl.
pub struct Pooled {
    // `Option` only so `Drop` can `take()` the value back out.
    value: Option,
    is_reused: bool,
    key: K,
    pool: WeakOpt>>,
}

impl Pooled {
    /// Whether this connection was taken from the idle pool (as opposed
    /// to freshly established).
    pub fn is_reused(&self) -> bool {
        self.is_reused
    }

    /// Whether this value is backed by an actual pool (pooling enabled).
    pub fn is_pool_enabled(&self) -> bool {
        self.pool.0.is_some()
    }

    fn as_ref(&self) -> &T {
        self.value.as_ref().expect("not dropped")
    }

    fn as_mut(&mut self) -> &mut T {
        self.value.as_mut().expect("not dropped")
    }
}

impl Deref for Pooled {
    type Target = T;
    fn deref(&self) -> &T {
        self.as_ref()
    }
}

impl DerefMut for Pooled {
    fn deref_mut(&mut self) -> &mut T {
        self.as_mut()
    }
}

impl Drop for Pooled {
    fn drop(&mut self) {
        if let Some(value) = self.value.take() {
            if !value.is_open() {
                // If we *already* know the connection is done here,
                // it shouldn't be re-inserted back into the pool.
                return;
            }

            if let Some(pool) = self.pool.upgrade() {
                // Pool still alive: try to reinsert the value.
                let mut inner = pool.lock();
                inner.put(&self.key, value, &pool);
            } else if !value.can_share() {
                trace!("pool dropped, dropping pooled ({:?})", self.key);
            }
            // Ver::Http2 is already in the Pool (or dead), so we wouldn't
            // have an actual reference to the Pool.
        }
    }
}

impl Debug for Pooled {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Pooled").field("key", &self.key).finish()
    }
}

/// An idle pool entry plus the instant it became idle.
struct Idle {
    idle_at: Instant,
    value: T,
}

/// Future resolving to a pooled connection for `key`, either by taking
/// an idle entry or by parking as a waiter until one is returned.
pub struct Checkout {
    key: K,
    pool: Pool,
    // `Some` once this checkout has parked itself in `pool.waiters`.
    waiter: Option>,
}

#[derive(Debug)]
#[non_exhaustive]
pub enum Error {
    PoolDisabled,
    CheckoutNoLongerWanted,
    CheckedOutClosedValue,
}

impl Error {
    pub(super) fn is_canceled(&self) -> bool {
        matches!(self, Error::CheckedOutClosedValue)
    }
}

impl fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(match self {
            Error::PoolDisabled => "pool is disabled",
            Error::CheckedOutClosedValue => "checked out connection was closed",
            Error::CheckoutNoLongerWanted => "request was canceled",
        })
    }
}

impl StdError for Error {}

impl Checkout {
    /// Poll the parked waiter channel, if any.
    ///
    /// `Some(Ok(..))` when a still-open connection was handed over,
    /// `Some(Err(..))` when the delivered value was already closed or the
    /// sender was dropped, `None` when no waiter is registered.
    fn poll_waiter(
        &mut self,
        cx: &mut task::Context<'_>,
    ) -> Poll, Error>>> {
        if let Some(mut rx) = self.waiter.take() {
            match Pin::new(&mut rx).poll(cx) {
                Poll::Ready(Ok(value)) => {
                    if value.is_open() {
                        Poll::Ready(Some(Ok(self.pool.reuse(&self.key, value))))
                    } else {
                        Poll::Ready(Some(Err(Error::CheckedOutClosedValue)))
                    }
                }
                Poll::Pending => {
                    // Not ready yet; put the receiver back for the next poll.
                    self.waiter = Some(rx);
                    Poll::Pending
                }
                Poll::Ready(Err(_canceled)) => {
                    Poll::Ready(Some(Err(Error::CheckoutNoLongerWanted)))
                }
            }
        } else {
            Poll::Ready(None)
        }
    }

    /// Try to take an idle connection from the pool; when none is
    /// available, park this task as a waiter (at most once).
    fn checkout(&mut self, cx: &mut task::Context<'_>) -> Option> {
        let entry = {
            let mut inner = self.pool.inner.as_ref()?.lock();
            let expiration = Expiration::new(inner.timeout);
            let now = inner.now();
            let maybe_entry = inner.idle.get_mut(&self.key).and_then(|list| {
                trace!("take? {:?}: expiration = {:?}", self.key, expiration.0);
                // A block to end the mutable borrow on list,
                // so the map below can check is_empty()
                {
                    let popper = IdlePopper {
                        key: &self.key,
                        list,
                    };
                    popper.pop(&expiration, now)
                }
                .map(|e| (e, list.is_empty()))
            });

            let (entry, empty) = if let Some((e, empty)) = maybe_entry {
                (Some(e), empty)
            } else {
                // No entry found means nuke the list for sure.
                (None, true)
            };
            if empty {
                inner.idle.pop(&self.key);
            }

            if entry.is_none() && self.waiter.is_none() {
                // Nothing idle: park as a waiter under this key.
                let (tx, mut rx) = oneshot::channel();
                trace!("checkout waiting for idle connection: {:?}", self.key);
                inner
                    .waiters
                    .entry(self.key.clone())
                    .or_insert_with(VecDeque::new)
                    .push_back(tx);

                // register the waker with this oneshot
                assert!(Pin::new(&mut rx).poll(cx).is_pending());
                self.waiter = Some(rx);
            }

            entry
        };

        entry.map(|e| self.pool.reuse(&self.key, e.value))
    }
}

impl Future for Checkout {
    type Output = Result, Error>;

    fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll {
        // First drain any parked-waiter result, then try the idle list.
        if let Some(pooled) = ready!(self.poll_waiter(cx)?) {
            return Poll::Ready(Ok(pooled));
        }

        if let Some(pooled) = self.checkout(cx) {
            Poll::Ready(Ok(pooled))
        } else if !self.pool.is_enabled() {
            Poll::Ready(Err(Error::PoolDisabled))
        } else {
            // There's a new waiter, already registered in self.checkout()
            debug_assert!(self.waiter.is_some());
            Poll::Pending
        }
    }
}

impl Drop for Checkout {
    fn drop(&mut self) {
        // If we were parked as a waiter, remove our (now closed) sender.
        if self.waiter.take().is_some() {
            trace!("checkout dropped for {:?}", self.key);
            if let Some(mut inner) = self.pool.inner.as_ref().map(|i| i.lock()) {
                inner.clean_waiters(&self.key);
            }
        }
    }
}

/// The "connecting lock": at most one exists per HTTP/2 key, serializing
/// connection establishment (see `connect_to` in the client layer).
pub struct Connecting {
    key: K,
    pool: WeakOpt>>,
}

impl Connecting {
    pub fn alpn_h2(self, pool: &Pool) -> Option {
        debug_assert!(
            self.pool.0.is_none(),
            "Connecting::alpn_h2 but already Http2"
        );

        pool.connecting(self.key.clone(), Ver::Http2)
    }
}

impl Drop for Connecting {
    fn drop(&mut self) {
        if let Some(pool) = self.pool.upgrade() {
            // No need to panic on drop, that could abort!
            let mut inner = pool.lock();
            inner.connected(&self.key);
        }
    }
}

/// Optional idle timeout used when popping idle entries.
struct Expiration(Option);

impl Expiration {
    fn new(dur: Option) -> Expiration {
        Expiration(dur)
    }

    /// `true` when `instant` is older than the configured timeout
    /// relative to `now`; always `false` when no timeout is set.
    fn expires(&self, instant: Instant, now: Instant) -> bool {
        match self.0 {
            // Avoid `Instant::elapsed` to avoid issues like rust-lang/rust#86470.
            Some(timeout) => now.saturating_duration_since(instant) > timeout,
            None => false,
        }
    }
}

/// Background task that periodically evicts expired idle connections.
struct IdleTask {
    timer: Time,
    duration: Duration,
    pool: WeakOpt>>,
    // This allows the IdleTask to be notified as soon as the entire
    // Pool is fully dropped, and shutdown. This channel is never sent on,
    // but Err(Canceled) will be received when the Pool is dropped.
    pool_drop_notifier: oneshot::Receiver,
}

impl IdleTask {
    async fn run(self) {
        use futures_util::future;

        let mut sleep = self.timer.sleep_until(self.timer.now() + self.duration);
        let mut on_pool_drop = self.pool_drop_notifier;
        loop {
            // Race the interval sleep against the pool-drop notification.
            match future::select(&mut on_pool_drop, &mut sleep).await {
                future::Either::Left(_) => {
                    // pool dropped, bah-bye
                    break;
                }
                future::Either::Right(((), _)) => {
                    // Interval elapsed: sweep expired entries, then re-arm.
                    if let Some(inner) = self.pool.upgrade() {
                        let mut inner = inner.lock();
                        trace!("idle interval checking for expired");
                        inner.clear_expired();
                        drop(inner);
                    }
                    let deadline = self.timer.now() + self.duration;
                    self.timer.reset(&mut sleep, deadline);
                }
            }
        }
        trace!("pool closed, canceling idle interval");
    }
}

impl WeakOpt {
    fn none() -> Self {
        WeakOpt(None)
    }

    fn downgrade(arc: &Arc) -> Self {
        WeakOpt(Some(Arc::downgrade(arc)))
    }

    fn upgrade(&self) -> Option> {
        self.0.as_ref().and_then(Weak::upgrade)
    }
}

#[cfg(test)]
mod tests {
    use std::{
        fmt::Debug,
        future::Future,
        hash::Hash,
        num::NonZero,
        pin::Pin,
        sync::Arc,
        task::{self, Poll},
        time::Duration,
    };

    use super::{Connecting, Key, Pool, Poolable, Reservation, WeakOpt};
    use crate::{
        client::core::rt::{Time, TokioExecutor, TokioTimer},
        sync::MutexGuard,
    };

    #[derive(Clone, Debug, PartialEq, Eq, Hash)]
    struct KeyImpl(http::uri::Scheme, http::uri::Authority);

    /// Test unique reservations.
#[derive(Debug, PartialEq, Eq)] struct Uniq(T); impl Poolable for Uniq { fn is_open(&self) -> bool { true } fn reserve(self) -> Reservation { Reservation::Unique(self) } fn can_share(&self) -> bool { false } } fn c(key: K) -> Connecting { Connecting { key, pool: WeakOpt::none(), } } fn host_key(s: &str) -> KeyImpl { KeyImpl(http::uri::Scheme::HTTP, s.parse().expect("host key")) } fn pool_no_timer() -> Pool { pool_max_idle_no_timer(usize::MAX) } fn pool_max_idle_no_timer(max_idle: usize) -> Pool { Pool::new( super::Config { idle_timeout: Some(Duration::from_millis(100)), max_idle_per_host: max_idle, max_pool_size: None, }, TokioExecutor::new(), Time::Empty, ) } impl Pool { fn locked(&self) -> MutexGuard<'_, super::PoolInner> { self.inner.as_ref().expect("enabled").lock() } } #[tokio::test] async fn test_pool_checkout_smoke() { let pool = pool_no_timer(); let key = host_key("foo"); let pooled = pool.pooled(c(key.clone()), Uniq(41)); drop(pooled); match pool.checkout(key).await { Ok(pooled) => assert_eq!(*pooled, Uniq(41)), Err(_) => panic!("not ready"), }; } /// Helper to check if the future is ready after polling once. 
struct PollOnce<'a, F>(&'a mut F); impl Future for PollOnce<'_, F> where F: Future> + Unpin, { type Output = Option<()>; fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { match Pin::new(&mut self.0).poll(cx) { Poll::Ready(Ok(_)) => Poll::Ready(Some(())), Poll::Ready(Err(_)) => Poll::Ready(Some(())), Poll::Pending => Poll::Ready(None), } } } #[tokio::test] async fn test_pool_checkout_returns_none_if_expired() { let pool = pool_no_timer(); let key = host_key("foo"); let pooled = pool.pooled(c(key.clone()), Uniq(41)); drop(pooled); let timeout = pool.locked().timeout.unwrap(); tokio::time::sleep(timeout).await; let mut checkout = pool.checkout(key); let poll_once = PollOnce(&mut checkout); let is_not_ready = poll_once.await.is_none(); assert!(is_not_ready); } #[tokio::test] async fn test_pool_checkout_removes_expired() { let pool = pool_no_timer(); let key = host_key("foo"); pool.pooled(c(key.clone()), Uniq(41)); pool.pooled(c(key.clone()), Uniq(5)); pool.pooled(c(key.clone()), Uniq(99)); assert_eq!( pool.locked().idle.get(&key).map(|entries| entries.len()), Some(3) ); let timeout = pool.locked().timeout.unwrap(); tokio::time::sleep(timeout).await; let mut checkout = pool.checkout(key.clone()); let poll_once = PollOnce(&mut checkout); // checkout.await should clean out the expired poll_once.await; assert!(pool.locked().idle.get(&key).is_none()); } #[test] fn test_pool_max_idle_per_host() { let pool = pool_max_idle_no_timer(2); let key = host_key("foo"); pool.pooled(c(key.clone()), Uniq(41)); pool.pooled(c(key.clone()), Uniq(5)); pool.pooled(c(key.clone()), Uniq(99)); // pooled and dropped 3, max_idle should only allow 2 assert_eq!( pool.locked().idle.get(&key).map(|entries| entries.len()), Some(2) ); } #[tokio::test] async fn test_pool_timer_removes_expired_realtime() { test_pool_timer_removes_expired_inner().await } #[tokio::test(start_paused = true)] async fn test_pool_timer_removes_expired_faketime() { 
test_pool_timer_removes_expired_inner().await } async fn test_pool_timer_removes_expired_inner() { let pool = Pool::new( super::Config { idle_timeout: Some(Duration::from_millis(10)), max_idle_per_host: usize::MAX, max_pool_size: None, }, TokioExecutor::new(), Time::Timer(Arc::new(TokioTimer::new())), ); let key = host_key("foo"); pool.pooled(c(key.clone()), Uniq(41)); pool.pooled(c(key.clone()), Uniq(5)); pool.pooled(c(key.clone()), Uniq(99)); assert_eq!( pool.locked().idle.get(&key).map(|entries| entries.len()), Some(3) ); // Let the timer tick passed the expiration... tokio::time::sleep(Duration::from_millis(30)).await; // But minimum interval is higher, so nothing should have been reaped assert_eq!( pool.locked().idle.get(&key).map(|entries| entries.len()), Some(3) ); // Now wait passed the minimum interval more tokio::time::sleep(Duration::from_millis(70)).await; assert!(pool.locked().idle.get(&key).is_none()); } #[tokio::test] async fn test_pool_checkout_task_unparked() { use futures_util::{FutureExt, future::join}; let pool = pool_no_timer(); let key = host_key("foo"); let pooled = pool.pooled(c(key.clone()), Uniq(41)); let checkout = join(pool.checkout(key), async { // the checkout future will park first, // and then this lazy future will be polled, which will insert // the pooled back into the pool // // this test makes sure that doing so will unpark the checkout drop(pooled); }) .map(|(entry, _)| entry); assert_eq!(*checkout.await.unwrap(), Uniq(41)); } #[tokio::test] async fn test_pool_checkout_drop_cleans_up_waiters() { let pool = pool_no_timer::, KeyImpl>(); let key = host_key("foo"); let mut checkout1 = pool.checkout(key.clone()); let mut checkout2 = pool.checkout(key.clone()); let poll_once1 = PollOnce(&mut checkout1); let poll_once2 = PollOnce(&mut checkout2); // first poll needed to get into Pool's parked poll_once1.await; assert_eq!(pool.locked().waiters.get(&key).unwrap().len(), 1); poll_once2.await; 
assert_eq!(pool.locked().waiters.get(&key).unwrap().len(), 2); // on drop, clean up Pool drop(checkout1); assert_eq!(pool.locked().waiters.get(&key).unwrap().len(), 1); drop(checkout2); assert!(!pool.locked().waiters.contains_key(&key)); } #[derive(Debug)] struct CanClose { #[allow(unused)] val: i32, closed: bool, } impl Poolable for CanClose { fn is_open(&self) -> bool { !self.closed } fn reserve(self) -> Reservation { Reservation::Unique(self) } fn can_share(&self) -> bool { false } } #[test] fn pooled_drop_if_closed_doesnt_reinsert() { let pool = pool_no_timer(); let key = host_key("foo"); pool.pooled( c(key.clone()), CanClose { val: 57, closed: true, }, ); assert!(pool.locked().idle.get(&key).is_none()); } #[tokio::test] async fn test_pool_size_limit() { let pool = Pool::new( super::Config { idle_timeout: Some(Duration::from_millis(100)), max_idle_per_host: usize::MAX, max_pool_size: Some(NonZero::new(2).expect("max pool size")), }, TokioExecutor::new(), Time::Empty, ); let key1 = host_key("foo"); let key2 = host_key("bar"); let key3 = host_key("baz"); pool.pooled(c(key1.clone()), Uniq(41)); pool.pooled(c(key2.clone()), Uniq(5)); pool.pooled(c(key3.clone()), Uniq(99)); assert!(pool.locked().idle.get(&key1).is_none()); assert!(pool.locked().idle.get(&key2).is_some()); assert!(pool.locked().idle.get(&key3).is_some()); } } ================================================ FILE: src/client/layer/client.rs ================================================ //! 
Much of this codebase is adapted and refined from [hyper-util](https://github.com/hyperium/hyper-util),
#[must_use] pub(crate) struct HttpClient { config: Config, connector: C, exec: Exec, h1_builder: conn::http1::Builder, h2_builder: conn::http2::Builder, pool: pool::Pool, ConnectionId>, #[cfg(feature = "cookies")] cookie_store: RequestConfig>, } #[derive(Clone, Copy)] struct Config { retry_canceled_requests: bool, set_host: bool, ver: Ver, } #[derive(Debug)] pub struct Error { kind: ErrorKind, source: Option, #[allow(unused)] connect_info: Option, } #[derive(Debug)] enum ErrorKind { Canceled, ChannelClosed, Connect, ProxyConnect, UserUnsupportedRequestMethod, UserUnsupportedVersion, UserAbsoluteUriRequired, SendRequest, } enum ClientConnectError { Normal(Error), CheckoutIsClosed(pool::Error), } #[allow(clippy::large_enum_variant)] enum TrySendError { Retryable { error: Error, req: Request, connection_reused: bool, }, Nope(Error), } macro_rules! e { ($kind:ident) => { Error { kind: ErrorKind::$kind, source: None, connect_info: None, } }; ($kind:ident, $src:expr) => { Error { kind: ErrorKind::$kind, source: Some($src.into()), connect_info: None, } }; } // ===== impl HttpClient ===== impl HttpClient<(), ()> { /// Create a builder to configure a new [`HttpClient`]. 
#[inline] pub fn builder(executor: E) -> Builder where E: Executor + Send + Sync + Clone + 'static, { Builder::new(executor) } } impl HttpClient where C: tower::Service + Clone + Send + Sync + 'static, C::Response: AsyncRead + AsyncWrite + Connection + Unpin + Send + 'static, C::Error: Into, C::Future: Unpin + Send + 'static, B: Body + Send + 'static + Unpin, B::Data: Send, B::Error: Into, { fn request( &self, mut req: Request, ) -> BoxFuture<'static, Result, BoxError>> { let is_http_connect = req.method() == Method::CONNECT; // Validate HTTP version early match req.version() { Version::HTTP_10 if is_http_connect => { warn!("CONNECT is not allowed for HTTP/1.0"); return Box::pin(future::err(e!(UserUnsupportedRequestMethod).into())); } Version::HTTP_10 | Version::HTTP_11 | Version::HTTP_2 => {} // completely unsupported HTTP version (like HTTP/0.9)! _unsupported => { warn!("Request has unsupported version: {:?}", _unsupported); return Box::pin(future::err(e!(UserUnsupportedVersion).into())); } }; // Extract and normalize URI let uri = match normalize_uri(&mut req, is_http_connect) { Ok(uri) => uri, Err(err) => { return Box::pin(future::err(e!(UserAbsoluteUriRequired, err).into())); } }; let mut this = self.clone(); // Extract per-request options from the request extensions and apply them to the client. 
let descriptor = { let RequestOptions { group, proxy, version, tls_options, http1_options, http2_options, socket_bind_options, } = RequestConfig::::remove(req.extensions_mut()).unwrap_or_default(); if let Some(opts) = http1_options { this.h1_builder.options(opts); } if let Some(opts) = http2_options { this.h2_builder.options(opts); } ConnectionDescriptor::new(uri, group, proxy, version, tls_options, socket_bind_options) }; Box::pin(this.send_request(req, descriptor).map_err(Into::into)) } async fn send_request( self, mut req: Request, descriptor: ConnectionDescriptor, ) -> Result, Error> { let uri = req.uri().clone(); loop { req = match self.try_send_request(req, descriptor.clone()).await { Ok(resp) => return Ok(resp), Err(TrySendError::Nope(err)) => return Err(err), Err(TrySendError::Retryable { mut req, error, connection_reused, }) => { if !self.config.retry_canceled_requests || !connection_reused { // if client disabled, don't retry // a fresh connection means we definitely can't retry return Err(error); } trace!( "unstarted request canceled, trying again (reason={:?})", error ); *req.uri_mut() = uri.clone(); req } } } } async fn try_send_request( &self, mut req: Request, descriptor: ConnectionDescriptor, ) -> Result, TrySendError> { let mut pooled = self .connection_for(descriptor) .await // `connection_for` already retries checkout errors, so if // it returns an error, there's not much else to retry .map_err(TrySendError::Nope)?; let uri = req.uri().clone(); if pooled.is_http1() { if req.version() == Version::HTTP_2 { warn!("Connection is HTTP/1, but request requires HTTP/2"); return Err(TrySendError::Nope( e!(UserUnsupportedVersion).with_connect_info(pooled.conn_info.clone()), )); } if self.config.set_host { req.headers_mut() .entry(HOST) .or_insert_with(|| generate_host_header(&uri)); } // CONNECT always sends authority-form, so check it first... 
if req.method() == Method::CONNECT { authority_form(req.uri_mut()); } else if pooled.conn_info.is_proxied() { if let Some(auth) = pooled.conn_info.proxy_auth() { req.headers_mut() .entry(PROXY_AUTHORIZATION) .or_insert_with(|| auth.clone()); } if let Some(headers) = pooled.conn_info.proxy_headers() { crate::util::replace_headers(req.headers_mut(), headers.clone()); } absolute_form(req.uri_mut()); } else { origin_form(req.uri_mut()); } } else if req.method() == Method::CONNECT && !pooled.is_http2() { authority_form(req.uri_mut()); } #[cfg(feature = "cookies")] let cookie_store = self.cookie_store.fetch(req.extensions()).cloned(); #[cfg(feature = "cookies")] if let Some(ref cookie_store) = cookie_store { let headers = req.headers_mut(); if !headers.contains_key(COOKIE) { let version = if pooled.is_http2() { Version::HTTP_2 } else { Version::HTTP_11 }; match cookie_store.cookies(&uri, version) { Cookies::Compressed(value) => { headers.insert(COOKIE, value); } Cookies::Uncompressed(values) => { for value in values { headers.append(COOKIE, value); } } Cookies::Empty => (), } } } let mut res = match pooled.try_send_request(req).await { Ok(res) => res, Err(mut err) => { return if let Some(req) = err.take_message() { Err(TrySendError::Retryable { connection_reused: pooled.is_reused(), error: Error::new(ErrorKind::Canceled, err.into_error()) .with_connect_info(pooled.conn_info.clone()), req, }) } else { Err(TrySendError::Nope( Error::new(ErrorKind::SendRequest, err.into_error()) .with_connect_info(pooled.conn_info.clone()), )) }; } }; #[cfg(feature = "cookies")] if let Some(cookie_store) = cookie_store { let mut cookies = res .headers() .get_all(http::header::SET_COOKIE) .iter() .peekable(); if cookies.peek().is_some() { cookie_store.set_cookies(&mut cookies, &uri); } } // If the Connector included 'extra' info, add to Response... pooled.conn_info.set_extras(res.extensions_mut()); // If the Connector included connection info, add to Response... 
res.extensions_mut().insert(pooled.conn_info.clone()); // If pooled is HTTP/2, we can toss this reference immediately. // // when pooled is dropped, it will try to insert back into the // pool. To delay that, spawn a future that completes once the // sender is ready again. // // This *should* only be once the related `Connection` has polled // for a new request to start. // // It won't be ready if there is a body to stream. if pooled.is_http2() || !pooled.is_pool_enabled() || pooled.is_ready() { drop(pooled); } else { let on_idle = std::future::poll_fn(move |cx| pooled.poll_ready(cx)).map(|_| ()); self.exec.execute(on_idle); } Ok(res) } async fn connection_for( &self, descriptor: ConnectionDescriptor, ) -> Result, ConnectionId>, Error> { loop { match self.one_connection_for(descriptor.clone()).await { Ok(pooled) => return Ok(pooled), Err(ClientConnectError::Normal(err)) => return Err(err), Err(ClientConnectError::CheckoutIsClosed(reason)) => { if !self.config.retry_canceled_requests { return Err(Error::new(ErrorKind::Connect, reason)); } trace!( "unstarted request canceled, trying again (reason={:?})", reason, ); continue; } }; } } async fn one_connection_for( &self, descriptor: ConnectionDescriptor, ) -> Result, ConnectionId>, ClientConnectError> { // Return a single connection if pooling is not enabled if !self.pool.is_enabled() { return self .connect_to(descriptor) .await .map_err(ClientConnectError::Normal); } // This actually races 2 different futures to try to get a ready // connection the fastest, and to reduce connection churn. // // - If the pool has an idle connection waiting, that's used immediately. // - Otherwise, the Connector is asked to start connecting to the destination Uri. // - Meanwhile, the pool Checkout is watching to see if any other request finishes and tries // to insert an idle connection. 
// - If a new connection is started, but the Checkout wins after (an idle connection became // available first), the started connection future is spawned into the runtime to // complete, and then be inserted into the pool as an idle connection. let checkout = self.pool.checkout(descriptor.id()); let connect = self.connect_to(descriptor); let is_ver_h2 = self.config.ver == Ver::Http2; // The order of the `select` is depended on below... match futures_util::future::select(checkout, connect).await { // Checkout won, connect future may have been started or not. // // If it has, let it finish and insert back into the pool, // so as to not waste the socket... Either::Left((Ok(checked_out), connecting)) => { // This depends on the `select` above having the correct // order, such that if the checkout future were ready // immediately, the connect future will never have been // started. // // If it *wasn't* ready yet, then the connect future will // have been started... if connecting.started() { let bg = connecting .map_err(|_err| { trace!("background connect error: {}", _err); }) .map(|_pooled| { // dropping here should just place it in // the Pool for us... }); // An execute error here isn't important, we're just trying // to prevent a waste of a socket... self.exec.execute(bg); } Ok(checked_out) } // Connect won, checkout can just be dropped. Either::Right((Ok(connected), _checkout)) => Ok(connected), // Either checkout or connect could get canceled: // // 1. Connect is canceled if this is HTTP/2 and there is an outstanding HTTP/2 // connecting task. // 2. Checkout is canceled if the pool cannot deliver an idle connection reliably. // // In both cases, we should just wait for the other future. 
Either::Left((Err(err), connecting)) => { if err.is_canceled() { connecting.await.map_err(ClientConnectError::Normal) } else { Err(ClientConnectError::Normal(Error::new( ErrorKind::Connect, err, ))) } } Either::Right((Err(err), checkout)) => { if err.is_canceled() { checkout.await.map_err(move |err| { if is_ver_h2 && err.is_canceled() { ClientConnectError::CheckoutIsClosed(err) } else { ClientConnectError::Normal(Error::new(ErrorKind::Connect, err)) } }) } else { Err(ClientConnectError::Normal(err)) } } } } fn connect_to( &self, descriptor: ConnectionDescriptor, ) -> impl Lazy, ConnectionId>, Error>> + Send + Unpin + 'static { let executor = self.exec.clone(); let pool = self.pool.clone(); let h1_builder = self.h1_builder.clone(); let h2_builder = self.h2_builder.clone(); let ver = match descriptor.version() { Some(Version::HTTP_2) => Ver::Http2, _ => self.config.ver, }; let is_ver_h2 = ver == Ver::Http2; let connector = self.connector.clone(); lazy(move || { // Try to take a "connecting lock". // // If the pool_key is for HTTP/2, and there is already a // connection being established, then this can't take a // second lock. The "connect_to" future is Canceled. let connecting = match pool.connecting(descriptor.id(), ver) { Some(lock) => lock, None => { // HTTP/2 connection in progress. return Either::Right(futures_util::future::err(e!(Canceled))); } }; Either::Left( Oneshot::new(connector, descriptor) .map_err(|src| Error::new(ErrorKind::Connect, src)) .and_then(move |io| { let connected = io.connected(); // If ALPN is h2 and we aren't http2_only already, // then we need to convert our pool checkout into // a single HTTP2 one. let connecting = if connected.is_negotiated_h2() && !is_ver_h2 { match connecting.alpn_h2(&pool) { Some(lock) => { trace!("ALPN negotiated h2, updating pool"); lock } None => { // Another connection has already upgraded, // the pool checkout should finish up for us. 
let canceled =Error::new(ErrorKind::Canceled, "ALPN upgraded to HTTP/2"); return Either::Right(futures_util::future::err(canceled)); } } } else { connecting }; let is_h2 = is_ver_h2 || connected.is_negotiated_h2(); Either::Left(Box::pin(async move { let tx = if is_h2 { { let (mut tx, conn) = h2_builder.handshake(io).await.map_err(Error::tx)?; trace!( "http2 handshake complete, spawning background dispatcher task" ); executor.execute( conn.map_err(|_e| debug!("client connection error: {}", _e)) .map(|_| ()), ); // Wait for 'conn' to ready up before we // declare this tx as usable tx.ready().await.map_err(Error::tx)?; PoolTx::Http2(tx) } } else { { // Perform the HTTP/1.1 handshake on the provided I/O stream. More actions // Uses the h1_builder to establish a connection, returning a sender (tx) for requests // and a connection task (conn) that manages the connection lifecycle. let (mut tx, conn) = h1_builder.handshake(io).await.map_err(Error::tx)?; // Log that the HTTP/1.1 handshake has completed successfully. // This indicates the connection is established and ready for request processing. trace!( "http1 handshake complete, spawning background dispatcher task" ); // Create a oneshot channel to communicate errors from the connection task. // err_tx sends errors from the connection task, and err_rx receives them // to correlate connection failures with request readiness errors. let (err_tx, err_rx) = tokio::sync::oneshot::channel(); // Spawn the connection task in the background using the executor. // The task manages the HTTP/1.1 connection, including upgrades (e.g., WebSocket). // Errors are sent via err_tx to ensure they can be checked if the sender (tx) fails. executor.execute( conn.with_upgrades() .map_err(|e| { // Log the connection error at debug level for diagnostic purposes. debug!("client connection error: {:?}", e); // Log that the error is being sent to the error channel. 
trace!("sending connection error to error channel"); // Send the error via the oneshot channel, ignoring send failures // (e.g., if the receiver is dropped, which is handled later). let _ = err_tx.send(e); }) .map(|_| ()), ); // Log that the client is waiting for the connection to be ready. // Readiness indicates the sender (tx) can accept a request without blocking. More actions trace!("waiting for connection to be ready"); // Check if the sender is ready to accept a request. // This ensures the connection is fully established before proceeding. // Wait for 'conn' to ready up before we // declare this tx as usable match tx.ready().await { // If ready, the connection is usable for sending requests. Ok(_) => { // Log that the connection is ready for use. trace!("connection is ready"); // Drop the error receiver, as it’s no longer needed since the sender is ready. // This prevents waiting for errors that won’t occur in a successful case. drop(err_rx); // Wrap the sender in PoolTx::Http1 for use in the connection pool. PoolTx::Http1(tx) } // If the sender fails with a closed channel error, check for a specific connection error. // This distinguishes between a vague ChannelClosed error and an actual connection failure. Err(e) if e.is_closed() => { // Log that the channel is closed, indicating a potential connection issue. trace!("connection channel closed, checking for connection error"); // Check the oneshot channel for a specific error from the connection task. match err_rx.await { // If an error was received, it’s a specific connection failure. Ok(err) => { // Log the specific connection error for diagnostics. trace!("received connection error: {:?}", err); // Return the error wrapped in Error::tx to propagate it. return Err(Error::tx(err)); } // If the error channel is closed, no specific error was sent. // Fall back to the vague ChannelClosed error. Err(_) => { // Log that the error channel is closed, indicating no specific error. 
trace!("error channel closed, returning the vague ChannelClosed error"); // Return the original error wrapped in Error::tx. return Err(Error::tx(e)); } } } // For other errors (e.g., timeout, I/O issues), propagate them directly. // These are not ChannelClosed errors and don’t require error channel checks. Err(e) => { // Log the specific readiness failure for diagnostics. trace!("connection readiness failed: {:?}", e); // Return the error wrapped in Error::tx to propagate it. return Err(Error::tx(e)); } } } }; Ok(pool.pooled( connecting, PoolClient { conn_info: connected, tx, }, )) })) }), ) }) } } impl tower::Service> for HttpClient where C: tower::Service + Clone + Send + Sync + 'static, C::Response: AsyncRead + AsyncWrite + Connection + Unpin + Send + 'static, C::Error: Into, C::Future: Unpin + Send + 'static, B: Body + Send + 'static + Unpin, B::Data: Send, B::Error: Into, { type Response = Response; type Error = BoxError; type Future = BoxFuture<'static, Result, Self::Error>>; fn poll_ready(&mut self, _: &mut task::Context<'_>) -> Poll> { Poll::Ready(Ok(())) } fn call(&mut self, req: Request) -> Self::Future { self.request(req) } } impl Clone for HttpClient { fn clone(&self) -> HttpClient { HttpClient { config: self.config, exec: self.exec.clone(), h1_builder: self.h1_builder.clone(), h2_builder: self.h2_builder.clone(), connector: self.connector.clone(), pool: self.pool.clone(), #[cfg(feature = "cookies")] cookie_store: self.cookie_store.clone(), } } } /// A pooled HTTP connection that can send requests struct PoolClient { conn_info: Connected, tx: PoolTx, } enum PoolTx { Http1(conn::http1::SendRequest), Http2(conn::http2::SendRequest), } // ===== impl PoolClient ===== impl PoolClient { #[inline] fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { match self.tx { PoolTx::Http1(ref mut tx) => tx.poll_ready(cx).map_err(Error::closed), PoolTx::Http2(_) => Poll::Ready(Ok(())), } } #[inline] fn is_http1(&self) -> bool { !self.is_http2() } #[inline] fn 
is_http2(&self) -> bool { match self.tx { PoolTx::Http1(_) => false, PoolTx::Http2(_) => true, } } #[inline] fn is_poisoned(&self) -> bool { self.conn_info.poisoned() } #[inline] fn is_ready(&self) -> bool { match self.tx { PoolTx::Http1(ref tx) => tx.is_ready(), PoolTx::Http2(ref tx) => tx.is_ready(), } } } impl PoolClient { #[inline] fn try_send_request( &mut self, req: Request, ) -> impl Future, ConnTrySendError>>> where B: Send, { match self.tx { PoolTx::Http1(ref mut tx) => Either::Left(tx.try_send_request(req)), PoolTx::Http2(ref mut tx) => Either::Right(tx.try_send_request(req)), } } } impl pool::Poolable for PoolClient where B: Send + 'static, { #[inline] fn is_open(&self) -> bool { !self.is_poisoned() && self.is_ready() } fn reserve(self) -> pool::Reservation { match self.tx { PoolTx::Http1(tx) => pool::Reservation::Unique(PoolClient { conn_info: self.conn_info, tx: PoolTx::Http1(tx), }), PoolTx::Http2(tx) => { let b = PoolClient { conn_info: self.conn_info.clone(), tx: PoolTx::Http2(tx.clone()), }; let a = PoolClient { conn_info: self.conn_info, tx: PoolTx::Http2(tx), }; pool::Reservation::Shared(a, b) } } } #[inline] fn can_share(&self) -> bool { self.is_http2() } } /// A builder to configure a new [`HttpClient`]. #[derive(Clone)] pub struct Builder { config: Config, exec: Exec, h1_builder: conn::http1::Builder, h2_builder: conn::http2::Builder, pool_config: pool::Config, pool_timer: Time, #[cfg(feature = "cookies")] cookie_store: Option>, } // ===== impl Builder ===== impl Builder { /// Construct a new Builder. 
pub fn new(executor: E) -> Self where E: Executor + Send + Sync + Clone + 'static, { let exec = Exec::new(executor); Self { config: Config { retry_canceled_requests: true, set_host: true, ver: Ver::Auto, }, exec: exec.clone(), h1_builder: conn::http1::Builder::new(), h2_builder: conn::http2::Builder::new(exec), pool_config: pool::Config { idle_timeout: Some(Duration::from_secs(90)), max_idle_per_host: usize::MAX, max_pool_size: None, }, pool_timer: Time::Empty, #[cfg(feature = "cookies")] cookie_store: None, } } /// Set an optional timeout for idle sockets being kept-alive. /// A `Timer` is required for this to take effect. See `Builder::pool_timer` /// /// Pass `None` to disable timeout. /// /// Default is 90 seconds. #[inline] pub fn pool_idle_timeout(mut self, val: D) -> Self where D: Into>, { self.pool_config.idle_timeout = val.into(); self } /// Sets the maximum idle connection per host allowed in the pool. /// /// Default is `usize::MAX` (no limit). #[inline] pub fn pool_max_idle_per_host(mut self, max_idle: usize) -> Self { self.pool_config.max_idle_per_host = max_idle; self } /// Sets the maximum number of connections in the pool. /// /// Default is `None` (no limit). #[inline] pub fn pool_max_size(mut self, max_size: impl Into>) -> Self { self.pool_config.max_pool_size = max_size.into(); self } /// Set whether the connection **must** use HTTP/2. /// /// The destination must either allow HTTP2 Prior Knowledge, or the /// `Connect` should be configured to do use ALPN to upgrade to `h2` /// as part of the connection process. This will not make the `HttpClient` /// utilize ALPN by itself. /// /// Note that setting this to true prevents HTTP/1 from being allowed. /// /// Default is false. #[inline] pub fn http2_only(mut self, val: bool) -> Self { self.config.ver = if val { Ver::Http2 } else { Ver::Auto }; self } /// Provide a timer to be used for http2 /// /// See the documentation of [`http2::client::Builder::timer`] for more /// details. 
/// /// [`http2::client::Builder::timer`]: https://docs.rs/http2/latest/http2/client/struct.Builder.html#method.timer #[inline] pub fn http2_timer(mut self, timer: M) -> Self where M: Timer + Send + Sync + 'static, { self.h2_builder.timer(timer); self } /// Provide a configuration for HTTP/1. #[inline] pub fn http1_options(mut self, opts: O) -> Self where O: Into>, { if let Some(opts) = opts.into() { self.h1_builder.options(opts); } self } /// Provide a configuration for HTTP/2. #[inline] pub fn http2_options(mut self, opts: O) -> Self where O: Into>, { if let Some(opts) = opts.into() { self.h2_builder.options(opts); } self } /// Provide a timer to be used for timeouts and intervals in connection pools. #[inline] pub fn pool_timer(mut self, timer: M) -> Self where M: Timer + Clone + Send + Sync + 'static, { self.pool_timer = Time::Timer(Arc::new(timer)); self } /// Provide a cookie store for automatic cookie management. #[inline] #[cfg(feature = "cookies")] pub fn cookie_store(mut self, cookie_store: Option>) -> Self { self.cookie_store = cookie_store; self } /// Combine the configuration of this builder with a connector to create a `HttpClient`. 
pub fn build(self, connector: C) -> HttpClient where C: tower::Service + Clone + Send + Sync + 'static, C::Response: AsyncRead + AsyncWrite + Connection + Unpin + Send + 'static, C::Error: Into, C::Future: Unpin + Send + 'static, B: Body + Send, B::Data: Send, { let exec = self.exec.clone(); let timer = self.pool_timer.clone(); HttpClient { config: self.config, exec: exec.clone(), connector, h1_builder: self.h1_builder, h2_builder: self.h2_builder, pool: pool::Pool::new(self.pool_config, exec, timer), #[cfg(feature = "cookies")] cookie_store: RequestConfig::new(self.cookie_store), } } } // ==== impl Error ==== impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "client error ({:?})", self.kind) } } impl StdError for Error { fn source(&self) -> Option<&(dyn StdError + 'static)> { self.source.as_ref().map(|e| &**e as _) } } impl Error { fn new(kind: ErrorKind, error: E) -> Self where E: Into, { let error = error.into(); let kind = if error.is::() || error.is::() || { #[cfg(feature = "socks")] { error.is::() } #[cfg(not(feature = "socks"))] { false } } { ErrorKind::ProxyConnect } else { kind }; Self { kind, source: Some(error), connect_info: None, } } /// Returns true if this was an error from [`ErrorKind::Connect`]. #[inline] pub fn is_connect(&self) -> bool { matches!(self.kind, ErrorKind::Connect) } /// Returns true if this was an error from [`ErrorKind::ProxyConnect`]. 
#[inline] pub fn is_proxy_connect(&self) -> bool { matches!(self.kind, ErrorKind::ProxyConnect) } #[inline] fn with_connect_info(self, connect_info: Connected) -> Self { Self { connect_info: Some(connect_info), ..self } } #[inline] fn is_canceled(&self) -> bool { matches!(self.kind, ErrorKind::Canceled) } #[inline] fn tx(src: core::Error) -> Self { Self::new(ErrorKind::SendRequest, src) } #[inline] fn closed(src: core::Error) -> Self { Self::new(ErrorKind::ChannelClosed, src) } } fn origin_form(uri: &mut Uri) { let path = match uri.path_and_query() { Some(path) if path.as_str() != "/" => { let mut parts = ::http::uri::Parts::default(); parts.path_and_query.replace(path.clone()); Uri::from_parts(parts).expect("path is valid uri") } _none_or_just_slash => { debug_assert!(Uri::default() == "/"); Uri::default() } }; *uri = path } fn absolute_form(uri: &mut Uri) { debug_assert!(uri.scheme().is_some(), "absolute_form needs a scheme"); debug_assert!( uri.authority().is_some(), "absolute_form needs an authority" ); } fn authority_form(uri: &mut Uri) { if let Some(path) = uri.path_and_query() { // `https://hyper.rs` would parse with `/` path, don't // annoy people about that... 
if path != "/" { warn!("HTTP/1.1 CONNECT request stripping path: {:?}", path); } } *uri = match uri.authority() { Some(auth) => { let mut parts = ::http::uri::Parts::default(); parts.authority = Some(auth.clone()); Uri::from_parts(parts).expect("authority is valid") } None => { unreachable!("authority_form with relative uri"); } }; } fn normalize_uri(req: &mut Request, is_http_connect: bool) -> Result { let uri = req.uri().clone(); let build_base_uri = |scheme: Scheme, authority: Authority| { Uri::builder() .scheme(scheme) .authority(authority) .path_and_query(PathAndQuery::from_static("/")) .build() .expect("valid base URI") }; match (uri.scheme(), uri.authority()) { (Some(scheme), Some(auth)) => Ok(build_base_uri(scheme.clone(), auth.clone())), (None, Some(auth)) if is_http_connect => { let scheme = match auth.port_u16() { Some(443) => Scheme::HTTPS, _ => Scheme::HTTP, }; set_scheme(req.uri_mut(), scheme.clone()); Ok(build_base_uri(scheme, auth.clone())) } _ => { debug!("Client requires absolute-form URIs, received: {:?}", uri); Err(e!(UserAbsoluteUriRequired)) } } } fn generate_host_header(uri: &Uri) -> HeaderValue { let hostname = uri.host().expect("authority implies host"); let port = match (uri.port().map(|p| p.as_u16()), is_schema_secure(uri)) { (Some(443), true) | (Some(80), false) => None, _ => uri.port(), }; if let Some(port) = port { let host = format!("{hostname}:{port}"); HeaderValue::from_maybe_shared(Bytes::from(host)) } else { HeaderValue::from_str(hostname) } .expect("uri host is valid header value") } fn set_scheme(uri: &mut Uri, scheme: Scheme) { debug_assert!( uri.scheme().is_none(), "set_scheme expects no existing scheme" ); let old = std::mem::take(uri); let mut parts: ::http::uri::Parts = old.into(); parts.scheme = Some(scheme); parts.path_and_query = Some(PathAndQuery::from_static("/")); *uri = Uri::from_parts(parts).expect("scheme is valid"); } fn is_schema_secure(uri: &Uri) -> bool { uri.scheme_str() .map(|scheme_str| matches!(scheme_str, 
"wss" | "https")) .unwrap_or_default() } ================================================ FILE: src/client/layer/config.rs ================================================ use std::{ sync::Arc, task::{Context, Poll}, }; use futures_util::future::{self, Either, Ready}; use http::{HeaderMap, Request, Response, Version}; use tower::{Layer, Service}; use crate::{ Error, client::{ conn::SocketBindOptions, core::{http1::Http1Options, http2::Http2Options}, group::Group, }, config::RequestConfig, ext::UriExt, header::OrigHeaderMap, proxy::Matcher, tls::TlsOptions, }; /// A marker type for the default headers configuration value. #[derive(Clone, Copy)] pub(crate) struct DefaultHeaders; /// Per-request configuration for proxy, protocol, and transport options. /// Overrides client defaults for a single request. #[derive(Debug, Default, Clone)] #[non_exhaustive] pub(crate) struct RequestOptions { pub group: Group, pub proxy: Option, pub version: Option, pub tls_options: Option, pub http1_options: Option, pub http2_options: Option, pub socket_bind_options: Option, } /// Configuration for the [`ConfigService`]. struct Config { https_only: bool, headers: HeaderMap, orig_headers: RequestConfig, default_headers: RequestConfig, } /// Middleware layer to use [`ConfigService`]. pub struct ConfigServiceLayer { config: Arc, } /// Middleware service to use [`Config`]. #[derive(Clone)] pub struct ConfigService { inner: S, config: Arc, } // ===== impl DefaultHeaders ===== impl_request_config_value!(DefaultHeaders, bool); // ===== impl RequestOptions ===== impl_request_config_value!(RequestOptions); // ===== impl ConfigServiceLayer ===== impl ConfigServiceLayer { /// Creates a new [`ConfigServiceLayer`]. 
pub fn new(https_only: bool, headers: HeaderMap, orig_headers: OrigHeaderMap) -> Self { let org_headers = (!orig_headers.is_empty()).then_some(orig_headers); ConfigServiceLayer { config: Arc::new(Config { https_only, headers, orig_headers: RequestConfig::new(org_headers), default_headers: RequestConfig::new(Some(true)), }), } } } impl Layer for ConfigServiceLayer { type Service = ConfigService; #[inline(always)] fn layer(&self, inner: S) -> Self::Service { ConfigService { inner, config: self.config.clone(), } } } // ===== impl ConfigService ===== impl Service> for ConfigService where S: Service, Response = Response>, S::Error: From, { type Response = S::Response; type Error = S::Error; type Future = Either>>; #[inline(always)] fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { self.inner.poll_ready(cx) } fn call(&mut self, mut req: Request) -> Self::Future { let uri = req.uri().clone(); // check if the request URI scheme is valid. if !(uri.is_http() || uri.is_https()) || (self.config.https_only && !uri.is_https()) { return Either::Right(future::err(Error::uri_bad_scheme(uri.clone()).into())); } // check if the request ignores the default headers. if self .config .default_headers .fetch(req.extensions()) .copied() .unwrap_or_default() { // insert default headers in the request headers // without overwriting already appended headers. let mut dest = self.config.headers.clone(); crate::util::replace_headers(&mut dest, std::mem::take(req.headers_mut())); std::mem::swap(req.headers_mut(), &mut dest); } // store the original headers in request extensions self.config.orig_headers.store(req.extensions_mut()); Either::Left(self.inner.call(req)) } } ================================================ FILE: src/client/layer/decoder.rs ================================================ //! 
Middleware for decoding use std::task::{Context, Poll}; use http::{Request, Response}; use http_body::Body; use tower::{Layer, Service}; use tower_http::decompression::{self, DecompressionBody, ResponseFuture}; use crate::config::RequestConfig; /// Configuration for supported content-encoding algorithms. /// /// `AcceptEncoding` controls which compression formats are enabled for decoding /// response bodies. Each field corresponds to a specific algorithm and is only /// available if the corresponding feature is enabled. #[derive(Clone)] pub(crate) struct AcceptEncoding { #[cfg(feature = "gzip")] pub(crate) gzip: bool, #[cfg(feature = "brotli")] pub(crate) brotli: bool, #[cfg(feature = "zstd")] pub(crate) zstd: bool, #[cfg(feature = "deflate")] pub(crate) deflate: bool, } /// Layer that adds response body decompression to a service. #[derive(Clone)] pub struct DecompressionLayer { accept: AcceptEncoding, } /// Service that decompresses response bodies based on the [`AcceptEncoding`] configuration. #[derive(Clone)] pub struct Decompression(Option>); // ===== AcceptEncoding ===== impl Default for AcceptEncoding { fn default() -> AcceptEncoding { AcceptEncoding { #[cfg(feature = "gzip")] gzip: true, #[cfg(feature = "brotli")] brotli: true, #[cfg(feature = "zstd")] zstd: true, #[cfg(feature = "deflate")] deflate: true, } } } impl_request_config_value!(AcceptEncoding); // ===== impl DecompressionLayer ===== impl DecompressionLayer { /// Creates a new [`DecompressionLayer`] with the specified [`AcceptEncoding`]. 
#[inline(always)] pub fn new(accept: AcceptEncoding) -> Self { Self { accept } } } impl Layer for DecompressionLayer { type Service = Decompression; #[inline(always)] fn layer(&self, service: S) -> Self::Service { Decompression(Some(Decompression::::accept_in_place( decompression::Decompression::new(service), &self.accept, ))) } } // ===== impl Decompression ===== impl Decompression { const BUG_MSG: &str = "[BUG] Decompression service not initialized; bug in setup"; fn accept_in_place( mut decoder: decompression::Decompression, accept: &AcceptEncoding, ) -> decompression::Decompression { #[cfg(feature = "gzip")] { decoder = decoder.gzip(accept.gzip); } #[cfg(feature = "deflate")] { decoder = decoder.deflate(accept.deflate); } #[cfg(feature = "brotli")] { decoder = decoder.br(accept.brotli); } #[cfg(feature = "zstd")] { decoder = decoder.zstd(accept.zstd); } decoder } } impl Service> for Decompression where S: Service, Response = Response>, ReqBody: Body, ResBody: Body, { type Response = Response>; type Error = S::Error; type Future = ResponseFuture; #[inline(always)] fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { self.0.as_mut().expect(Self::BUG_MSG).poll_ready(cx) } fn call(&mut self, req: Request) -> Self::Future { if let Some(accept) = RequestConfig::::get(req.extensions()) { if let Some(decoder) = self.0.take() { self.0 .replace(Decompression::accept_in_place(decoder, accept)); } debug_assert!(self.0.is_some()); } self.0.as_mut().expect(Self::BUG_MSG).call(req) } } ================================================ FILE: src/client/layer/redirect/future.rs ================================================ use std::{ future::Future, pin::Pin, str, task::{Context, Poll, ready}, }; use futures_util::future::Either; use http::{ HeaderMap, Method, Request, Response, StatusCode, Uri, header::{CONTENT_ENCODING, CONTENT_LENGTH, CONTENT_TYPE, LOCATION, TRANSFER_ENCODING}, request::Parts, }; use http_body::Body; use pin_project_lite::pin_project; use 
tower::{BoxError, Service, util::Oneshot}; use url::Url; use super::{ BodyRepr, policy::{Action, Attempt, Policy}, }; use crate::{Error, ext::RequestUri, into_uri::IntoUriSealed}; /// Pending future state for handling redirects. pub struct Pending { future: Pin + Send>>, location: Uri, body: ReqBody, res: Response, } pin_project! { /// Response future for [`FollowRedirect`]. #[project = ResponseFutureProj] pub enum ResponseFuture where S: Service>, { Redirect { #[pin] future: Either>>, pending_future: Option>, service: S, policy: P, parts: Parts, body_repr: BodyRepr, }, Direct { #[pin] future: S::Future, }, } } impl Future for ResponseFuture where S: Service, Response = Response> + Clone, S::Error: From, P: Policy, ReqBody: Body + Default, { type Output = Result, S::Error>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { match self.project() { ResponseFutureProj::Direct { mut future } => future.as_mut().poll(cx), ResponseFutureProj::Redirect { mut future, pending_future, service, policy, parts, body_repr, } => { // Check if we have a pending action to resolve if let Some(mut state) = pending_future.take() { let action = match state.future.as_mut().poll(cx) { Poll::Ready(action) => action, Poll::Pending => { *pending_future = Some(state); return Poll::Pending; } }; return handle_action( cx, RedirectAction { action, future: &mut future, service, policy, parts, body: state.body, body_repr, res: state.res, location: state.location, }, ); } // Poll the current future to get the response let mut res = { let mut res = ready!(future.as_mut().poll(cx)?); res.extensions_mut().insert(RequestUri(parts.uri.clone())); res }; // Determine if the response is a redirect match res.status() { StatusCode::MOVED_PERMANENTLY | StatusCode::FOUND => { // User agents MAY change the request method from POST to GET // (RFC 7231 section 6.4.2. and 6.4.3.). 
if parts.method == Method::POST { parts.method = Method::GET; *body_repr = BodyRepr::Empty; drop_payload_headers(&mut parts.headers); } } StatusCode::SEE_OTHER => { // A user agent can perform a GET or HEAD request (RFC 7231 section 6.4.4.). if parts.method != Method::HEAD { parts.method = Method::GET; } *body_repr = BodyRepr::Empty; drop_payload_headers(&mut parts.headers); } StatusCode::TEMPORARY_REDIRECT | StatusCode::PERMANENT_REDIRECT => {} _ => { // Not a redirect status code, return the response as is. policy.on_response(&mut res); return Poll::Ready(Ok(res)); } }; // Extract the request body for potential reuse let Some(body) = body_repr.take() else { return Poll::Ready(Ok(res)); }; // Get and resolve the Location header let Some(location) = res .headers() .get(LOCATION) .and_then(|loc| loc.to_str().ok()) .and_then(|loc| resolve_uri(loc, &parts.uri)) else { return Poll::Ready(Ok(res)); }; // Prepare the attempt for the policy decision let attempt = Attempt { status: res.status(), headers: res.headers(), location: &location, previous: &parts.uri, }; // Resolve the action, awaiting if it's pending let action = match policy.redirect(attempt)? { Action::Pending(future) => { // Save the task and necessary state for next poll *pending_future = Some(Pending { future, location, body, res, }); cx.waker().wake_by_ref(); return Poll::Pending; } action => action, }; handle_action( cx, RedirectAction { action, future: &mut future, service, policy, parts, body, body_repr, res, location, }, ) } } } } /// Try to resolve a URI reference `relative` against a base URI `base`. fn resolve_uri(relative: &str, base: &Uri) -> Option { Url::parse(&base.to_string()) .ok()? .join(relative) .map(String::from) .ok()? 
.into_uri() .ok() } /// Handle the response based on its status code fn drop_payload_headers(headers: &mut HeaderMap) { for header in &[ CONTENT_TYPE, CONTENT_LENGTH, CONTENT_ENCODING, TRANSFER_ENCODING, ] { headers.remove(header); } } type RedirectFuturePin<'a, S, ReqBody> = Pin<&'a mut Either<>>::Future, Oneshot>>>; struct RedirectAction<'a, S, ReqBody, ResBody, P> where S: Service, Response = Response> + Clone, P: Policy, { action: Action, future: &'a mut RedirectFuturePin<'a, S, ReqBody>, service: &'a S, policy: &'a mut P, parts: &'a mut Parts, body: ReqBody, body_repr: &'a mut BodyRepr, res: Response, location: Uri, } fn handle_action( cx: &mut Context<'_>, redirect: RedirectAction<'_, S, ReqBody, ResBody, P>, ) -> Poll, S::Error>> where S: Service, Response = Response> + Clone, S::Error: From, P: Policy, ReqBody: Body + Default, { match redirect.action { Action::Follow => { redirect.parts.uri = redirect.location; redirect .body_repr .try_clone_from(&redirect.body, redirect.policy); let mut req = Request::from_parts(redirect.parts.clone(), redirect.body); redirect.policy.on_request(&mut req); redirect .future .set(Either::Right(Oneshot::new(redirect.service.clone(), req))); cx.waker().wake_by_ref(); Poll::Pending } Action::Stop => Poll::Ready(Ok(redirect.res)), Action::Pending(_) => Poll::Ready(Err(S::Error::from( Error::redirect( "Nested pending Action is not supported in redirect policy", redirect.parts.uri.clone(), ) .into(), ))), Action::Error(err) => Poll::Ready(Err(err.into())), } } ================================================ FILE: src/client/layer/redirect/policy.rs ================================================ //! Tools for customizing the behavior of a [`FollowRedirect`][super::FollowRedirect] middleware. use std::{fmt, pin::Pin}; use http::{HeaderMap, Request, Response, StatusCode, Uri}; use crate::error::BoxError; /// Trait for the policy on handling redirection responses. 
pub trait Policy { /// Invoked when the service received a response with a redirection status code (`3xx`). /// /// This method returns an [`Action`] which indicates whether the service should follow /// the redirection. fn redirect(&mut self, attempt: Attempt<'_>) -> Result; /// Returns whether redirection is currently permitted by this policy. /// /// This method is called to determine whether the client should follow redirects at all. /// It allows policies to enable or disable redirection behavior based on the [`Request`]. fn follow_redirects(&mut self, _request: &mut Request) -> bool; /// Invoked right before the service makes a [`Request`]. fn on_request(&mut self, _request: &mut Request); /// Invoked right after the service received a [`Response`]. fn on_response(&mut self, _response: &mut Response); /// Try to clone a request body before the service makes a redirected request. fn clone_body(&self, _body: &B) -> Option; } /// A type that holds information on a redirection attempt. pub struct Attempt<'a> { pub(crate) status: StatusCode, pub(crate) headers: &'a HeaderMap, pub(crate) location: &'a Uri, pub(crate) previous: &'a Uri, } /// A value returned by [`Policy::redirect`] which indicates the action /// [`FollowRedirect`][super::FollowRedirect] should take for a redirection response. pub enum Action { /// Follow the redirection. Follow, /// Do not follow the redirection, and return the redirection response as-is. Stop, /// Pending async decision. The async task will be awaited to determine the final action. Pending(Pin + Send>>), /// An error occurred while determining the redirection action. 
Error(BoxError), } impl fmt::Debug for Action { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Action::Follow => f.debug_tuple("Follow").finish(), Action::Stop => f.debug_tuple("Stop").finish(), Action::Pending(_) => f.debug_tuple("Pending").finish(), Action::Error(_) => f.debug_tuple("Error").finish(), } } } ================================================ FILE: src/client/layer/redirect.rs ================================================ //! Middleware for following redirections. mod future; mod policy; use std::{ mem, task::{Context, Poll}, }; use futures_util::future::Either; use http::{Request, Response}; use http_body::Body; use tower::{BoxError, Layer, Service}; use self::future::ResponseFuture; pub use self::policy::{Action, Attempt, Policy}; enum BodyRepr { Some(B), Empty, None, } impl BodyRepr where B: Body + Default, { fn take(&mut self) -> Option { match mem::replace(self, BodyRepr::None) { BodyRepr::Some(body) => Some(body), BodyRepr::Empty => { *self = BodyRepr::Empty; Some(B::default()) } BodyRepr::None => None, } } fn try_clone_from(&mut self, body: &B, policy: &P) where P: Policy, { match self { BodyRepr::Some(_) | BodyRepr::Empty => {} BodyRepr::None => { if body.size_hint().exact() == Some(0) { *self = BodyRepr::Some(B::default()); } else if let Some(cloned) = policy.clone_body(body) { *self = BodyRepr::Some(cloned); } } } } } /// [`Layer`] for retrying requests with a [`Service`] to follow redirection responses. #[derive(Clone, Copy, Default)] pub struct FollowRedirectLayer

{ policy: P, } impl

FollowRedirectLayer

{ /// Create a new [`FollowRedirectLayer`] with the given redirection [`Policy`]. #[inline(always)] pub fn with_policy(policy: P) -> Self { FollowRedirectLayer { policy } } } impl Layer for FollowRedirectLayer

where S: Clone, P: Clone, { type Service = FollowRedirect; #[inline(always)] fn layer(&self, inner: S) -> Self::Service { FollowRedirect::with_policy(inner, self.policy.clone()) } } /// Middleware that retries requests with a [`Service`] to follow redirection responses. #[derive(Clone, Copy)] pub struct FollowRedirect { inner: S, policy: P, } impl FollowRedirect where P: Clone, { /// Create a new [`FollowRedirect`] with the given redirection [`Policy`]. #[inline(always)] pub fn with_policy(inner: S, policy: P) -> Self { FollowRedirect { inner, policy } } } impl Service> for FollowRedirect where S: Service, Response = Response> + Clone, S::Error: From, P: Policy + Clone, ReqBody: Body + Default, { type Response = Response; type Error = S::Error; type Future = ResponseFuture; #[inline(always)] fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { self.inner.poll_ready(cx) } fn call(&mut self, mut req: Request) -> Self::Future { if self.policy.follow_redirects(&mut req) { let service = self.inner.clone(); let mut service = mem::replace(&mut self.inner, service); let mut policy = self.policy.clone(); let mut body_repr = BodyRepr::None; body_repr.try_clone_from(req.body(), &policy); policy.on_request(&mut req); let (parts, body) = req.into_parts(); let req = Request::from_parts(parts.clone(), body); ResponseFuture::Redirect { future: Either::Left(service.call(req)), pending_future: None, service, policy, parts, body_repr, } } else { ResponseFuture::Direct { future: self.inner.call(req), } } } } ================================================ FILE: src/client/layer/retry/classify.rs ================================================ use std::{error::Error as StdError, sync::Arc}; use http::{Method, StatusCode, Uri}; use super::{Req, Res}; use crate::error::BoxError; pub trait Classify: Send + Sync + 'static { fn classify(&self, req_rep: ReqRep<'_>) -> Action; } // For Future Whoever: making a blanket impl for any closure sounds nice, // but it causes inference issues 
at the call site. Every closure would // need to include `: ReqRep` in the arguments. // // An alternative is to make things like `ClassifyFn`. Slightly more // annoying, but also more forwards-compatible. :shrug: pub struct ClassifyFn(pub(crate) F); impl Classify for ClassifyFn where F: Fn(ReqRep<'_>) -> Action + Send + Sync + 'static, { fn classify(&self, req_rep: ReqRep<'_>) -> Action { (self.0)(req_rep) } } /// Represents a request-response pair for classification purposes. #[derive(Debug)] pub struct ReqRep<'a>(&'a Req, Result); impl ReqRep<'_> { /// Returns the HTTP method of the request. pub fn method(&self) -> &Method { self.0.method() } /// Returns the URI of the request. pub fn uri(&self) -> &Uri { self.0.uri() } /// Returns the HTTP status code if the response was successful. pub fn status(&self) -> Option { self.1.ok() } /// Returns the error if the request failed. pub fn error(&self) -> Option<&(dyn StdError + 'static)> { self.1.as_ref().err().map(|&e| &**e as _) } /// Returns a retryable action. pub fn retryable(self) -> Action { Action::Retryable } /// Returns a success action. pub fn success(self) -> Action { Action::Success } } /// The action to take after classifying a request/response pair. #[must_use] pub enum Action { /// The request was successful and should not be retried. Success, /// The request failed but can be retried. Retryable, } /// Determines whether a request should be retried based on the response or error. #[derive(Clone)] pub(crate) enum Classifier { /// Never retry any requests. Never, /// Retry protocol-level errors (connection issues, timeouts, etc.). ProtocolNacks, /// Use custom classification logic. Dyn(Arc), } impl Classifier { /// Classifies a request/response pair to determine the appropriate retry action. 
pub(super) fn classify(&mut self, req: &Req, res: &Result<Res, BoxError>) -> Action {
    // Pair the request with either the response status or the error so the
    // classifier can inspect whichever side is present.
    let req_rep = ReqRep(req, res.as_ref().map(|r| r.status()));
    match self {
        Classifier::Never => Action::Success,
        Classifier::ProtocolNacks => {
            // Only transport-level NACKs (HTTP/2 GOAWAY / REFUSED_STREAM)
            // count as retryable for this built-in classifier.
            let is_protocol_nack = req_rep
                .error()
                .map(super::is_retryable_error)
                .unwrap_or(false);
            if is_protocol_nack {
                Action::Retryable
            } else {
                Action::Success
            }
        }
        Classifier::Dyn(c) => c.classify(req_rep),
    }
}
}

================================================
FILE: src/client/layer/retry/scope.rs
================================================
use std::sync::Arc;

use super::Req;

/// Determines whether a request falls inside the retry scope.
pub trait Scope: Send + Sync + 'static {
    fn applies_to(&self, req: &super::Req) -> bool;
}

// I think scopes likely make the most sense being to hosts.
// If that's the case, then it should probably be easiest to check for
// the host. Perhaps also considering the ability to add more things
// to scope off in the future...

// For Future Whoever: making a blanket impl for any closure sounds nice,
// but it causes inference issues at the call site. Every closure would
// need to include `: ReqRep` in the arguments.
//
// An alternative is to make things like `ScopeFn`. Slightly more annoying,
// but also more forwards-compatible. :shrug:
pub struct ScopeFn<F>(pub(crate) F);

impl<F> Scope for ScopeFn<F>
where
    F: Fn(&Req) -> bool + Send + Sync + 'static,
{
    fn applies_to(&self, req: &Req) -> bool {
        (self.0)(req)
    }
}

/// Defines the scope of requests that are eligible for retry.
#[derive(Clone)]
pub(crate) enum Scoped {
    /// All requests are eligible for retry regardless of their properties.
    Unscoped,
    /// Use custom logic to determine if a request is eligible for retry.
    Dyn(Arc<dyn Scope>),
}

impl Scoped {
    /// Checks if the given request falls within the retry scope.
    pub(super) fn applies_to(&self, req: &super::Req) -> bool {
        let ret = match self {
            Scoped::Unscoped => true,
            Scoped::Dyn(s) => s.applies_to(req),
        };
        trace!("retry in scope: {ret}");
        ret
    }
}

================================================
FILE: src/client/layer/retry.rs
================================================
//! Middleware for retrying requests.

mod classify;
mod scope;

use std::{error::Error as StdError, future::Ready, sync::Arc, time::Duration};

use http::{Request, Response};
use tower::{
    BoxError,
    retry::{
        Policy,
        budget::{Budget, TpsBudget},
    },
};

pub(crate) use self::{
    classify::{Action, Classifier, ClassifyFn, ReqRep},
    scope::{ScopeFn, Scoped},
};
use super::super::core::body::Incoming;
use crate::{Body, retry};

/// A retry policy for HTTP requests.
#[derive(Clone)]
pub struct RetryPolicy {
    // Shared token-bucket budget; `None` means retries are unbudgeted.
    budget: Option<Arc<TpsBudget>>,
    classifier: Classifier,
    max_retries_per_request: u32,
    retry_cnt: u32,
    scope: Scoped,
}

impl RetryPolicy {
    /// Create a new `RetryPolicy`.
    #[inline]
    pub fn new(policy: retry::Policy) -> Self {
        Self {
            budget: policy
                .budget
                .map(|budget| Arc::new(TpsBudget::new(Duration::from_secs(10), 10, budget))),
            classifier: policy.classifier,
            max_retries_per_request: policy.max_retries_per_request,
            retry_cnt: 0,
            scope: policy.scope,
        }
    }
}

type Req = Request<Body>;
type Res = Response<Incoming>;

impl Policy<Req, Res, BoxError> for RetryPolicy {
    type Future = Ready<()>;

    fn retry(&mut self, req: &mut Req, result: &mut Result<Res, BoxError>) -> Option<Self::Future> {
        match self.classifier.classify(req, result) {
            Action::Success => {
                trace!(
                    "Request successful, no retry needed: {} {}",
                    req.method(),
                    req.uri()
                );
                if let Some(ref budget) = self.budget {
                    // Successful calls earn tokens back so sustained success
                    // keeps the budget topped up.
                    budget.deposit();
                    trace!("Token deposited back to retry budget");
                }
                None
            }
            Action::Retryable => {
                // Without a budget, retries are always allowed (bounded only
                // by `max_retries_per_request` in `clone_request`).
                if self.budget.as_ref().map(|b| b.withdraw()).unwrap_or(true) {
                    self.retry_cnt += 1;
                    trace!(
                        "Retrying request ({}/{} attempts): {} {} - {}",
                        self.retry_cnt,
                        self.max_retries_per_request,
                        req.method(),
                        req.uri(),
                        match result {
                            Ok(res) => format!("HTTP {}", res.status()),
                            Err(e) => format!("Error: {}", e),
                        }
                    );
                    Some(std::future::ready(()))
                } else {
                    debug!(
                        "Request is retryable but retry budget exhausted: {} {}",
                        req.method(),
                        req.uri()
                    );
                    None
                }
            }
        }
    }

    fn clone_request(&mut self, req: &Req) -> Option<Req> {
        // Scope is only consulted on actual retries (retry_cnt > 0), never on
        // the first clone taken before the initial attempt.
        if self.retry_cnt > 0 && !self.scope.applies_to(req) {
            trace!("not in scope, not retrying");
            return None;
        }
        if self.retry_cnt >= self.max_retries_per_request {
            trace!("max_retries_per_request hit");
            return None;
        }
        // Streaming bodies cannot be cloned; in that case the request is not
        // retryable at all.
        let body = req.body().try_clone()?;
        let mut new = http::Request::new(body);
        *new.method_mut() = req.method().clone();
        *new.uri_mut() = req.uri().clone();
        *new.version_mut() = req.version();
        *new.headers_mut() = req.headers().clone();
        *new.extensions_mut() = req.extensions().clone();
        Some(new)
    }
}

/// Determines whether the given error is considered retryable for HTTP/2 requests.
///
/// Returns `true` if the error type or content indicates that the request can be retried,
/// otherwise returns `false`.
fn is_retryable_error(err: &(dyn StdError + 'static)) -> bool {
    let err = if let Some(err) = err.source() {
        err
    } else {
        return false;
    };

    if let Some(cause) = err.source() {
        if let Some(err) = cause.downcast_ref::<http2::Error>() {
            // They sent us a graceful shutdown, try with a new connection!
            if err.is_go_away() && err.is_remote() && err.reason() == Some(http2::Reason::NO_ERROR)
            {
                return true;
            }
            // REFUSED_STREAM was sent from the server, which is safe to retry.
            // https://www.rfc-editor.org/rfc/rfc9113.html#section-8.7-3.2
            if err.is_reset()
                && err.is_remote()
                && err.reason() == Some(http2::Reason::REFUSED_STREAM)
            {
                return true;
            }
        }
    }
    false
}

================================================
FILE: src/client/layer/timeout/body.rs
================================================
use std::{
    future::Future,
    pin::Pin,
    task::{Context, Poll, ready},
    time::Duration,
};

use http_body::Body;
use pin_project_lite::pin_project;

use crate::{
    Error,
    client::core::rt::{Sleep, Time, Timer},
    error::{BoxError, TimedOut},
};

pin_project! {
    /// A wrapper body that applies timeout strategies to an inner HTTP body.
    #[project = TimeoutBodyProj]
    pub enum TimeoutBody<B> {
        Plain {
            #[pin]
            body: B,
        },
        TotalTimeout {
            #[pin]
            body: TotalTimeoutBody<B>,
        },
        ReadTimeout {
            #[pin]
            body: ReadTimeoutBody<B>,
        },
        CombinedTimeout {
            #[pin]
            body: TotalTimeoutBody<ReadTimeoutBody<B>>,
        },
    }
}

pin_project! {
    /// A body wrapper that enforces a total timeout for the entire stream.
    ///
    /// The timeout applies to the whole body: if the deadline is reached before
    /// the body is fully read, an error is returned. The timer does **not** reset
    /// between chunks.
    pub struct TotalTimeoutBody<B> {
        #[pin]
        body: B,
        timeout: Pin<Box<dyn Sleep>>,
    }
}

pin_project! {
    /// A body wrapper that enforces a timeout for each read operation.
    ///
    /// The timeout resets after every successful read. If a single read
    /// takes longer than the specified duration, an error is returned.
    pub struct ReadTimeoutBody<B> {
        timeout: Duration,
        #[pin]
        sleep: Option<Pin<Box<dyn Sleep>>>,
        #[pin]
        body: B,
        timer: Time,
    }
}

// ==== impl TimeoutBody ====

impl<B> TimeoutBody<B> {
    /// Creates a new [`TimeoutBody`] with no timeout.
pub fn new(
    timer: Time,
    deadline: Option<Duration>,
    read_timeout: Option<Duration>,
    body: B,
) -> Self {
    // A total deadline is armed once, up front; the per-read sleep is armed
    // lazily inside ReadTimeoutBody.
    let deadline = deadline.map(|deadline| timer.sleep(deadline));
    match (deadline, read_timeout) {
        (Some(total_timeout), Some(read_timeout)) => TimeoutBody::CombinedTimeout {
            body: TotalTimeoutBody {
                timeout: total_timeout,
                body: ReadTimeoutBody {
                    timeout: read_timeout,
                    sleep: None,
                    body,
                    timer,
                },
            },
        },
        (Some(timeout), None) => TimeoutBody::TotalTimeout {
            body: TotalTimeoutBody { body, timeout },
        },
        (None, Some(timeout)) => TimeoutBody::ReadTimeout {
            body: ReadTimeoutBody {
                timeout,
                sleep: None,
                body,
                timer,
            },
        },
        (None, None) => TimeoutBody::Plain { body },
    }
}
}

impl<B> Body for TimeoutBody<B>
where
    B: Body,
    B::Error: Into<BoxError>,
{
    type Data = B::Data;
    type Error = BoxError;

    #[inline(always)]
    fn poll_frame(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Option<Result<http_body::Frame<Self::Data>, Self::Error>>> {
        // Delegate to the wrapped strategy; `Plain` maps errors directly.
        match self.project() {
            TimeoutBodyProj::TotalTimeout { body } => body.poll_frame(cx),
            TimeoutBodyProj::ReadTimeout { body } => body.poll_frame(cx),
            TimeoutBodyProj::CombinedTimeout { body } => body.poll_frame(cx),
            TimeoutBodyProj::Plain { body } => poll_and_map_body(body, cx),
        }
    }

    #[inline(always)]
    fn size_hint(&self) -> http_body::SizeHint {
        match self {
            TimeoutBody::TotalTimeout { body } => body.size_hint(),
            TimeoutBody::ReadTimeout { body } => body.size_hint(),
            TimeoutBody::CombinedTimeout { body } => body.size_hint(),
            TimeoutBody::Plain { body } => body.size_hint(),
        }
    }

    #[inline(always)]
    fn is_end_stream(&self) -> bool {
        match self {
            TimeoutBody::TotalTimeout { body } => body.is_end_stream(),
            TimeoutBody::ReadTimeout { body } => body.is_end_stream(),
            TimeoutBody::CombinedTimeout { body } => body.is_end_stream(),
            TimeoutBody::Plain { body } => body.is_end_stream(),
        }
    }
}

/// Polls the inner body and maps its error type into `BoxError`.
#[inline(always)]
fn poll_and_map_body<B>(
    body: Pin<&mut B>,
    cx: &mut Context<'_>,
) -> Poll<Option<Result<http_body::Frame<B::Data>, BoxError>>>
where
    B: Body,
    B::Error: Into<BoxError>,
{
    Poll::Ready(
        ready!(body.poll_frame(cx)).map(|opt| opt.map_err(Error::decode).map_err(Into::into)),
    )
}

// ==== impl TotalTimeoutBody ====

impl<B> Body for TotalTimeoutBody<B>
where
    B: Body,
    B::Error: Into<BoxError>,
{
    type Data = B::Data;
    type Error = BoxError;

    fn poll_frame(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Option<Result<http_body::Frame<Self::Data>, Self::Error>>> {
        let this = self.project();
        // Check the deadline before touching the body; the timer never resets.
        if let Poll::Ready(()) = this.timeout.as_mut().poll(cx) {
            return Poll::Ready(Some(Err(Error::body(TimedOut).into())));
        }
        poll_and_map_body(this.body, cx)
    }

    #[inline(always)]
    fn size_hint(&self) -> http_body::SizeHint {
        self.body.size_hint()
    }

    #[inline(always)]
    fn is_end_stream(&self) -> bool {
        self.body.is_end_stream()
    }
}

// ==== impl ReadTimeoutBody ====

impl<B> Body for ReadTimeoutBody<B>
where
    B: Body,
    B::Error: Into<BoxError>,
{
    type Data = B::Data;
    type Error = BoxError;

    fn poll_frame(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Option<Result<http_body::Frame<Self::Data>, Self::Error>>> {
        let mut this = self.project();

        // Lazily arm the per-read timer on the first poll of each frame.
        if this.sleep.is_none() {
            this.sleep.set(Some(this.timer.sleep(*this.timeout)));
        }

        // Error if the timeout has expired.
        if let Some(sleep) = this.sleep.as_mut().as_pin_mut() {
            if sleep.poll(cx).is_ready() {
                return Poll::Ready(Some(Err(Box::new(TimedOut))));
            }
        }

        // Poll the actual body.
        match ready!(this.body.poll_frame(cx)) {
            Some(Ok(frame)) => {
                // Reset timeout on successful read.
                this.sleep.set(None);
                Poll::Ready(Some(Ok(frame)))
            }
            Some(Err(err)) => Poll::Ready(Some(Err(err.into()))),
            None => Poll::Ready(None),
        }
    }

    #[inline(always)]
    fn size_hint(&self) -> http_body::SizeHint {
        self.body.size_hint()
    }

    #[inline(always)]
    fn is_end_stream(&self) -> bool {
        self.body.is_end_stream()
    }
}

================================================
FILE: src/client/layer/timeout/future.rs
================================================
use std::{
    future::Future,
    pin::Pin,
    task::{Context, Poll, ready},
    time::Duration,
};

use http::Response;
use pin_project_lite::pin_project;
use tokio::time::Sleep;

use super::body::TimeoutBody;
use crate::{
    client::core::rt::Time,
    error::{BoxError, Error, TimedOut},
};

pin_project! {
    /// [`Timeout`] response future
    pub struct ResponseFuture<F> {
        #[pin]
        pub(crate) response: F,
        #[pin]
        pub(crate) total_timeout: Option<Sleep>,
        #[pin]
        pub(crate) read_timeout: Option<Sleep>,
    }
}

impl<F, R, E> Future for ResponseFuture<F>
where
    F: Future<Output = Result<R, E>>,
    E: Into<BoxError>,
{
    type Output = Result<R, BoxError>;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let mut this = self.project();

        // First, try polling the future
        match this.response.poll(cx) {
            Poll::Ready(v) => return Poll::Ready(v.map_err(Into::into)),
            Poll::Pending => {}
        }

        // Helper closure for polling a timeout and returning a TimedOut error
        let mut check_timeout = |sleep: Option<Pin<&mut Sleep>>| {
            if let Some(sleep) = sleep {
                if sleep.poll(cx).is_ready() {
                    return Some(Poll::Ready(Err(Error::request(TimedOut).into())));
                }
            }
            None
        };

        // Check total timeout first
        if let Some(poll) = check_timeout(this.total_timeout.as_mut().as_pin_mut()) {
            return poll;
        }

        // Check read timeout
        if let Some(poll) = check_timeout(this.read_timeout.as_mut().as_pin_mut()) {
            return poll;
        }

        Poll::Pending
    }
}

pin_project! {
    /// Response future for [`ResponseBodyTimeout`].
    pub struct ResponseBodyTimeoutFuture<Fut> {
        #[pin]
        pub(super) inner: Fut,
        pub(super) total_timeout: Option<Duration>,
        pub(super) read_timeout: Option<Duration>,
        pub(super) timer: Time,
    }
}

impl<Fut, B, E> Future for ResponseBodyTimeoutFuture<Fut>
where
    Fut: Future<Output = Result<Response<B>, E>>,
{
    type Output = Result<Response<TimeoutBody<B>>, E>;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // Copy the config out before projecting, so the inner future can be
        // polled with a fresh mutable borrow.
        let timer = self.timer.clone();
        let total_timeout = self.total_timeout;
        let read_timeout = self.read_timeout;
        let res = ready!(self.project().inner.poll(cx))?
            .map(|body| TimeoutBody::new(timer, total_timeout, read_timeout, body));
        Poll::Ready(Ok(res))
    }
}

================================================
FILE: src/client/layer/timeout.rs
================================================
//! Middleware for setting a timeout on the response.
mod body; mod future; use std::{ sync::Arc, task::{Context, Poll}, time::Duration, }; use http::{Request, Response}; use tower::{BoxError, Layer, Service}; pub use self::body::TimeoutBody; use self::future::{ResponseBodyTimeoutFuture, ResponseFuture}; use crate::{ client::core::rt::{Time, Timer}, config::RequestConfig, }; /// Options for configuring timeouts. #[derive(Clone, Copy, Default)] pub struct TimeoutOptions { total_timeout: Option, read_timeout: Option, } impl TimeoutOptions { /// Sets the read timeout for the options. #[inline] pub fn read_timeout(&mut self, read_timeout: Duration) -> &mut Self { self.read_timeout = Some(read_timeout); self } /// Sets the total timeout for the options. #[inline] pub fn total_timeout(&mut self, total_timeout: Duration) -> &mut Self { self.total_timeout = Some(total_timeout); self } } impl_request_config_value!(TimeoutOptions); /// [`Layer`] that applies a [`Timeout`] middleware to a service. // This layer allows you to set a total timeout and a read timeout for requests. #[derive(Clone)] pub struct TimeoutLayer { timeout: RequestConfig, } impl TimeoutLayer { /// Create a new [`TimeoutLayer`]. pub fn new(options: TimeoutOptions) -> Self { TimeoutLayer { timeout: RequestConfig::new(Some(options)), } } } impl Layer for TimeoutLayer { type Service = Timeout; #[inline(always)] fn layer(&self, service: S) -> Self::Service { Timeout { inner: service, timeout: self.timeout, } } } /// Middleware that applies total and per-read timeouts to a [`Service`] response body. 
#[derive(Clone)] pub struct Timeout { inner: T, timeout: RequestConfig, } impl Service> for Timeout where S: Service, Response = Response, Error = BoxError>, { type Response = S::Response; type Error = BoxError; type Future = ResponseFuture; #[inline(always)] fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { self.inner.poll_ready(cx) } #[inline(always)] fn call(&mut self, req: Request) -> Self::Future { let (total_timeout, read_timeout) = fetch_timeout_options(&self.timeout, req.extensions()); ResponseFuture { response: self.inner.call(req), total_timeout: total_timeout.map(tokio::time::sleep), read_timeout: read_timeout.map(tokio::time::sleep), } } } /// [`Layer`] that applies a [`ResponseBodyTimeout`] middleware to a service. // This layer allows you to set a total timeout and a read timeout for the response body. #[derive(Clone)] pub struct ResponseBodyTimeoutLayer { timer: Time, timeout: RequestConfig, } impl ResponseBodyTimeoutLayer { /// Creates a new [`ResponseBodyTimeoutLayer`]. pub fn new(timer: M, options: TimeoutOptions) -> Self where M: Timer + Send + Sync + 'static, { Self { timer: Time::Timer(Arc::new(timer)), timeout: RequestConfig::new(Some(options)), } } } impl Layer for ResponseBodyTimeoutLayer { type Service = ResponseBodyTimeout; #[inline(always)] fn layer(&self, inner: S) -> Self::Service { ResponseBodyTimeout { inner, timeout: self.timeout, timer: self.timer.clone(), } } } /// Middleware that timeouts the response body of a request with a [`Service`] to a total timeout /// and a read timeout. 
#[derive(Clone)] pub struct ResponseBodyTimeout { inner: S, timeout: RequestConfig, timer: Time, } impl Service> for ResponseBodyTimeout where S: Service, Response = Response>, { type Response = Response>; type Error = S::Error; type Future = ResponseBodyTimeoutFuture; #[inline(always)] fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { self.inner.poll_ready(cx) } #[inline(always)] fn call(&mut self, req: Request) -> Self::Future { let (total_timeout, read_timeout) = fetch_timeout_options(&self.timeout, req.extensions()); ResponseBodyTimeoutFuture { inner: self.inner.call(req), total_timeout, read_timeout, timer: self.timer.clone(), } } } fn fetch_timeout_options( opts: &RequestConfig, extensions: &http::Extensions, ) -> (Option, Option) { match (opts.as_ref(), opts.fetch(extensions)) { (Some(opts), Some(request_opts)) => ( request_opts.total_timeout.or(opts.total_timeout), request_opts.read_timeout.or(opts.read_timeout), ), (Some(opts), None) => (opts.total_timeout, opts.read_timeout), (None, Some(opts)) => (opts.total_timeout, opts.read_timeout), (None, None) => (None, None), } } ================================================ FILE: src/client/layer.rs ================================================ //! Middleware for the client. pub mod client; pub mod config; #[cfg(any( feature = "gzip", feature = "zstd", feature = "brotli", feature = "deflate", ))] pub mod decoder; pub mod redirect; pub mod retry; pub mod timeout; ================================================ FILE: src/client/multipart.rs ================================================ //! multipart/form-data use std::{borrow::Cow, pin::Pin}; use bytes::Bytes; use futures_util::{Stream, StreamExt, future, stream}; use http::header::HeaderMap; use http_body_util::BodyExt; use mime_guess::Mime; use percent_encoding::{self, AsciiSet, NON_ALPHANUMERIC}; #[cfg(feature = "stream")] use {std::io, std::path::Path, tokio::fs::File}; use super::Body; /// An async multipart/form-data request. 
#[derive(Debug)] pub struct Form { boundary: Cow<'static, str>, computed_headers: Vec>, fields: Vec<(Cow<'static, str>, Part)>, percent_encoding: PercentEncoding, } /// A field in a multipart form. #[derive(Debug)] pub struct Part { meta: PartMetadata, value: Body, body_length: Option, } #[derive(Debug)] struct PartMetadata { mime: Option, file_name: Option>, headers: HeaderMap, } // ===== impl Form ===== impl Default for Form { fn default() -> Self { Self::new() } } impl Form { /// Creates a new async Form without any content. pub fn new() -> Form { Form::with_boundary(gen_boundary()) } /// Creates a new async Form with a custom boundary. /// /// **Setting a custom boundary incurs significant risk of generating /// corrupted bodies.** Only use this if you need it and you understand the /// risk! pub fn with_boundary(boundary: S) -> Form where S: Into>, { Form { boundary: boundary.into(), computed_headers: Vec::new(), fields: Vec::new(), percent_encoding: PercentEncoding::PathSegment, } } /// Get the boundary that this form will use. pub fn boundary(&self) -> &str { &self.boundary } /// Add a data field with supplied name and value. /// /// # Examples /// /// ``` /// let form = wreq::multipart::Form::new() /// .text("username", "seanmonstar") /// .text("password", "secret"); /// ``` pub fn text(self, name: T, value: U) -> Form where T: Into>, U: Into>, { self.part(name, Part::text(value)) } /// Adds a file field. /// /// The path will be used to try to guess the filename and mime. /// /// # Examples /// /// ```no_run /// # async fn run() -> std::io::Result<()> { /// let form = wreq::multipart::Form::new() /// .file("key", "/path/to/file") /// .await?; /// # Ok(()) /// # } /// ``` /// /// # Errors /// /// Errors when the file cannot be opened. #[cfg(feature = "stream")] #[cfg_attr(docsrs, doc(cfg(feature = "stream")))] pub async fn file(self, name: T, path: U) -> io::Result

where T: Into>, U: AsRef, { Ok(self.part(name, Part::file(path).await?)) } /// Adds a customized Part. pub fn part(mut self, name: T, part: Part) -> Form where T: Into>, { self.fields.push((name.into(), part)); self } /// Configure this `Form` to percent-encode using the `path-segment` rules. pub fn percent_encode_path_segment(mut self) -> Form { self.percent_encoding = PercentEncoding::PathSegment; self } /// Configure this `Form` to percent-encode using the `attr-char` rules. pub fn percent_encode_attr_chars(mut self) -> Form { self.percent_encoding = PercentEncoding::AttrChar; self } /// Configure this `Form` to skip percent-encoding pub fn percent_encode_noop(mut self) -> Form { self.percent_encoding = PercentEncoding::NoOp; self } /// Consume this instance and transform into an instance of Body for use in a request. pub(crate) fn stream(self) -> Body { if self.fields.is_empty() { return Body::empty(); } Body::stream(self.into_stream()) } /// Produce a stream of the bytes in this `Form`, consuming it. pub fn into_stream(mut self) -> impl Stream> + Send + Sync { if self.fields.is_empty() { let empty_stream: Pin< Box> + Send + Sync>, > = Box::pin(futures_util::stream::empty()); return empty_stream; } // create initial part to init reduce chain let (name, part) = self.fields.remove(0); let start = Box::pin(self.part_stream(name, part)) as Pin> + Send + Sync>>; let fields = self.take_fields(); // for each field, chain an additional stream let stream = fields.into_iter().fold(start, |memo, (name, part)| { let part_stream = self.part_stream(name, part); Box::pin(memo.chain(part_stream)) as Pin> + Send + Sync>> }); // append special ending boundary let last = stream::once(future::ready(Ok( format!("--{}--\r\n", self.boundary).into() ))); Box::pin(stream.chain(last)) } /// Generate a crate::core::Body stream for a single Part instance of a Form request. 
pub(crate) fn part_stream( &mut self, name: T, part: Part, ) -> impl Stream> + use where T: Into>, { // start with boundary let boundary = stream::once(future::ready(Ok(format!("--{}\r\n", self.boundary).into()))); // append headers let header = stream::once(future::ready(Ok({ let mut h = self .percent_encoding .encode_headers(&name.into(), &part.meta); h.extend_from_slice(b"\r\n\r\n"); h.into() }))); // then append form data followed by terminating CRLF boundary .chain(header) .chain(part.value.into_data_stream()) .chain(stream::once(future::ready(Ok("\r\n".into())))) } // If predictable, computes the length the request will have // The length should be predictable if only String and file fields have been added, // but not if a generic reader has been added; pub(crate) fn compute_length(&mut self) -> Option { let mut length = 0u64; for (name, field) in self.fields.iter() { match field.value_len() { Some(value_length) => { // We are constructing the header just to get its length. To not have to // construct it again when the request is sent we cache these headers. let header = self.percent_encoding.encode_headers(name, field.metadata()); let header_length = header.len(); self.computed_headers.push(header); // The additions mimic the format string out of which the field is constructed // in Reader. Not the cleanest solution because if that format string is // ever changed then this formula needs to be changed too which is not an // obvious dependency in the code. length += 2 + self.boundary.len() as u64 + 2 + header_length as u64 + 4 + value_length + 2 } _ => return None, } } // If there is at least one field there is a special boundary for the very last field. if !self.fields.is_empty() { length += 2 + self.boundary.len() as u64 + 4 } Some(length) } /// Take the fields vector of this instance, replacing with an empty vector. 
fn take_fields(&mut self) -> Vec<(Cow<'static, str>, Part)> { std::mem::take(&mut self.fields) } } // ===== impl Part ===== impl Part { /// Makes a text parameter. pub fn text(value: T) -> Part where T: Into>, { let body = match value.into() { Cow::Borrowed(slice) => Body::from(slice), Cow::Owned(string) => Body::from(string), }; Part::new(body, None) } /// Makes a new parameter from arbitrary bytes. pub fn bytes(value: T) -> Part where T: Into>, { let body = match value.into() { Cow::Borrowed(slice) => Body::from(slice), Cow::Owned(vec) => Body::from(vec), }; Part::new(body, None) } /// Makes a new parameter from an arbitrary stream. pub fn stream>(value: T) -> Part { Part::new(value.into(), None) } /// Makes a new parameter from an arbitrary stream with a known length. This is particularly /// useful when adding something like file contents as a stream, where you can know the content /// length beforehand. pub fn stream_with_length>(value: T, length: u64) -> Part { Part::new(value.into(), Some(length)) } /// Makes a file parameter. /// /// # Errors /// /// Errors when the file cannot be opened. #[cfg(feature = "stream")] #[cfg_attr(docsrs, doc(cfg(feature = "stream")))] pub async fn file>(path: T) -> io::Result { let path = path.as_ref(); let file_name = path .file_name() .map(|filename| filename.to_string_lossy().into_owned()); let ext = path.extension().and_then(|ext| ext.to_str()).unwrap_or(""); let mime = mime_guess::from_ext(ext).first_or_octet_stream(); let file = File::open(path).await?; let len = file.metadata().await.map(|m| m.len()).ok(); let field = match len { Some(len) => Part::stream_with_length(file, len), None => Part::stream(file), } .mime(mime); Ok(if let Some(file_name) = file_name { field.file_name(file_name) } else { field }) } fn new(value: Body, body_length: Option) -> Part { Part { meta: PartMetadata::new(), value, body_length, } } /// Tries to set the mime of this part. 
pub fn mime_str(self, mime: &str) -> crate::Result { Ok(self.mime(mime.parse().map_err(crate::Error::builder)?)) } // Re-export when mime 0.4 is available, with split MediaType/MediaRange. fn mime(self, mime: Mime) -> Part { self.with_inner(move |inner| inner.mime(mime)) } /// Sets the filename, builder style. pub fn file_name(self, filename: T) -> Part where T: Into>, { self.with_inner(move |inner| inner.file_name(filename)) } /// Sets custom headers for the part. pub fn headers(self, headers: HeaderMap) -> Part { self.with_inner(move |inner| inner.headers(headers)) } fn value_len(&self) -> Option { if self.body_length.is_some() { self.body_length } else { self.value.content_length() } } fn metadata(&self) -> &PartMetadata { &self.meta } fn with_inner(self, func: F) -> Self where F: FnOnce(PartMetadata) -> PartMetadata, { Part { meta: func(self.meta), ..self } } } // ===== impl PartMetadata ===== impl PartMetadata { fn new() -> Self { PartMetadata { mime: None, file_name: None, headers: HeaderMap::default(), } } fn mime(mut self, mime: Mime) -> Self { self.mime = Some(mime); self } fn file_name(mut self, filename: T) -> Self where T: Into>, { self.file_name = Some(filename.into()); self } fn headers(mut self, headers: T) -> Self where T: Into, { self.headers = headers.into(); self } } // https://url.spec.whatwg.org/#fragment-percent-encode-set const FRAGMENT_ENCODE_SET: &AsciiSet = &percent_encoding::CONTROLS .add(b' ') .add(b'"') .add(b'<') .add(b'>') .add(b'`'); // https://url.spec.whatwg.org/#path-percent-encode-set const PATH_ENCODE_SET: &AsciiSet = &FRAGMENT_ENCODE_SET.add(b'#').add(b'?').add(b'{').add(b'}'); const PATH_SEGMENT_ENCODE_SET: &AsciiSet = &PATH_ENCODE_SET.add(b'/').add(b'%'); // https://tools.ietf.org/html/rfc8187#section-3.2.1 const ATTR_CHAR_ENCODE_SET: &AsciiSet = &NON_ALPHANUMERIC .remove(b'!') .remove(b'#') .remove(b'$') .remove(b'&') .remove(b'+') .remove(b'-') .remove(b'.') .remove(b'^') .remove(b'_') .remove(b'`') .remove(b'|') 
.remove(b'~'); #[derive(Debug)] enum PercentEncoding { PathSegment, AttrChar, NoOp, } impl PercentEncoding { fn encode_headers(&self, name: &str, field: &PartMetadata) -> Vec { let mut buf = Vec::new(); buf.extend_from_slice(b"Content-Disposition: form-data; "); match self.percent_encode(name) { Cow::Borrowed(value) => { // nothing has been percent encoded buf.extend_from_slice(b"name=\""); buf.extend_from_slice(value.as_bytes()); buf.extend_from_slice(b"\""); } Cow::Owned(value) => { // something has been percent encoded buf.extend_from_slice(b"name*=utf-8''"); buf.extend_from_slice(value.as_bytes()); } } // According to RFC7578 Section 4.2, `filename*=` syntax is invalid. // See https://github.com/seanmonstar/reqwest/issues/419. if let Some(filename) = &field.file_name { buf.extend_from_slice(b"; filename=\""); let legal_filename = filename .replace('\\', "\\\\") .replace('"', "\\\"") .replace('\r', "\\\r") .replace('\n', "\\\n"); buf.extend_from_slice(legal_filename.as_bytes()); buf.extend_from_slice(b"\""); } if let Some(mime) = &field.mime { buf.extend_from_slice(b"\r\nContent-Type: "); buf.extend_from_slice(mime.as_ref().as_bytes()); } for (k, v) in field.headers.iter() { buf.extend_from_slice(b"\r\n"); buf.extend_from_slice(k.as_str().as_bytes()); buf.extend_from_slice(b": "); buf.extend_from_slice(v.as_bytes()); } buf } fn percent_encode<'a>(&self, value: &'a str) -> Cow<'a, str> { use percent_encoding::utf8_percent_encode as percent_encode; match self { Self::PathSegment => percent_encode(value, PATH_SEGMENT_ENCODE_SET).into(), Self::AttrChar => percent_encode(value, ATTR_CHAR_ENCODE_SET).into(), Self::NoOp => value.into(), } } } /// See chromium's implementation: https://source.chromium.org/chromium/chromium/src/+/main:third_party/blink/renderer/platform/network/form_data_encoder.cc fn gen_boundary() -> String { use crate::util::fast_random as random; const PREFIX: &[u8; 22] = b"----WebKitFormBoundary"; // The RFC 2046 spec says the alphanumeric 
characters plus the // following characters are legal for boundaries: '()+_,-./:=? // However the following characters, though legal, cause some sites // to fail: (),./:=+ // Note that our algorithm makes it twice as much likely for 'A' or 'B' // to appear in the boundary string, because 0x41 and 0x42 are present in // the below array twice. const ALPHA_NUMERIC_ENCODING_MAP: [u8; 64] = [ 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x41, 0x42, ]; // Pre-allocate a buffer for the boundary string. The final length will be 22 (prefix) + 16 // (random chars) = 38. let mut boundary = Vec::with_capacity(38); // Start with an informative prefix. boundary.extend_from_slice(PREFIX); // Append 16 random 7bit ascii AlphaNumeric characters. 
for _ in 0..2 {
    // Each 64-bit random value yields eight 6-bit indices into the map.
    let mut randomness = random();
    for _ in 0..8 {
        let index = (randomness & 0x3F) as usize;
        boundary.push(ALPHA_NUMERIC_ENCODING_MAP[index]);
        randomness >>= 6;
    }
}

assert_eq!(boundary.len(), 38);
String::from_utf8(boundary).expect("Invalid UTF-8 generated")
}

#[cfg(test)]
mod tests {
    use std::future;

    use futures_util::{TryStreamExt, stream};
    use tokio::{self, runtime};

    use super::*;

    #[test]
    fn form_empty() {
        let form = Form::new();

        let rt = runtime::Builder::new_current_thread()
            .enable_all()
            .build()
            .expect("new rt");
        let body = form.stream().into_data_stream();
        let s = body.map_ok(|try_c| try_c.to_vec()).try_concat();

        let out = rt.block_on(s);
        assert!(out.unwrap().is_empty());
    }

    #[test]
    fn stream_to_end() {
        let mut form = Form::new()
            .part(
                "reader1",
                Part::stream(Body::stream(stream::once(future::ready::<
                    Result<String, crate::Error>,
                >(Ok(
                    "part1".to_owned()
                ))))),
            )
            .part("key1", Part::text("value1"))
            .part(
                "key2",
                Part::text("value2").mime(mime_guess::mime::IMAGE_BMP),
            )
            .part(
                "reader2",
                Part::stream(Body::stream(stream::once(future::ready::<
                    Result<String, crate::Error>,
                >(Ok(
                    "part2".to_owned()
                ))))),
            )
            .part("key3", Part::text("value3").file_name("filename"));
        form.boundary = "boundary".into();
        let expected = "--boundary\r\n\
             Content-Disposition: form-data; name=\"reader1\"\r\n\r\n\
             part1\r\n\
             --boundary\r\n\
             Content-Disposition: form-data; name=\"key1\"\r\n\r\n\
             value1\r\n\
             --boundary\r\n\
             Content-Disposition: form-data; name=\"key2\"\r\n\
             Content-Type: image/bmp\r\n\r\n\
             value2\r\n\
             --boundary\r\n\
             Content-Disposition: form-data; name=\"reader2\"\r\n\r\n\
             part2\r\n\
             --boundary\r\n\
             Content-Disposition: form-data; name=\"key3\"; filename=\"filename\"\r\n\r\n\
             value3\r\n--boundary--\r\n";

        let rt = runtime::Builder::new_current_thread()
            .enable_all()
            .build()
            .expect("new rt");

        let body = form.stream().into_data_stream();
        let s = body.map(|try_c| try_c.map(|r| r.to_vec())).try_concat();

        let out = rt.block_on(s).unwrap();
        // These prints are for debug purposes in case the test fails
        println!(
            "START REAL\n{}\nEND REAL",
            std::str::from_utf8(&out).unwrap()
        );
        println!("START EXPECTED\n{expected}\nEND EXPECTED");
        assert_eq!(std::str::from_utf8(&out).unwrap(), expected);
    }

    #[test]
    fn stream_to_end_with_header() {
        let mut part = Part::text("value2").mime(mime_guess::mime::IMAGE_BMP);
        let mut headers = HeaderMap::new();
        headers.insert("Hdr3", "/a/b/c".parse().unwrap());
        part = part.headers(headers);
        let mut form = Form::new().part("key2", part);
        form.boundary = "boundary".into();
        let expected = "--boundary\r\n\
             Content-Disposition: form-data; name=\"key2\"\r\n\
             Content-Type: image/bmp\r\n\
             hdr3: /a/b/c\r\n\
             \r\n\
             value2\r\n\
             --boundary--\r\n";

        let rt = runtime::Builder::new_current_thread()
            .enable_all()
            .build()
            .expect("new rt");

        let body = form.stream().into_data_stream();
        let s = body.map(|try_c| try_c.map(|r| r.to_vec())).try_concat();

        let out = rt.block_on(s).unwrap();
        // These prints are for debug purposes in case the test fails
        println!(
            "START REAL\n{}\nEND REAL",
            std::str::from_utf8(&out).unwrap()
        );
        println!("START EXPECTED\n{expected}\nEND EXPECTED");
        assert_eq!(std::str::from_utf8(&out).unwrap(), expected);
    }

    #[test]
    fn correct_content_length() {
        // Setup an arbitrary data stream
        let stream_data = b"just some stream data";
        let stream_len = stream_data.len();
        let stream_data = stream_data
            .chunks(3)
            .map(|c| Ok::<_, std::io::Error>(Bytes::from(c)));
        let the_stream = futures_util::stream::iter(stream_data);

        let bytes_data = b"some bytes data".to_vec();
        let bytes_len = bytes_data.len();

        let stream_part = Part::stream_with_length(Body::stream(the_stream), stream_len as u64);
        let body_part = Part::bytes(bytes_data);

        // A simple check to make sure we get the configured body length
        assert_eq!(stream_part.value_len().unwrap(), stream_len as u64);

        // Make sure it delegates to the underlying body if length is not specified
        assert_eq!(body_part.value_len().unwrap(), bytes_len as u64);
    }

    #[test]
    fn header_percent_encoding() {
        let name = "start%'\"\r\nßend";
        let field = Part::text("");

        assert_eq!(
            PercentEncoding::PathSegment.encode_headers(name, &field.meta),
            &b"Content-Disposition: form-data; name*=utf-8''start%25'%22%0D%0A%C3%9Fend"[..]
        );

        assert_eq!(
            PercentEncoding::AttrChar.encode_headers(name, &field.meta),
            &b"Content-Disposition: form-data; name*=utf-8''start%25%27%22%0D%0A%C3%9Fend"[..]
        );
    }

    #[test]
    fn custom_boundary_is_applied() {
        let form = Form::with_boundary("----WebKitFormBoundary0123456789");
        assert_eq!(form.boundary(), "----WebKitFormBoundary0123456789");
    }
}

================================================
FILE: src/client/request.rs
================================================
use std::{
    convert::TryFrom,
    fmt,
    future::Future,
    net::{IpAddr, Ipv4Addr, Ipv6Addr},
    time::Duration,
};

#[cfg(any(feature = "form", feature = "json", feature = "multipart"))]
use http::header::CONTENT_TYPE;
use http::{Extensions, Uri, Version};
#[cfg(any(feature = "query", feature = "form", feature = "json"))]
use serde::Serialize;
#[cfg(feature = "multipart")]
use {super::multipart, bytes::Bytes, http::header::CONTENT_LENGTH};
#[cfg(feature = "cookies")]
use {
    crate::cookie::{CookieStore, IntoCookieStore},
    std::sync::Arc,
};

#[cfg(any(
    feature = "gzip",
    feature = "zstd",
    feature = "brotli",
    feature = "deflate",
))]
use super::layer::decoder::AcceptEncoding;
use super::{
    Body, Client, IntoEmulation, Response,
    future::Pending,
    group::Group,
    layer::{
        config::{DefaultHeaders, RequestOptions},
        timeout::TimeoutOptions,
    },
};
use crate::{
    Error, Method, Proxy,
    config::{RequestConfig, RequestConfigValue},
    ext::UriExt,
    header::{AUTHORIZATION, HeaderMap, HeaderName, HeaderValue, OrigHeaderMap},
    redirect,
};

/// A request which can be executed with [`Client::execute()`].
#[derive(Debug)]
pub struct Request(http::Request<Option<Body>>);

/// A builder to construct the properties of a [`Request`].
///
/// To construct a [`RequestBuilder`], refer to the [`Client`] documentation.
#[must_use = "RequestBuilder does nothing until you 'send' it"]
pub struct RequestBuilder {
    client: Client,
    // Errors from earlier builder steps are carried here and surfaced on send.
    request: crate::Result<Request>,
}

impl Request {
    /// Constructs a new [`Request`].
    pub fn new(method: Method, uri: Uri) -> Self {
        let mut request = http::Request::new(None);
        *request.method_mut() = method;
        *request.uri_mut() = uri;
        Request(request)
    }

    /// Get the method.
    #[inline]
    pub fn method(&self) -> &Method {
        self.0.method()
    }

    /// Get a mutable reference to the method.
    #[inline]
    pub fn method_mut(&mut self) -> &mut Method {
        self.0.method_mut()
    }

    /// Get the uri.
    #[inline]
    pub fn uri(&self) -> &Uri {
        self.0.uri()
    }

    /// Get a mutable reference to the uri.
    #[inline]
    pub fn uri_mut(&mut self) -> &mut Uri {
        self.0.uri_mut()
    }

    /// Get the headers.
    #[inline]
    pub fn headers(&self) -> &HeaderMap {
        self.0.headers()
    }

    /// Get a mutable reference to the headers.
    #[inline]
    pub fn headers_mut(&mut self) -> &mut HeaderMap {
        self.0.headers_mut()
    }

    /// Get the body.
    #[inline]
    pub fn body(&self) -> Option<&Body> {
        self.0.body().as_ref()
    }

    /// Get a mutable reference to the body.
    #[inline]
    pub fn body_mut(&mut self) -> &mut Option<Body> {
        self.0.body_mut()
    }

    /// Get the http version.
    #[inline]
    pub fn version(&self) -> Option<Version> {
        // The version lives in the per-request config, not on the inner
        // `http::Request`.
        self.config::<RequestOptions>()
            .and_then(|opts| opts.version)
    }

    /// Get a mutable reference to the http version.
    #[inline]
    pub fn version_mut(&mut self) -> &mut Option<Version> {
        &mut self
            .config_mut::<RequestOptions>()
            .get_or_insert_default()
            .version
    }

    /// Returns a reference to the associated extensions.
    ///
    /// # Examples
    ///
    /// ```
    /// # use wreq;
    /// let request = wreq::get("http://httpbin.org/get")
    ///     .build()
    ///     .expect("failed to build request");
    /// assert!(request.extensions().get::<u32>().is_none());
    /// ```
    #[inline]
    pub fn extensions(&self) -> &Extensions {
        self.0.extensions()
    }

    /// Returns a mutable reference to the associated extensions.
    ///
    /// # Examples
    ///
    /// ```
    /// # use wreq;
    /// let mut request = wreq::get("http://httpbin.org/get")
    ///     .build()
    ///     .expect("failed to build request");
    /// request.extensions_mut().insert("hello");
    /// assert_eq!(request.extensions().get(), Some(&"hello"));
    /// ```
    #[inline]
    pub fn extensions_mut(&mut self) -> &mut Extensions {
        self.0.extensions_mut()
    }

    /// Attempt to clone the request.
    ///
    /// `None` is returned if the request can not be cloned, i.e. if the body is a stream.
    pub fn try_clone(&self) -> Option<Request> {
        let body = match self.body() {
            Some(body) => Some(body.try_clone()?),
            None => None,
        };
        let mut req = Request::new(self.method().clone(), self.uri().clone());
        *req.headers_mut() = self.headers().clone();
        *req.version_mut() = self.version();
        *req.extensions_mut() = self.extensions().clone();
        *req.body_mut() = body;
        Some(req)
    }

    #[inline]
    pub(crate) fn config<T>(&self) -> Option<&T::Value>
    where
        T: RequestConfigValue,
    {
        RequestConfig::<T>::get(self.extensions())
    }

    #[inline]
    pub(crate) fn config_mut<T>(&mut self) -> &mut Option<T::Value>
    where
        T: RequestConfigValue,
    {
        RequestConfig::<T>::get_mut(self.extensions_mut())
    }
}

impl RequestBuilder {
    pub(super) fn new(client: Client, request: crate::Result<Request>) -> RequestBuilder {
        let mut builder = RequestBuilder { client, request };

        // If the URI carried userinfo (https://user:pass@host/), strip it out
        // and apply it as basic auth instead.
        let auth = builder
            .request
            .as_mut()
            .ok()
            .and_then(|req| extract_authority(req.uri_mut()));

        if let Some((username, password)) = auth {
            builder.basic_auth(username, password)
        } else {
            builder
        }
    }

    /// Assemble a builder starting from an existing `Client` and a `Request`.
    pub fn from_parts(client: Client, request: Request) -> RequestBuilder {
        RequestBuilder {
            client,
            request: crate::Result::Ok(request),
        }
    }

    /// Add a `Header` to this Request with ability to define if `header_value` is sensitive.
fn header_sensitive(mut self, key: K, value: V, sensitive: bool) -> RequestBuilder where HeaderName: TryFrom, >::Error: Into, HeaderValue: TryFrom, >::Error: Into, { let mut error = None; if let Ok(ref mut req) = self.request { match >::try_from(key) { Ok(key) => match >::try_from(value) { Ok(mut value) => { // We want to potentially make an non-sensitive header // to be sensitive, not the reverse. So, don't turn off // a previously sensitive header. if sensitive { value.set_sensitive(true); } req.headers_mut().append(key, value); } Err(e) => error = Some(Error::builder(e.into())), }, Err(e) => error = Some(Error::builder(e.into())), }; } if let Some(err) = error { self.request = Err(err); } self } /// Add a `Header` to this Request. /// /// If the header is already present, the value will be replaced. #[inline] pub fn header(self, key: K, value: V) -> RequestBuilder where HeaderName: TryFrom, >::Error: Into, HeaderValue: TryFrom, >::Error: Into, { self.header_sensitive(key, value, false) } /// Add a set of Headers to the existing ones on this Request. /// /// The headers will be merged in to any already set. pub fn headers(mut self, headers: HeaderMap) -> RequestBuilder { if let Ok(ref mut req) = self.request { crate::util::replace_headers(req.headers_mut(), headers); } self } /// Set the original headers for this request. pub fn orig_headers(mut self, orig_headers: OrigHeaderMap) -> RequestBuilder { if let Ok(ref mut req) = self.request { req.config_mut::().replace(orig_headers); } self } /// Enable or disable client default headers for this request. /// /// By default, client default headers are included. Set to `false` to skip them. pub fn default_headers(mut self, enable: bool) -> RequestBuilder { if let Ok(ref mut req) = self.request { req.config_mut::().replace(enable); } self } /// Enable HTTP authentication. 
/// /// ```rust /// # use wreq::Error; /// # /// # async fn run() -> Result<(), Error> { /// let client = wreq::Client::new(); /// let resp = client /// .get("http://httpbin.org/get") /// .auth("your_token_here") /// .send() /// .await?; /// # Ok(()) /// # } /// ``` pub fn auth(self, token: V) -> RequestBuilder where HeaderValue: TryFrom, >::Error: Into, { self.header_sensitive(AUTHORIZATION, token, true) } /// Enable HTTP basic authentication. /// /// ```rust /// # use wreq::Error; /// /// # async fn run() -> Result<(), Error> { /// let client = wreq::Client::new(); /// let resp = client /// .delete("http://httpbin.org/delete") /// .basic_auth("admin", Some("good password")) /// .send() /// .await?; /// # Ok(()) /// # } /// ``` pub fn basic_auth(self, username: U, password: Option

) -> RequestBuilder where U: fmt::Display, P: fmt::Display, { let header_value = crate::util::basic_auth(username, password); self.header_sensitive(AUTHORIZATION, header_value, true) } /// Enable HTTP bearer authentication. /// /// ```rust /// # use wreq::Error; /// # /// # async fn run() -> Result<(), Error> { /// let client = wreq::Client::new(); /// let resp = client /// .get("http://httpbin.org/get") /// .bearer_auth("your_token_here") /// .send() /// .await?; /// # Ok(()) /// # } /// ``` pub fn bearer_auth(self, token: T) -> RequestBuilder { let header_value = format!("Bearer {token}"); self.header_sensitive(AUTHORIZATION, header_value, true) } /// Enables a request timeout. /// /// The timeout is applied from when the request starts connecting until the /// response body has finished. It affects only this request and overrides /// the timeout configured using `ClientBuilder::timeout()`. pub fn timeout(mut self, timeout: Duration) -> RequestBuilder { if let Ok(ref mut req) = self.request { req.config_mut::() .get_or_insert_default() .total_timeout(timeout); } self } /// Enables a read timeout. /// /// The read timeout is applied from when the response body starts being read /// until the response body has finished. It affects only this request and /// overrides the read timeout configured using `ClientBuilder::read_timeout()`. pub fn read_timeout(mut self, timeout: Duration) -> RequestBuilder { if let Ok(ref mut req) = self.request { req.config_mut::() .get_or_insert_default() .read_timeout(timeout); } self } /// Modify the query string of the URI. /// /// Modifies the URI of this request, adding the parameters provided. /// This method appends and does not overwrite. This means that it can /// be called multiple times and that existing query parameters are not /// overwritten if the same key is used. The key will simply show up /// twice in the query string. /// Calling `.query(&[("foo", "a"), ("foo", "b")])` gives `"foo=a&foo=b"`. 
/// /// # Note /// This method does not support serializing a single key-value /// pair. Instead of using `.query(("key", "val"))`, use a sequence, such /// as `.query(&[("key", "val")])`. It's also possible to serialize structs /// and maps into a key-value pair. /// /// # Errors /// This method will fail if the object you provide cannot be serialized /// into a query string. #[cfg(feature = "query")] #[cfg_attr(docsrs, doc(cfg(feature = "query")))] pub fn query(mut self, query: &T) -> RequestBuilder { let mut error = None; if let Ok(ref mut req) = self.request { match serde_html_form::to_string(query) { Ok(serializer) => { let uri = req.uri_mut(); uri.set_query(serializer); } Err(err) => error = Some(Error::builder(err)), } } if let Some(err) = error { self.request = Err(err); } self } /// Send a form body. /// /// Sets the body to the uri encoded serialization of the passed value, /// and also sets the `Content-Type: application/x-www-form-urlencoded` /// header. /// /// ```rust /// # use wreq::Error; /// # use std::collections::HashMap; /// # /// # async fn run() -> Result<(), Error> { /// let mut params = HashMap::new(); /// params.insert("lang", "rust"); /// /// let client = wreq::Client::new(); /// let res = client /// .post("http://httpbin.org") /// .form(¶ms) /// .send() /// .await?; /// # Ok(()) /// # } /// ``` /// /// # Errors /// /// This method fails if the passed value cannot be serialized into /// uri encoded format #[cfg(feature = "form")] #[cfg_attr(docsrs, doc(cfg(feature = "form")))] pub fn form(mut self, form: &T) -> RequestBuilder { if let Ok(ref mut req) = self.request { match serde_html_form::to_string(form) { Ok(body) => { const HEADER_VALUE: HeaderValue = HeaderValue::from_static("application/x-www-form-urlencoded"); req.headers_mut() .entry(CONTENT_TYPE) .or_insert(HEADER_VALUE); req.body_mut().replace(body.into()); } Err(err) => self.request = Err(Error::builder(err)), } } self } /// Send a JSON body. 
/// /// # Optional /// /// This requires the optional `json` feature enabled. /// /// # Errors /// /// Serialization can fail if `T`'s implementation of `Serialize` decides to /// fail, or if `T` contains a map with non-string keys. #[cfg(feature = "json")] #[cfg_attr(docsrs, doc(cfg(feature = "json")))] pub fn json(mut self, json: &T) -> RequestBuilder { if let Ok(ref mut req) = self.request { match serde_json::to_vec(json) { Ok(body) => { const HEADER_VALUE: HeaderValue = HeaderValue::from_static("application/json"); req.headers_mut() .entry(CONTENT_TYPE) .or_insert(HEADER_VALUE); req.body_mut().replace(body.into()); } Err(err) => self.request = Err(Error::builder(err)), } } self } /// Set the request body. pub fn body>(mut self, body: T) -> RequestBuilder { if let Ok(ref mut req) = self.request { *req.body_mut() = Some(body.into()); } self } /// Sends a multipart/form-data body. /// /// ``` /// # use wreq::Error; /// /// # async fn run() -> Result<(), Error> { /// let client = wreq::Client::new(); /// let form = wreq::multipart::Form::new() /// .text("key3", "value3") /// .text("key4", "value4"); /// /// let response = client.post("your uri").multipart(form).send().await?; /// # Ok(()) /// # } /// ``` #[cfg(feature = "multipart")] #[cfg_attr(docsrs, doc(cfg(feature = "multipart")))] pub fn multipart(mut self, mut multipart: multipart::Form) -> RequestBuilder { if let Ok(ref mut req) = self.request { match HeaderValue::from_maybe_shared(Bytes::from(format!( "multipart/form-data; boundary={}", multipart.boundary() ))) { Ok(content_type) => { req.headers_mut() .entry(CONTENT_TYPE) .or_insert(content_type); if let Some(length) = multipart.compute_length() { req.headers_mut() .entry(CONTENT_LENGTH) .or_insert_with(|| HeaderValue::from(length)); } *req.body_mut() = Some(multipart.stream()) } Err(err) => { self.request = Err(Error::builder(err)); } }; } self } /// Set HTTP version pub fn version(mut self, version: Version) -> RequestBuilder { if let Ok(ref mut req) = 
self.request { req.version_mut().replace(version); req.config_mut::() .get_or_insert_default() .version = Some(version); } self } /// Set the redirect policy for this request. pub fn redirect(mut self, policy: redirect::Policy) -> RequestBuilder { if let Ok(ref mut req) = self.request { req.config_mut::().replace(policy); } self } /// Set the persistent cookie store for the request. #[cfg(feature = "cookies")] #[cfg_attr(docsrs, doc(cfg(feature = "cookies")))] pub fn cookie_provider(mut self, cookie_store: C) -> RequestBuilder { if let Ok(ref mut req) = self.request { req.config_mut::>() .replace(cookie_store.into_shared()); } self } /// Sets if this request will announce that it accepts gzip encoding. #[cfg(feature = "gzip")] #[cfg_attr(docsrs, doc(cfg(feature = "gzip")))] pub fn gzip(mut self, gzip: bool) -> RequestBuilder { if let Ok(ref mut req) = self.request { req.config_mut::() .get_or_insert_default() .gzip = gzip; } self } /// Sets if this request will announce that it accepts brotli encoding. #[cfg(feature = "brotli")] #[cfg_attr(docsrs, doc(cfg(feature = "brotli")))] pub fn brotli(mut self, brotli: bool) -> RequestBuilder { if let Ok(ref mut req) = self.request { req.config_mut::() .get_or_insert_default() .brotli = brotli; } self } /// Sets if this request will announce that it accepts deflate encoding. #[cfg(feature = "deflate")] #[cfg_attr(docsrs, doc(cfg(feature = "deflate")))] pub fn deflate(mut self, deflate: bool) -> RequestBuilder { if let Ok(ref mut req) = self.request { req.config_mut::() .get_or_insert_default() .deflate = deflate; } self } /// Sets if this request will announce that it accepts zstd encoding. #[cfg(feature = "zstd")] #[cfg_attr(docsrs, doc(cfg(feature = "zstd")))] pub fn zstd(mut self, zstd: bool) -> RequestBuilder { if let Ok(ref mut req) = self.request { req.config_mut::() .get_or_insert_default() .zstd = zstd; } self } /// Set the proxy for this request. 
pub fn proxy(mut self, proxy: Proxy) -> RequestBuilder { if let Ok(ref mut req) = self.request { req.config_mut::() .get_or_insert_default() .proxy = Some(proxy.into_matcher()); } self } /// Set the local address for this request. pub fn local_address(mut self, local_address: V) -> RequestBuilder where V: Into>, { if let Ok(ref mut req) = self.request { req.config_mut::() .get_or_insert_default() .socket_bind_options .get_or_insert_default() .set_local_address(local_address); } self } /// Set the local addresses for this request. pub fn local_addresses(mut self, ipv4_address: V4, ipv6_address: V6) -> RequestBuilder where V4: Into>, V6: Into>, { if let Ok(ref mut req) = self.request { req.config_mut::() .get_or_insert_default() .socket_bind_options .get_or_insert_default() .set_local_addresses(ipv4_address, ipv6_address); } self } /// Bind connections only on the specified network interface. /// /// This option is only available on the following operating systems: /// /// - Android /// - Fuchsia /// - Linux, /// - macOS and macOS-like systems (iOS, tvOS, watchOS and visionOS) /// - Solaris and illumos /// /// On Android, Linux, and Fuchsia, this uses the /// [`SO_BINDTODEVICE`][man-7-socket] socket option. On macOS and macOS-like /// systems, Solaris, and illumos, this instead uses the [`IP_BOUND_IF` and /// `IPV6_BOUND_IF`][man-7p-ip] socket options (as appropriate). /// /// Note that connections will fail if the provided interface name is not a /// network interface that currently exists when a connection is established. 
/// /// # Example /// /// ``` /// # fn doc() -> Result<(), wreq::Error> { /// let interface = "lo"; /// let client = wreq::Client::builder() /// .interface(interface) /// .build()?; /// # Ok(()) /// # } /// ``` /// /// [man-7-socket]: https://man7.org/linux/man-pages/man7/socket.7.html /// [man-7p-ip]: https://docs.oracle.com/cd/E86824_01/html/E54777/ip-7p.html #[cfg(any( target_os = "android", target_os = "fuchsia", target_os = "illumos", target_os = "ios", target_os = "linux", target_os = "macos", target_os = "solaris", target_os = "tvos", target_os = "visionos", target_os = "watchos", ))] #[cfg_attr( docsrs, doc(cfg(any( target_os = "android", target_os = "fuchsia", target_os = "illumos", target_os = "ios", target_os = "linux", target_os = "macos", target_os = "solaris", target_os = "tvos", target_os = "visionos", target_os = "watchos", ))) )] pub fn interface(mut self, interface: I) -> RequestBuilder where I: Into>, { if let Ok(ref mut req) = self.request { req.config_mut::() .get_or_insert_default() .socket_bind_options .get_or_insert_default() .set_interface(interface); } self } /// Sets the request builder to emulation the specified HTTP context. /// /// This method sets the necessary headers, HTTP/1 and HTTP/2 options configurations, and TLS /// options config to use the specified HTTP context. It allows the client to mimic the /// behavior of different versions or setups, which can be useful for testing or ensuring /// compatibility with various environments. /// /// # Note /// This will overwrite the existing configuration. /// You must set emulation before you can perform subsequent HTTP1/HTTP2/TLS fine-tuning. 
pub fn emulation(mut self, emulation: T) -> RequestBuilder { if let Ok(ref mut req) = self.request { let emulation = emulation.into_emulation(); let opts = req.config_mut::().get_or_insert_default(); opts.group.emulate(emulation.group); opts.tls_options = emulation.tls_options; opts.http1_options = emulation.http1_options; opts.http2_options = emulation.http2_options; return self .headers(emulation.headers) .orig_headers(emulation.orig_headers); } self } /// Assigns a logical group to this request. /// /// Groups define the request's identity and execution context. /// Requests in different groups are logically partitioned to ensure /// resource isolation and prevent metadata leakage. pub fn group(mut self, group: Group) -> RequestBuilder { if let Ok(ref mut req) = self.request { req.config_mut::() .get_or_insert_default() .group .request(group); } self } /// Build a `Request`, which can be inspected, modified and executed with /// [`Client::execute()`]. #[inline] pub fn build(self) -> crate::Result { self.request } /// Build a `Request`, which can be inspected, modified and executed with /// [`Client::execute()`]. /// /// This is similar to [`RequestBuilder::build()`], but also returns the /// embedded [`Client`]. #[inline] pub fn build_split(self) -> (Client, crate::Result) { (self.client, self.request) } /// Constructs the Request and sends it to the target URI, returning a /// future Response. /// /// # Errors /// /// This method fails if there was an error while sending request, /// redirect loop was detected or redirect limit was exhausted. /// /// # Example /// /// ```no_run /// # use wreq::Error; /// # /// # async fn run() -> Result<(), Error> { /// let response = wreq::Client::new().get("https://hyper.rs").send().await?; /// # Ok(()) /// # } /// ``` pub fn send(self) -> impl Future> { match self.request { Ok(req) => self.client.execute(req), Err(err) => Pending::Error { error: Some(err) }, } } /// Attempt to clone the RequestBuilder. 
/// /// `None` is returned if the RequestBuilder can not be cloned, /// i.e. if the request body is a stream. /// /// # Examples /// /// ``` /// # use wreq::Error; /// # /// # fn run() -> Result<(), Error> { /// let client = wreq::Client::new(); /// let builder = client.post("http://httpbin.org/post").body("from a &str!"); /// let clone = builder.try_clone(); /// assert!(clone.is_some()); /// # Ok(()) /// # } /// ``` pub fn try_clone(&self) -> Option { self.request .as_ref() .ok() .and_then(|req| req.try_clone()) .map(|req| RequestBuilder { client: self.client.clone(), request: Ok(req), }) } } /// Check the request URI for a "username:password" type authority, and if /// found, remove it from the URI and return it. fn extract_authority(uri: &mut Uri) -> Option<(String, Option)> { use percent_encoding::percent_decode; let (username, password) = uri.userinfo(); let username: String = percent_decode(username?.as_bytes()) .decode_utf8() .ok()? .into(); let password = password.and_then(|pass| { percent_decode(pass.as_bytes()) .decode_utf8() .ok() .map(String::from) }); if !username.is_empty() || password.is_some() { uri.set_userinfo("", None); return Some((username, password)); } None } impl> From> for Request { #[inline] fn from(req: http::Request) -> Request { Request(req.map(Into::into).map(Some)) } } impl From for http::Request { #[inline] fn from(req: Request) -> http::Request { req.0.map(|body| body.unwrap_or_else(Body::empty)) } } ================================================ FILE: src/client/response.rs ================================================ use std::{ net::SocketAddr, pin::Pin, task::{Context, Poll}, }; use bytes::Bytes; #[cfg(feature = "charset")] use encoding_rs::{Encoding, UTF_8}; #[cfg(feature = "stream")] use futures_util::Stream; use http::{HeaderMap, StatusCode, Uri, Version}; use http_body::{Body as HttpBody, Frame}; use http_body_util::{BodyExt, Collected}; #[cfg(feature = "charset")] use mime::Mime; #[cfg(feature = "json")] use 
serde::de::DeserializeOwned; use super::{ conn::HttpInfo, core::{http1::ext::ReasonPhrase, upgrade}, }; #[cfg(feature = "cookies")] use crate::cookie; use crate::{Body, Error, Upgraded, client::Connected, error::BoxError, ext::RequestUri}; /// A Response to a submitted [`crate::Request`]. #[derive(Debug)] pub struct Response { uri: Uri, res: http::Response, } impl Response { #[inline] pub(super) fn new(mut res: http::Response, uri: Uri) -> Response where B: HttpBody + Send + Sync + 'static, B::Data: Into, B::Error: Into, { Response { uri: res .extensions_mut() .remove::() .map_or(uri, |request_uri| request_uri.0), res: res.map(Body::wrap), } } /// Get the final [`Uri`] of this [`Response`]. #[inline] pub fn uri(&self) -> &Uri { &self.uri } /// Get the [`StatusCode`] of this [`Response`]. #[inline] pub fn status(&self) -> StatusCode { self.res.status() } /// Get the HTTP [`Version`] of this [`Response`]. #[inline] pub fn version(&self) -> Version { self.res.version() } /// Get the [`HeaderMap`] of this [`Response`]. #[inline] pub fn headers(&self) -> &HeaderMap { self.res.headers() } /// Get a mutable reference to the [`HeaderMap`] of this [`Response`]. #[inline] pub fn headers_mut(&mut self) -> &mut HeaderMap { self.res.headers_mut() } /// Get the content length of the [`Response`], if it is known. /// /// This value does not directly represents the value of the `Content-Length` /// header, but rather the size of the response's body. To read the header's /// value, please use the [`Response::headers`] method instead. /// /// Reasons it may not be known: /// /// - The response does not include a body (e.g. it responds to a `HEAD` request). /// - The response is gzipped and automatically decoded (thus changing the actual decoded /// length). #[inline] pub fn content_length(&self) -> Option { HttpBody::size_hint(self.res.body()).exact() } /// Retrieve the cookies contained in the [`Response`]. /// /// Note that invalid 'Set-Cookie' headers will be ignored. 
/// /// # Optional /// /// This requires the optional `cookies` feature to be enabled. #[cfg(feature = "cookies")] pub fn cookies(&self) -> impl Iterator> { self.res .headers() .get_all(crate::header::SET_COOKIE) .iter() .map(cookie::Cookie::parse) .filter_map(Result::ok) } /// Get the local address used to get this [`Response`]. pub fn local_addr(&self) -> Option { self.res .extensions() .get::() .map(HttpInfo::local_addr) } /// Get the remote address used to get this [`Response`]. pub fn remote_addr(&self) -> Option { self.res .extensions() .get::() .map(HttpInfo::remote_addr) } // body methods /// Get the full response text. /// /// This method decodes the response body with BOM sniffing /// and with malformed sequences replaced with the [`char::REPLACEMENT_CHARACTER`]. /// Encoding is determined from the `charset` parameter of `Content-Type` header, /// and defaults to `utf-8` if not presented. /// /// Note that the BOM is stripped from the returned String. /// /// # Note /// /// If the `charset` feature is disabled the method will only attempt to decode the /// response as UTF-8, regardless of the given `Content-Type` /// /// # Example /// /// ``` /// # async fn run() -> Result<(), Box> { /// let content = wreq::Client::new() /// .get("http://httpbin.org/range/26") /// .send() /// .await? /// .text() /// .await?; /// /// println!("text: {content:?}"); /// # Ok(()) /// # } /// ``` pub async fn text(self) -> crate::Result { #[cfg(feature = "charset")] { self.text_with_charset("utf-8").await } #[cfg(not(feature = "charset"))] { let full = self.bytes().await?; let text = String::from_utf8_lossy(&full); Ok(text.into_owned()) } } /// Get the full response text given a specific encoding. /// /// This method decodes the response body with BOM sniffing /// and with malformed sequences replaced with the /// [`char::REPLACEMENT_CHARACTER`]. 
/// You can provide a default encoding for decoding the raw message, while the /// `charset` parameter of `Content-Type` header is still prioritized. For more information /// about the possible encoding name, please go to [`encoding_rs`] docs. /// /// Note that the BOM is stripped from the returned String. /// /// [`encoding_rs`]: https://docs.rs/encoding_rs/0.8/encoding_rs/#relationship-with-windows-code-pages /// /// # Optional /// /// This requires the optional `encoding_rs` feature enabled. /// /// # Example /// /// ``` /// # async fn run() -> Result<(), Box> { /// let content = wreq::Client::new() /// .get("http://httpbin.org/range/26") /// .send() /// .await? /// .text_with_charset("utf-8") /// .await?; /// /// println!("text: {content:?}"); /// # Ok(()) /// # } /// ``` #[cfg(feature = "charset")] #[cfg_attr(docsrs, doc(cfg(feature = "charset")))] pub async fn text_with_charset( self, default_encoding: impl AsRef, ) -> crate::Result { let content_type = self .headers() .get(crate::header::CONTENT_TYPE) .and_then(|value| value.to_str().ok()) .and_then(|value| value.parse::().ok()); let encoding_name = content_type .as_ref() .and_then(|mime| mime.get_param("charset").map(|charset| charset.as_str())) .unwrap_or(default_encoding.as_ref()); let encoding = Encoding::for_label(encoding_name.as_bytes()).unwrap_or(UTF_8); let full = self.bytes().await?; let (text, _, _) = encoding.decode(&full); Ok(text.into_owned()) } /// Try to deserialize the response body as JSON. /// /// # Optional /// /// This requires the optional `json` feature enabled. /// /// # Examples /// /// ``` /// # extern crate wreq; /// # extern crate serde; /// # /// # use wreq::Error; /// # use serde::Deserialize; /// # /// // This `derive` requires the `serde` dependency. /// #[derive(Deserialize)] /// struct Ip { /// origin: String, /// } /// /// # async fn run() -> Result<(), Error> { /// let ip = wreq::Client::new() /// .get("http://httpbin.org/ip") /// .send() /// .await? 
/// .json::() /// .await?; /// /// println!("ip: {}", ip.origin); /// # Ok(()) /// # } /// # /// # fn main() { } /// ``` /// /// # Errors /// /// This method fails whenever the response body is not in JSON format /// or it cannot be properly deserialized to target type `T`. For more /// details please see [`serde_json::from_reader`]. /// /// [`serde_json::from_reader`]: https://docs.serde.rs/serde_json/fn.from_reader.html #[cfg(feature = "json")] #[cfg_attr(docsrs, doc(cfg(feature = "json")))] pub async fn json(self) -> crate::Result { let full = self.bytes().await?; serde_json::from_slice(&full).map_err(Error::decode) } /// Get the full response body as [`Bytes`]. /// /// # Example /// /// ``` /// # async fn run() -> Result<(), Box> { /// let bytes = wreq::Client::new() /// .get("http://httpbin.org/ip") /// .send() /// .await? /// .bytes() /// .await?; /// /// println!("bytes: {bytes:?}"); /// # Ok(()) /// # } /// ``` #[inline] pub async fn bytes(self) -> crate::Result { BodyExt::collect(self.res.into_body()) .await .map(Collected::::to_bytes) } /// Convert the response into a [`Stream`] of [`Bytes`] from the body. /// /// # Example /// /// ``` /// use futures_util::StreamExt; /// /// # async fn run() -> Result<(), Box> { /// let mut stream = wreq::Client::new() /// .get("http://httpbin.org/ip") /// .send() /// .await? /// .bytes_stream(); /// /// while let Some(item) = stream.next().await { /// println!("Chunk: {:?}", item?); /// } /// # Ok(()) /// # } /// ``` /// /// # Optional /// /// This requires the optional `stream` feature to be enabled. #[inline] #[cfg(feature = "stream")] #[cfg_attr(docsrs, doc(cfg(feature = "stream")))] pub fn bytes_stream(self) -> impl Stream> { http_body_util::BodyDataStream::new(self.res.into_body()) } // extension methods /// Returns a reference to the associated extensions. 
/// /// # Example /// /// ``` /// # use wreq::{Client, tls::TlsInfo}; /// # async fn run() -> wreq::Result<()> { /// // Build a client that records TLS information. /// let client = Client::builder() /// .tls_info(true) /// .build()?; /// /// // Make a request. /// let resp = client.get("https://www.google.com").send().await?; /// /// // Take the TlsInfo extension to inspect it. /// if let Some(tls_info) = resp.extensions().get::() { /// // Now you own the TlsInfo and can process it. /// println!("Peer certificate: {:?}", tls_info.peer_certificate()); /// } /// /// # Ok(()) /// # } /// ``` #[inline] pub fn extensions(&self) -> &http::Extensions { self.res.extensions() } /// Returns a mutable reference to the associated extensions. /// /// # Example /// /// ``` /// # use wreq::{Client, tls::TlsInfo}; /// # async fn run() -> wreq::Result<()> { /// // Build a client that records TLS information. /// let client = Client::builder() /// .tls_info(true) /// .build()?; /// /// // Make a request. /// let mut resp = client.get("https://www.google.com").send().await?; /// /// // Take the TlsInfo extension to inspect it. /// if let Some(tls_info) = resp.extensions_mut().remove::() { /// // Now you own the TlsInfo and can process it. /// println!("Peer certificate: {:?}", tls_info.peer_certificate()); /// } /// /// # Ok(()) /// # } /// ``` #[inline] pub fn extensions_mut(&mut self) -> &mut http::Extensions { self.res.extensions_mut() } /// Forbids the [`Response`] connection from being recycled back into the pool. /// /// This marks the underlying connection as "poisoned." Once marked, the connection /// will be discarded instead of reused after the current request-response cycle completes. /// /// # Note on Lifecycle /// Marking the connection does not trigger an immediate shutdown. For pooled /// connections, the physical closure is deferred until the `Response` body /// is dropped or the pool's background cleaner reclaims the resource. 
#[inline] pub fn forbid_recycle(&self) { self.res .extensions() .get::() .map(Connected::poison); } // util methods /// Turn a response into an error if the server returned an error. /// /// # Example /// /// ``` /// # use wreq::Response; /// fn on_response(res: Response) { /// match res.error_for_status() { /// Ok(_res) => (), /// Err(err) => { /// // asserting a 400 as an example /// // it could be any status between 400...599 /// assert_eq!(err.status(), Some(wreq::StatusCode::BAD_REQUEST)); /// } /// } /// } /// # fn main() {} /// ``` pub fn error_for_status(mut self) -> crate::Result { let status = self.status(); if status.is_client_error() || status.is_server_error() { let reason = self.res.extensions_mut().remove::(); Err(Error::status_code(self.uri, status, reason)) } else { Ok(self) } } /// Turn a reference to a response into an error if the server returned an error. /// /// # Example /// /// ``` /// # use wreq::Response; /// fn on_response(res: &Response) { /// match res.error_for_status_ref() { /// Ok(_res) => (), /// Err(err) => { /// // asserting a 400 as an example /// // it could be any status between 400...599 /// assert_eq!(err.status(), Some(wreq::StatusCode::BAD_REQUEST)); /// } /// } /// } /// # fn main() {} /// ``` pub fn error_for_status_ref(&self) -> crate::Result<&Self> { let status = self.status(); if status.is_client_error() || status.is_server_error() { let reason = self.res.extensions().get::().cloned(); Err(Error::status_code(self.uri.clone(), status, reason)) } else { Ok(self) } } /// Consumes the [`Response`] and returns a future for a possible HTTP upgrade. #[inline] pub async fn upgrade(self) -> crate::Result { upgrade::on(self.res).await.map_err(Error::upgrade) } } /// I'm not sure this conversion is that useful... People should be encouraged /// to use [`http::Response`], not `wreq::Response`. 
impl> From> for Response { fn from(r: http::Response) -> Response { let mut res = r.map(Into::into); let uri = res .extensions_mut() .remove::() .unwrap_or_else(|| RequestUri(Uri::from_static("http://no.url.provided.local"))); Response { res, uri: uri.0 } } } /// A [`Response`] can be converted into a [`http::Response`]. // It's supposed to be the inverse of the conversion above. impl From for http::Response { fn from(r: Response) -> http::Response { let mut res = r.res.map(Body::wrap); res.extensions_mut().insert(RequestUri(r.uri)); res } } /// A [`Response`] can be piped as the [`Body`] of another request. impl From for Body { #[inline] fn from(r: Response) -> Body { Body::wrap(r.res.into_body()) } } /// A [`Response`] implements [`HttpBody`] to allow streaming the body. impl HttpBody for Response { type Data = Bytes; type Error = Error; #[inline] fn poll_frame( mut self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll, Self::Error>>> { Pin::new(self.res.body_mut()).poll_frame(cx) } #[inline] fn is_end_stream(&self) -> bool { self.res.body().is_end_stream() } #[inline] fn size_hint(&self) -> http_body::SizeHint { self.res.body().size_hint() } } ================================================ FILE: src/client/ws/json.rs ================================================ use serde::{Serialize, de::DeserializeOwned}; use super::{Message, Utf8Bytes}; use crate::Error; impl Message { /// Tries to serialize the JSON as a [`Message::Text`]. /// /// # Optional /// /// This requires the optional `json` feature enabled. /// /// # Errors /// /// Serialization can fail if `T`'s implementation of `Serialize` decides to /// fail, or if `T` contains a map with non-string keys. #[cfg_attr(docsrs, doc(cfg(feature = "json")))] pub fn text_from_json(json: &T) -> crate::Result { serde_json::to_string(json) .map(Utf8Bytes::from) .map(Message::Text) .map_err(Error::decode) } /// Tries to serialize the JSON as a [`Message::Binary`]. 
/// /// # Optional /// /// This requires that the optional `json` feature is enabled. /// /// # Errors /// /// Serialization can fail if `T`'s implementation of `Serialize` decides to /// fail, or if `T` contains a map with non-string keys. #[cfg_attr(docsrs, doc(cfg(feature = "json")))] pub fn binary_from_json(json: &T) -> crate::Result { serde_json::to_vec(json) .map(bytes::Bytes::from) .map(Message::Binary) .map_err(Error::decode) } /// Tries to deserialize the message body as JSON. /// /// # Optional /// /// This requires that the optional `json` feature is enabled. /// /// # Errors /// /// This method fails whenever the response body is not in `JSON` format, /// or it cannot be properly deserialized to target type `T`. /// /// For more details please see [`serde_json::from_str`] and /// [`serde_json::from_slice`]. #[cfg_attr(docsrs, doc(cfg(feature = "json")))] pub fn json(&self) -> crate::Result { use serde::de::Error as _; match self { Self::Text(x) => serde_json::from_str(x).map_err(Error::decode), Self::Binary(x) => serde_json::from_slice(x).map_err(Error::decode), Self::Ping(_) | Self::Pong(_) | Self::Close { .. } => Err(Error::decode( serde_json::Error::custom("neither text nor binary"), )), } } } #[cfg(test)] mod test { use serde::{Deserialize, Serialize}; use super::Message; #[derive(Default, Serialize, Deserialize)] struct Content { message: String, } #[test] pub fn text_json() -> crate::Result<()> { let content = Content::default(); let message = Message::text_from_json(&content)?; assert!(matches!(message, Message::Text(_))); let _: Content = message.json()?; Ok(()) } #[test] pub fn binary_json() -> crate::Result<()> { let content = Content::default(); let message = Message::binary_from_json(&content)?; assert!(matches!(message, Message::Binary(_))); let _: Content = message.json()?; Ok(()) } } ================================================ FILE: src/client/ws/message.rs ================================================ //! 
WebSocket message types and utilities //! //! This module provides WebSocket message types that wrap the underlying //! tungstenite message implementation, offering a more ergonomic API //! for working with WebSocket communications. use std::{fmt, ops::Deref}; use bytes::Bytes; use super::tungstenite; use crate::Error; /// UTF-8 wrapper for [Bytes]. /// /// An [Utf8Bytes] is always guaranteed to contain valid UTF-8. #[derive(Debug, Clone, PartialEq, Eq, Default)] pub struct Utf8Bytes(pub(super) tungstenite::Utf8Bytes); impl Utf8Bytes { /// Creates from a static str. #[inline] pub const fn from_static(str: &'static str) -> Self { Self(tungstenite::Utf8Bytes::from_static(str)) } /// Returns as a string slice. #[inline] pub fn as_str(&self) -> &str { self.0.as_str() } } impl Deref for Utf8Bytes { type Target = str; /// ``` /// /// Example fn that takes a str slice /// fn a(s: &str) {} /// /// let data = wreq::Utf8Bytes::from_static("foo123"); /// /// // auto-deref as arg /// a(&data); /// /// // deref to str methods /// assert_eq!(data.len(), 6); /// ``` #[inline] fn deref(&self) -> &Self::Target { self.as_str() } } impl fmt::Display for Utf8Bytes { #[inline] fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> fmt::Result { f.write_str(self.as_str()) } } impl TryFrom for Utf8Bytes { type Error = std::str::Utf8Error; #[inline] fn try_from(bytes: Bytes) -> Result { Ok(Self(bytes.try_into()?)) } } impl TryFrom> for Utf8Bytes { type Error = std::str::Utf8Error; #[inline] fn try_from(v: Vec) -> Result { Ok(Self(v.try_into()?)) } } impl From for Utf8Bytes { #[inline] fn from(s: String) -> Self { Self(s.into()) } } impl From<&str> for Utf8Bytes { #[inline] fn from(s: &str) -> Self { Self(s.into()) } } impl From<&String> for Utf8Bytes { #[inline] fn from(s: &String) -> Self { Self(s.into()) } } impl From for Bytes { #[inline] fn from(Utf8Bytes(bytes): Utf8Bytes) -> Self { bytes.into() } } impl PartialEq for Utf8Bytes where for<'a> &'a str: PartialEq, { /// ``` /// let payload = 
wreq::Utf8Bytes::from_static("foo123"); /// assert_eq!(payload, "foo123"); /// assert_eq!(payload, "foo123".to_string()); /// assert_eq!(payload, &"foo123".to_string()); /// assert_eq!(payload, std::borrow::Cow::from("foo123")); /// ``` #[inline] fn eq(&self, other: &T) -> bool { self.as_str() == *other } } /// Status code used to indicate why an endpoint is closing the WebSocket connection. #[derive(Debug, Clone, Eq, PartialEq)] pub struct CloseCode(pub(super) u16); impl CloseCode { //! Constants for [`CloseCode`]s. //! //! [`CloseCode`]: super::CloseCode /// Indicates a normal closure, meaning that the purpose for which the connection was /// established has been fulfilled. pub const NORMAL: CloseCode = CloseCode(1000); /// Indicates that an endpoint is "going away", such as a server going down or a browser having /// navigated away from a page. pub const AWAY: CloseCode = CloseCode(1001); /// Indicates that an endpoint is terminating the connection due to a protocol error. pub const PROTOCOL: CloseCode = CloseCode(1002); /// Indicates that an endpoint is terminating the connection because it has received a type of /// data that it cannot accept. /// /// For example, an endpoint MAY send this if it understands only text data, but receives a /// binary message. pub const UNSUPPORTED: CloseCode = CloseCode(1003); /// Indicates that no status code was included in a closing frame. pub const STATUS: CloseCode = CloseCode(1005); /// Indicates an abnormal closure. pub const ABNORMAL: CloseCode = CloseCode(1006); /// Indicates that an endpoint is terminating the connection because it has received data /// within a message that was not consistent with the type of the message. /// /// For example, an endpoint received non-UTF-8 RFC3629 data within a text message. pub const INVALID: CloseCode = CloseCode(1007); /// Indicates that an endpoint is terminating the connection because it has received a message /// that violates its policy. 
/// /// This is a generic status code that can be returned when there is /// no other more suitable status code (e.g., `UNSUPPORTED` or `SIZE`) or if there is a need to /// hide specific details about the policy. pub const POLICY: CloseCode = CloseCode(1008); /// Indicates that an endpoint is terminating the connection because it has received a message /// that is too big for it to process. pub const SIZE: CloseCode = CloseCode(1009); /// Indicates that an endpoint (client) is terminating the connection because the server /// did not respond to extension negotiation correctly. /// /// Specifically, the client has expected the server to negotiate one or more extension(s), /// but the server didn't return them in the response message of the WebSocket handshake. /// The list of extensions that are needed should be given as the reason for closing. /// Note that this status code is not used by the server, /// because it can fail the WebSocket handshake instead. pub const EXTENSION: CloseCode = CloseCode(1010); /// Indicates that a server is terminating the connection because it encountered an unexpected /// condition that prevented it from fulfilling the request. pub const ERROR: CloseCode = CloseCode(1011); /// Indicates that the server is restarting. pub const RESTART: CloseCode = CloseCode(1012); /// Indicates that the server is overloaded and the client should either connect to a different /// IP (when multiple targets exist), or reconnect to the same IP when a user has performed an /// action. pub const AGAIN: CloseCode = CloseCode(1013); } impl From for u16 { #[inline] fn from(code: CloseCode) -> u16 { code.0 } } impl From for CloseCode { #[inline] fn from(code: u16) -> CloseCode { CloseCode(code) } } /// A struct representing the close command. #[derive(Debug, Clone, Eq, PartialEq)] pub struct CloseFrame { /// The reason as a code. pub code: CloseCode, /// The reason as text string. pub reason: Utf8Bytes, } /// A WebSocket message. 
// // This code comes from https://github.com/snapview/tungstenite-rs/blob/master/src/protocol/message.rs and is under following license: // Copyright (c) 2017 Alexey Galakhov // Copyright (c) 2016 Jason Housley // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. #[derive(Debug, Eq, PartialEq, Clone)] pub enum Message { /// A text WebSocket message Text(Utf8Bytes), /// A binary WebSocket message Binary(Bytes), /// A ping message with the specified payload /// /// The payload here must have a length less than 125 bytes. /// /// Ping messages will be automatically responded to by the server, so you do not have to worry /// about dealing with them yourself. Ping(Bytes), /// A pong message with the specified payload /// /// The payload here must have a length less than 125 bytes. 
/// /// Pong messages will be automatically sent to the client if a ping message is received, so /// you do not have to worry about constructing them yourself unless you want to implement a /// [unidirectional heartbeat](https://tools.ietf.org/html/rfc6455#section-5.5.3). Pong(Bytes), /// A close message with the optional close frame. /// /// You may "uncleanly" close a WebSocket connection at any time /// by simply dropping the [`super::WebSocket`]. /// However, you may also use the graceful closing protocol, in which /// 1. peer A sends a close frame, and does not send any further messages; /// 2. peer B responds with a close frame, and does not send any further messages; /// 3. peer A processes the remaining messages sent by peer B, before finally /// 4. both peers close the connection. /// /// After sending a close frame, /// you may still read messages, /// but attempts to send another message will error. /// After receiving a close frame, /// wreq will automatically respond with a close frame if necessary /// (you do not have to deal with this yourself). /// Since no further messages will be received, /// you may either do nothing /// or explicitly drop the connection. Close(Option), } impl Message { /// Converts this `Message` into a `tungstenite::Message`. /// /// This method transforms the current `Message` instance into its corresponding /// `tungstenite::Message` representation. This is useful when you need to work /// with the `tungstenite` library directly. /// /// # Returns /// /// A `tungstenite::Message` instance that represents the current `Message`. 
pub(super) fn into_tungstenite(self) -> tungstenite::Message { match self { Self::Text(text) => tungstenite::Message::Text(text.0), Self::Binary(binary) => tungstenite::Message::Binary(binary), Self::Ping(ping) => tungstenite::Message::Ping(ping), Self::Pong(pong) => tungstenite::Message::Pong(pong), Self::Close(Some(close)) => { tungstenite::Message::Close(Some(tungstenite::protocol::CloseFrame { code: tungstenite::protocol::frame::coding::CloseCode::from(close.code.0), reason: close.reason.0, })) } Self::Close(None) => tungstenite::Message::Close(None), } } /// Converts a `tungstenite::Message` into an `Option`. /// /// This method transforms a given `tungstenite::Message` into its corresponding /// `Message` representation. This is useful when you need to convert messages /// received from the `tungstenite` library into the `Message` type used by this /// library. /// /// # Arguments /// /// * `message` - The `tungstenite::Message` to convert. /// /// # Returns /// /// An `Option` instance that represents the given `tungstenite::Message`. /// Returns `None` if the message is a `Frame` frame, as recommended by the /// `tungstenite` maintainers. pub(super) fn from_tungstenite(message: tungstenite::Message) -> Option { match message { tungstenite::Message::Text(text) => Some(Self::Text(Utf8Bytes(text))), tungstenite::Message::Binary(binary) => Some(Self::Binary(binary)), tungstenite::Message::Ping(ping) => Some(Self::Ping(ping)), tungstenite::Message::Pong(pong) => Some(Self::Pong(pong)), tungstenite::Message::Close(Some(close)) => Some(Self::Close(Some(CloseFrame { code: CloseCode(close.code.into()), reason: Utf8Bytes(close.reason), }))), tungstenite::Message::Close(None) => Some(Self::Close(None)), // we can ignore `Frame` frames as recommended by the tungstenite maintainers // https://github.com/snapview/tungstenite-rs/issues/268 tungstenite::Message::Frame(_) => None, } } /// Consume the WebSocket and return it as binary data. 
pub fn into_data(self) -> Bytes { match self { Self::Text(string) => Bytes::from(string), Self::Binary(data) | Self::Ping(data) | Self::Pong(data) => data, Self::Close(None) => Bytes::new(), Self::Close(Some(frame)) => Bytes::from(frame.reason), } } /// Attempt to consume the WebSocket message and convert it to a Utf8Bytes. pub fn into_text(self) -> crate::Result { match self { Self::Text(string) => Ok(string), Self::Binary(data) | Self::Ping(data) | Self::Pong(data) => { Utf8Bytes::try_from(data).map_err(Error::decode) } Self::Close(None) => Ok(Utf8Bytes::default()), Self::Close(Some(frame)) => Ok(frame.reason), } } /// Attempt to get a &str from the WebSocket message, /// this will try to convert binary data to utf8. pub fn to_text(&self) -> crate::Result<&str> { match *self { Self::Text(ref string) => Ok(string.as_str()), Self::Binary(ref data) | Self::Ping(ref data) | Self::Pong(ref data) => { std::str::from_utf8(data).map_err(Error::decode) } Self::Close(None) => Ok(""), Self::Close(Some(ref frame)) => Ok(&frame.reason), } } } impl Message { /// Create a new text WebSocket message from a stringable. pub fn text(string: S) -> Message where S: Into, { Message::Text(string.into()) } /// Create a new binary WebSocket message by converting to `Bytes`. pub fn binary(bin: B) -> Message where B: Into, { Message::Binary(bin.into()) } /// Create a new ping WebSocket message by converting to `Bytes`. pub fn ping(bin: B) -> Message where B: Into, { Message::Ping(bin.into()) } /// Create a new pong WebSocket message by converting to `Bytes`. pub fn pong(bin: B) -> Message where B: Into, { Message::Pong(bin.into()) } /// Create a new close WebSocket message with an optional close frame. 
pub fn close(close: C) -> Message where C: Into>, { Message::Close(close.into()) } } impl From for Message { fn from(string: String) -> Self { Message::Text(string.into()) } } impl<'s> From<&'s str> for Message { fn from(string: &'s str) -> Self { Message::Text(string.into()) } } impl<'b> From<&'b [u8]> for Message { fn from(data: &'b [u8]) -> Self { Message::Binary(Bytes::copy_from_slice(data)) } } impl From> for Message { fn from(data: Vec) -> Self { Message::Binary(data.into()) } } impl From for Vec { fn from(msg: Message) -> Self { msg.into_data().to_vec() } } ================================================ FILE: src/client/ws.rs ================================================ //! WebSocket Upgrade #[cfg(feature = "json")] mod json; pub mod message; use std::{ borrow::Cow, fmt, net::{IpAddr, Ipv4Addr, Ipv6Addr}, ops::{Deref, DerefMut}, pin::Pin, task::{Context, Poll, ready}, }; use bytes::Bytes; use futures_util::{Sink, SinkExt, Stream, StreamExt, stream::FusedStream}; use http::{ HeaderMap, HeaderName, HeaderValue, Method, StatusCode, Uri, Version, header, uri::Scheme, }; use http2::ext::Protocol; use pin_project_lite::pin_project; use tokio_tungstenite::tungstenite::{ self, protocol::{self, CloseFrame, WebSocketConfig}, }; use self::message::{CloseCode, Message, Utf8Bytes}; use super::{emulate::IntoEmulation, request::RequestBuilder, response::Response}; use crate::{Error, Upgraded, header::OrigHeaderMap, proxy::Proxy}; /// A WebSocket stream. type WebSocketStream = tokio_tungstenite::WebSocketStream; /// Wrapper for [`RequestBuilder`] that performs the /// websocket handshake when sent. pub struct WebSocketRequestBuilder { inner: RequestBuilder, accept_key: Option>, protocols: Option>>, config: WebSocketConfig, } impl WebSocketRequestBuilder { /// Creates a new WebSocket request builder. 
pub fn new(inner: RequestBuilder) -> Self { Self { inner: inner.version(Version::HTTP_11), accept_key: None, protocols: None, config: WebSocketConfig::default(), } } /// Sets a custom WebSocket accept key. /// /// This method allows you to set a custom WebSocket accept key for the connection. /// /// # Arguments /// /// * `key` - The custom WebSocket accept key to set. /// /// # Returns /// /// * `Self` - The modified instance with the custom WebSocket accept key. #[inline] pub fn accept_key(mut self, key: K) -> Self where K: Into>, { self.accept_key = Some(key.into()); self } /// Set HTTP version /// /// Configures the HTTP version used for the WebSocket handshake. /// Defaults to HTTP/1.1. /// /// # HTTP/1.1 (default) /// /// - Uses the standard `Upgrade: websocket` mechanism (RFC 6455) /// - Sends an HTTP `GET` request with `Connection: Upgrade` and `Upgrade: websocket` headers /// - Widely supported by servers /// /// # HTTP/2 /// /// - Uses the Extended CONNECT Protocol (RFC 8441) /// - Sends a `CONNECT` request with the `:protocol: websocket` pseudo-header instead of the /// traditional upgrade mechanism /// - Requires explicit server support for HTTP/2 WebSocket connections /// - Will fail if the server does not support HTTP/2 WebSocket upgrade #[inline] pub fn version(mut self, version: Version) -> Self { self.inner = self.inner.version(version); self } /// Sets the websocket subprotocols to request. /// /// This method allows you to specify the subprotocols that the websocket client /// should request during the handshake. Subprotocols are used to define the type /// of communication expected over the websocket connection. #[inline] pub fn protocols

(mut self, protocols: P) -> Self where P: IntoIterator, P::Item: Into>, { let protocols = protocols.into_iter().map(Into::into).collect(); self.protocols = Some(protocols); self } /// Sets the websocket max_frame_size configuration. #[inline] pub fn max_frame_size(mut self, max_frame_size: usize) -> Self { self.config.max_frame_size = Some(max_frame_size); self } /// Sets the websocket read_buffer_size configuration. #[inline] pub fn read_buffer_size(mut self, read_buffer_size: usize) -> Self { self.config.read_buffer_size = read_buffer_size; self } /// Sets the websocket write_buffer_size configuration. #[inline] pub fn write_buffer_size(mut self, write_buffer_size: usize) -> Self { self.config.write_buffer_size = write_buffer_size; self } /// Sets the websocket max_write_buffer_size configuration. #[inline] pub fn max_write_buffer_size(mut self, max_write_buffer_size: usize) -> Self { self.config.max_write_buffer_size = max_write_buffer_size; self } /// Sets the websocket max_message_size configuration. #[inline] pub fn max_message_size(mut self, max_message_size: usize) -> Self { self.config.max_message_size = Some(max_message_size); self } /// Sets the websocket accept_unmasked_frames configuration. #[inline] pub fn accept_unmasked_frames(mut self, accept_unmasked_frames: bool) -> Self { self.config.accept_unmasked_frames = accept_unmasked_frames; self } /// Add a `Header` to this Request. /// /// If the header is already present, the value will be replaced. #[inline] pub fn header(mut self, key: K, value: V) -> Self where HeaderName: TryFrom, >::Error: Into, HeaderValue: TryFrom, >::Error: Into, { self.inner = self.inner.header(key, value); self } /// Add a set of Headers to the existing ones on this Request. /// /// The headers will be merged in to any already set. #[inline] pub fn headers(mut self, headers: HeaderMap) -> Self { self.inner = self.inner.headers(headers); self } /// Set the original headers for this request. 
#[inline] pub fn orig_headers(mut self, orig_headers: OrigHeaderMap) -> Self { self.inner = self.inner.orig_headers(orig_headers); self } /// Enable or disable client default headers for this request. /// /// By default, client default headers are included. Set to `false` to skip them. pub fn default_headers(mut self, enable: bool) -> Self { self.inner = self.inner.default_headers(enable); self } /// Enable HTTP authentication. /// /// ```rust /// # use wreq::Error; /// # /// # async fn run() -> Result<(), Error> { /// let client = wreq::Client::new(); /// let resp = client /// .websocket("http://httpbin.org/get") /// .auth("your_token_here") /// .send() /// .await?; /// # Ok(()) /// # } /// ``` #[inline] pub fn auth(mut self, value: V) -> Self where HeaderValue: TryFrom, >::Error: Into, { self.inner = self.inner.auth(value); self } /// Enable HTTP basic authentication. /// /// ```rust /// # use wreq::Error; /// /// # async fn run() -> Result<(), Error> { /// let client = wreq::Client::new(); /// let resp = client /// .websocket("http://httpbin.org/delete") /// .basic_auth("admin", Some("good password")) /// .send() /// .await?; /// # Ok(()) /// # } /// ``` #[inline] pub fn basic_auth(mut self, username: U, password: Option

) -> Self where U: fmt::Display, P: fmt::Display, { self.inner = self.inner.basic_auth(username, password); self } /// Enable HTTP bearer authentication. /// /// ```rust /// # use wreq::Error; /// # /// # async fn run() -> Result<(), Error> { /// let client = wreq::Client::new(); /// let resp = client /// .websocket("http://httpbin.org/get") /// .bearer_auth("your_token_here") /// .send() /// .await?; /// # Ok(()) /// # } /// ``` #[inline] pub fn bearer_auth(mut self, token: T) -> Self where T: fmt::Display, { self.inner = self.inner.bearer_auth(token); self } /// Modify the query string of the URI. /// /// Modifies the URI of this request, adding the parameters provided. /// This method appends and does not overwrite. This means that it can /// be called multiple times and that existing query parameters are not /// overwritten if the same key is used. The key will simply show up /// twice in the query string. /// Calling `.query(&[("foo", "a"), ("foo", "b")])` gives `"foo=a&foo=b"`. /// /// # Note /// This method does not support serializing a single key-value /// pair. Instead of using `.query(("key", "val"))`, use a sequence, such /// as `.query(&[("key", "val")])`. It's also possible to serialize structs /// and maps into a key-value pair. /// /// # Errors /// This method will fail if the object you provide cannot be serialized /// into a query string. #[inline] #[cfg(feature = "query")] #[cfg_attr(docsrs, doc(cfg(feature = "query")))] pub fn query(mut self, query: &T) -> Self { self.inner = self.inner.query(query); self } /// Set the proxy for this request. #[inline] pub fn proxy(mut self, proxy: Proxy) -> Self { self.inner = self.inner.proxy(proxy); self } /// Set the local address for this request. #[inline] pub fn local_address(mut self, local_address: V) -> Self where V: Into>, { self.inner = self.inner.local_address(local_address); self } /// Set the local addresses for this request. 
#[inline] pub fn local_addresses(mut self, ipv4: V4, ipv6: V6) -> Self where V4: Into>, V6: Into>, { self.inner = self.inner.local_addresses(ipv4, ipv6); self } /// Bind connections only on the specified network interface. /// /// This option is only available on the following operating systems: /// /// - Android /// - Fuchsia /// - Linux, /// - macOS and macOS-like systems (iOS, tvOS, watchOS and visionOS) /// - Solaris and illumos /// /// On Android, Linux, and Fuchsia, this uses the /// [`SO_BINDTODEVICE`][man-7-socket] socket option. On macOS and macOS-like /// systems, Solaris, and illumos, this instead uses the [`IP_BOUND_IF` and /// `IPV6_BOUND_IF`][man-7p-ip] socket options (as appropriate). /// /// Note that connections will fail if the provided interface name is not a /// network interface that currently exists when a connection is established. /// /// # Example /// /// ``` /// # fn doc() -> Result<(), wreq::Error> { /// let interface = "lo"; /// let client = wreq::Client::builder() /// .interface(interface) /// .build()?; /// # Ok(()) /// # } /// ``` /// /// [man-7-socket]: https://man7.org/linux/man-pages/man7/socket.7.html /// [man-7p-ip]: https://docs.oracle.com/cd/E86824_01/html/E54777/ip-7p.html #[cfg(any( target_os = "android", target_os = "fuchsia", target_os = "illumos", target_os = "ios", target_os = "linux", target_os = "macos", target_os = "solaris", target_os = "tvos", target_os = "visionos", target_os = "watchos", ))] #[cfg_attr( docsrs, doc(cfg(any( target_os = "android", target_os = "fuchsia", target_os = "illumos", target_os = "ios", target_os = "linux", target_os = "macos", target_os = "solaris", target_os = "tvos", target_os = "visionos", target_os = "watchos", ))) )] pub fn interface(mut self, interface: I) -> Self where I: Into>, { self.inner = self.inner.interface(interface); self } /// Sets the request builder to emulation the specified HTTP context. 
/// /// This method sets the necessary headers, HTTP/1 and HTTP/2 options configurations, and TLS /// options config to use the specified HTTP context. It allows the client to mimic the /// behavior of different versions or setups, which can be useful for testing or ensuring /// compatibility with various environments. /// /// # Note /// This will overwrite the existing configuration. /// You must set emulation before you can perform subsequent HTTP1/HTTP2/TLS fine-tuning. #[inline] pub fn emulation(mut self, emulation: T) -> Self { self.inner = self.inner.emulation(emulation); self } /// Sends the request and returns and [`WebSocketResponse`]. pub async fn send(self) -> Result { let (client, request) = self.inner.build_split(); let mut request = request?; // Ensure the scheme is http or https let uri = request.uri_mut(); let scheme = match uri.scheme_str() { Some("ws") => Some(Scheme::HTTP), Some("wss") => Some(Scheme::HTTPS), _ => None, }; if scheme.is_some() { let mut parts = uri.clone().into_parts(); parts.scheme = scheme; *uri = Uri::from_parts(parts).map_err(Error::builder)?; } // Get the version of the request let version = request.version(); // Set the headers for the websocket handshake let headers = request.headers_mut(); headers.insert( header::SEC_WEBSOCKET_VERSION, HeaderValue::from_static("13"), ); // Ensure the request is HTTP 1.1/HTTP 2 let accept_key = match version { Some(Version::HTTP_10 | Version::HTTP_11) => { // Generate a nonce if one wasn't provided let nonce = self .accept_key .unwrap_or_else(|| Cow::Owned(tungstenite::handshake::client::generate_key())); headers.insert(header::UPGRADE, HeaderValue::from_static("websocket")); headers.insert(header::CONNECTION, HeaderValue::from_static("upgrade")); headers.insert( header::SEC_WEBSOCKET_KEY, HeaderValue::from_str(&nonce).map_err(Error::builder)?, ); *request.method_mut() = Method::GET; *request.version_mut() = Some(Version::HTTP_11); Some(nonce) } Some(Version::HTTP_2) => { 
*request.method_mut() = Method::CONNECT; *request.version_mut() = Some(Version::HTTP_2); request .extensions_mut() .insert(Protocol::from_static("websocket")); None } unsupported => { return Err(Error::upgrade(format!( "unsupported version: {unsupported:?}" ))); } }; // Set websocket subprotocols if let Some(ref protocols) = self.protocols { // Sets subprotocols if !protocols.is_empty() { let subprotocols = protocols .iter() .map(|s| s.as_ref()) .collect::>() .join(", "); request.headers_mut().insert( header::SEC_WEBSOCKET_PROTOCOL, HeaderValue::from_maybe_shared(Bytes::from(subprotocols)) .map_err(Error::builder)?, ); } } client .execute(request) .await .map(|inner| WebSocketResponse { inner, accept_key, protocols: self.protocols, config: self.config, }) } } /// The server's response to the websocket upgrade request. /// /// This implements `Deref`, so you can access all the usual /// information from the [`Response`]. #[derive(Debug)] pub struct WebSocketResponse { inner: Response, accept_key: Option>, protocols: Option>>, config: WebSocketConfig, } impl Deref for WebSocketResponse { type Target = Response; fn deref(&self) -> &Self::Target { &self.inner } } impl DerefMut for WebSocketResponse { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } impl WebSocketResponse { /// Turns the response into a websocket. This checks if the websocket /// handshake was successful. 
// NOTE(review): this extracted chunk appears to have had many generic-parameter
// lists stripped by the extraction tool (e.g. `Result`, `Option>`, `Poll>`,
// `impl Sink for WebSocket`). Tokens are kept exactly as found and only
// comments were added — reconcile against the original source before compiling.

/// Completes the WebSocket handshake and turns this response into a connected
/// WebSocket stream.
///
/// Validation depends on the negotiated HTTP version:
/// - HTTP/1.0 / HTTP/1.1: classic `Upgrade` handshake — requires status
///   `101 Switching Protocols`, `Connection: upgrade`, `Upgrade: websocket`,
///   and a `Sec-WebSocket-Accept` value derived from the client nonce.
/// - HTTP/2: Extended CONNECT (RFC 8441) — only requires status `200 OK`.
///
/// After version-specific checks, the replied `Sec-WebSocket-Protocol` is
/// validated against the set of requested subprotocols, then the underlying
/// connection is upgraded and wrapped in a client-role `WebSocketStream`.
pub async fn into_websocket(self) -> Result {
    let (inner, protocol) = {
        let status = self.inner.status();
        let headers = self.inner.headers();
        match self.inner.version() {
            // HTTP/1.0 and HTTP/1.1 use the traditional upgrade mechanism
            Version::HTTP_10 | Version::HTTP_11 => {
                if status != StatusCode::SWITCHING_PROTOCOLS {
                    return Err(Error::upgrade(format!("unexpected status code: {status}")));
                }
                if !header_contains(self.inner.headers(), header::CONNECTION, "upgrade") {
                    return Err(Error::upgrade("missing connection header"));
                }
                if !header_eq(self.inner.headers(), header::UPGRADE, "websocket") {
                    return Err(Error::upgrade("invalid upgrade header"));
                }
                // Verify the server echoed back the expected accept key
                // (SHA-1/base64 derivation of our nonce, per RFC 6455).
                match self
                    .accept_key
                    .zip(headers.get(header::SEC_WEBSOCKET_ACCEPT))
                {
                    Some((nonce, header)) => {
                        if !header.to_str().is_ok_and(|s| {
                            s == tungstenite::handshake::derive_accept_key(nonce.as_bytes())
                        }) {
                            return Err(Error::upgrade(format!(
                                "invalid accept key: {header:?}"
                            )));
                        }
                    }
                    None => {
                        return Err(Error::upgrade("missing accept key"));
                    }
                }
            }
            // HTTP/2 uses the Extended CONNECT Protocol (RFC 8441)
            // See: https://datatracker.ietf.org/doc/html/rfc8441
            Version::HTTP_2 => {
                if status != StatusCode::OK {
                    return Err(Error::upgrade(format!("unexpected status code: {status}")));
                }
            }
            _ => {
                return Err(Error::upgrade(format!(
                    "unsupported version: {:?}",
                    self.inner.version()
                )));
            }
        }
        // Subprotocol negotiation: the server's reply must be one of the
        // protocols we offered, and must be absent if we offered none.
        let protocol = headers.get(header::SEC_WEBSOCKET_PROTOCOL).cloned();
        let requested = self.protocols.as_ref().filter(|p| !p.is_empty());
        let replied = protocol.as_ref().and_then(|v| v.to_str().ok());
        match (requested, replied) {
            // okay, we requested protocols and got one back
            (Some(req), Some(rep)) => {
                if !req.contains(&Cow::Borrowed(rep)) {
                    return Err(Error::upgrade(format!("invalid protocol: {rep}")));
                }
            }
            // server didn't reply with a protocol
            (Some(_), None) => {
                return Err(Error::upgrade(format!(
                    "missing protocol: {:?}",
                    self.protocols
                )));
            }
            // we didn't request any protocols, but got one anyway
            (None, Some(_)) => {
                return Err(Error::upgrade(format!("invalid protocol: {protocol:?}")));
            }
            // we didn't request any protocols, so we don't expect one
            (None, None) => {}
        };
        // Take over the raw upgraded connection; the handshake has already
        // happened above, so the stream is created in "from_raw_socket" mode.
        let inner = WebSocketStream::from_raw_socket(
            self.inner.upgrade().await?,
            protocol::Role::Client,
            Some(self.config),
        )
        .await;
        (inner, protocol)
    };
    Ok(WebSocket { inner, protocol })
}
}

/// Checks if the header value is equal to the given value.
// Case-insensitive byte comparison; absent header compares unequal.
fn header_eq(headers: &HeaderMap, key: HeaderName, value: &'static str) -> bool {
    if let Some(header) = headers.get(&key) {
        header.as_bytes().eq_ignore_ascii_case(value.as_bytes())
    } else {
        false
    }
}

/// Checks if the header value contains the given value.
// Used for `Connection: keep-alive, upgrade`-style lists; `value` is expected
// to be lowercase since the header is lowercased before the substring test.
fn header_contains(headers: &HeaderMap, key: HeaderName, value: &'static str) -> bool {
    let header = if let Some(header) = headers.get(&key) {
        header
    } else {
        return false;
    };
    if let Ok(header) = std::str::from_utf8(header.as_bytes()) {
        header.to_ascii_lowercase().contains(value)
    } else {
        false
    }
}

pin_project! {
    /// A websocket connection
    #[derive(Debug)]
    pub struct WebSocket {
        // Pinned because the stream is polled through `Pin<&mut Self>` in the
        // `Sink`/`Stream` impls below.
        #[pin]
        inner: WebSocketStream,
        // Subprotocol the server selected during the handshake, if any.
        protocol: Option,
    }
}

impl WebSocket {
    /// Return the selected WebSocket subprotocol, if one has been chosen.
    #[inline]
    pub fn protocol(&self) -> Option<&HeaderValue> {
        self.protocol.as_ref()
    }

    /// Receive another message.
    ///
    /// Returns `None` if the stream has closed.
    #[inline]
    pub async fn recv(&mut self) -> Option> {
        self.next().await
    }

    /// Send a message.
    #[inline]
    pub async fn send(&mut self, msg: Message) -> Result<(), Error> {
        self.inner
            .send(msg.into_tungstenite())
            .await
            .map_err(Error::websocket)
    }

    /// Consumes the [`WebSocket`] and returns the underlying stream.
    #[inline]
    pub fn into_inner(self) -> Upgraded {
        self.inner.into_inner()
    }

    /// Closes the connection with a given code and (optional) reason.
    // Consumes `self`: after a close frame is sent the socket is done.
    pub async fn close(mut self, code: C, reason: R) -> Result<(), Error>
    where
        C: Into,
        R: Into,
    {
        let close_frame = CloseFrame {
            // `.0` unwraps the newtype wrappers around the tungstenite
            // close-code / reason types before converting.
            code: code.into().0.into(),
            reason: reason.into().0,
        };
        self.inner
            .close(Some(close_frame))
            .await
            .map_err(Error::websocket)
    }
}

// Sink of outgoing messages; all errors are normalized to `Error::websocket`.
impl Sink for WebSocket {
    type Error = Error;

    #[inline]
    fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> {
        self.project()
            .inner
            .poll_ready(cx)
            .map_err(Error::websocket)
    }

    #[inline]
    fn start_send(self: Pin<&mut Self>, item: Message) -> Result<(), Self::Error> {
        self.project()
            .inner
            .start_send(item.into_tungstenite())
            .map_err(Error::websocket)
    }

    #[inline]
    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> {
        self.project()
            .inner
            .poll_flush(cx)
            .map_err(Error::websocket)
    }

    #[inline]
    fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> {
        self.project()
            .inner
            .poll_close(cx)
            .map_err(Error::websocket)
    }
}

impl FusedStream for WebSocket {
    #[inline]
    fn is_terminated(&self) -> bool {
        self.inner.is_terminated()
    }
}

// Stream of incoming messages. Messages that `Message::from_tungstenite`
// filters out (returns `None` for) are skipped and polling continues.
impl Stream for WebSocket {
    type Item = Result;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> {
        loop {
            match ready!(self.inner.poll_next_unpin(cx)) {
                Some(Ok(msg)) => {
                    if let Some(msg) = Message::from_tungstenite(msg) {
                        return Poll::Ready(Some(Ok(msg)));
                    }
                }
                // NOTE(review): read errors here map through `Error::body`
                // while every other path uses `Error::websocket` — confirm
                // this asymmetry is intentional.
                Some(Err(err)) => return Poll::Ready(Some(Err(Error::body(err)))),
                None => return Poll::Ready(None),
            }
        }
    }
}

================================================
FILE: src/client.rs
================================================
mod body;
mod conn;
mod core;
mod emulate;
mod group;
mod request;
mod response;

pub mod future;
pub mod layer;
#[cfg(feature = "multipart")]
pub mod multipart;
#[cfg(feature = "ws")]
pub mod ws;

use std::{
    borrow::Cow,
    collections::HashMap,
    convert::TryInto,
    net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr},
    num::NonZeroUsize,
    sync::Arc,
    task::{Context, Poll},
    time::Duration,
};

use http::header::{HeaderMap, HeaderValue, USER_AGENT};
use tower::{
    BoxError,
Layer, Service, ServiceBuilder, ServiceExt,
    retry::{Retry, RetryLayer},
    util::{BoxCloneSyncService, BoxCloneSyncServiceLayer, Either, Oneshot},
};

#[cfg(any(
    feature = "gzip",
    feature = "zstd",
    feature = "brotli",
    feature = "deflate",
))]
use self::layer::decoder::{AcceptEncoding, DecompressionLayer};
#[cfg(feature = "ws")]
use self::ws::WebSocketRequestBuilder;
pub use self::{
    body::Body,
    core::{http1, http2, upgrade::Upgraded},
    emulate::{Emulation, EmulationBuilder, IntoEmulation},
    group::Group,
    request::{Request, RequestBuilder},
    response::Response,
};
use self::{
    conn::{
        BoxedConnectorLayer, BoxedConnectorService, Conn, Connector, HttpTransport,
        SocketBindOptions, Unnameable,
    },
    core::{
        body::Incoming,
        rt::{TokioExecutor, TokioTimer},
    },
    future::Pending,
    layer::{
        client::HttpClient,
        config::{ConfigService, ConfigServiceLayer},
        redirect::{FollowRedirect, FollowRedirectLayer},
        retry::RetryPolicy,
        timeout::{
            ResponseBodyTimeout, ResponseBodyTimeoutLayer, Timeout, TimeoutBody, TimeoutLayer,
            TimeoutOptions,
        },
    },
};
pub(crate) use self::{
    conn::{
        Connected, Connection,
        descriptor::{ConnectionDescriptor, ConnectionId},
    },
    core::Error as CoreError,
};
#[cfg(feature = "cookies")]
use crate::cookie;
#[cfg(feature = "hickory-dns")]
use crate::dns::hickory::HickoryDnsResolver;
use crate::{
    IntoUri, Method, Proxy,
    dns::{DnsResolverWithOverrides, DynResolver, GaiResolver, IntoResolve, Resolve},
    error::{self, Error},
    header::OrigHeaderMap,
    http1::Http1Options,
    http2::Http2Options,
    proxy::Matcher as ProxyMatcher,
    redirect::{self, FollowRedirectPolicy},
    retry,
    tls::{
        AlpnProtocol, TlsOptions, TlsVersion,
        keylog::KeyLog,
        session::{IntoTlsSessionCache, TlsSessionCache},
        trust::{CertStore, Identity},
    },
};

// NOTE(review): the type-alias and struct definitions below appear to have had
// generic parameters stripped by the extraction tool (e.g. `type Decompression
// = T;`, `Arc>`); tokens are preserved exactly and only comments were added.

/// Decompression service type. Identity type when compression features are disabled.
#[cfg(not(any(
    feature = "gzip",
    feature = "zstd",
    feature = "brotli",
    feature = "deflate"
)))]
type Decompression = T;

/// Service wrapper that handles response body decompression.
#[cfg(any(
    feature = "gzip",
    feature = "zstd",
    feature = "brotli",
    feature = "deflate"
))]
type Decompression = self::layer::decoder::Decompression;

/// Response body type with timeout and optional decompression.
#[cfg(any(
    feature = "gzip",
    feature = "zstd",
    feature = "brotli",
    feature = "deflate"
))]
type ResponseBody = TimeoutBody>;

/// Response body type with timeout only (no compression features).
#[cfg(not(any(
    feature = "gzip",
    feature = "zstd",
    feature = "brotli",
    feature = "deflate"
)))]
type ResponseBody = TimeoutBody;

/// The complete HTTP client service stack with all middleware layers.
// Outermost to innermost: total timeout -> body timeout -> default-config
// injection -> decompression -> retry -> redirect-following -> transport.
type ClientService = Timeout<
    ResponseBodyTimeout<
        ConfigService<
            Decompression<
                Retry<
                    RetryPolicy,
                    FollowRedirect, FollowRedirectPolicy>,
                >,
            >,
        >,
    >,
>;

/// Type-erased client service for dynamic middleware composition.
type BoxedClientService = BoxCloneSyncService, http::Response, BoxError>;

/// Layer type for wrapping boxed client services with additional middleware.
type BoxedClientLayer = BoxCloneSyncServiceLayer<
    BoxedClientService,
    http::Request,
    http::Response,
    BoxError,
>;

/// An [`Client`] to make Requests with.
///
/// The Client has various configuration values to tweak, but the defaults
/// are set to what is usually the most commonly desired value. To configure a
/// [`Client`], use [`Client::builder()`].
///
/// The [`Client`] holds a connection pool internally, so it is advised that
/// you create one and **reuse** it.
///
/// You do **not** have to wrap the [`Client`] in an [`Rc`] or [`Arc`] to **reuse** it,
/// because it already uses an [`Arc`] internally.
///
/// [`Rc`]: std::rc::Rc
#[derive(Clone)]
#[repr(transparent)]
pub struct Client(Arc>);

/// A [`ClientBuilder`] can be used to create a [`Client`] with custom configuration.
#[must_use]
pub struct ClientBuilder {
    config: Config,
}

/// The HTTP version preference for the client.
#[repr(u8)]
enum HttpVersionPref {
    /// Use HTTP/1 exclusively.
    Http1,
    /// Use HTTP/2 exclusively.
    Http2,
    /// Negotiate the version (via ALPN where applicable).
    All,
}

// Builder-time configuration accumulated by `ClientBuilder` and consumed by
// `ClientBuilder::build`. Field types below were partially mangled by
// extraction (stripped generics) and are reproduced verbatim.
struct Config {
    // First builder error encountered; surfaced by `build()`.
    error: Option,
    headers: HeaderMap,
    orig_headers: OrigHeaderMap,
    #[cfg(any(
        feature = "gzip",
        feature = "zstd",
        feature = "brotli",
        feature = "deflate",
    ))]
    accept_encoding: AcceptEncoding,
    connect_timeout: Option,
    connection_verbose: bool,
    pool_idle_timeout: Option,
    pool_max_idle_per_host: usize,
    pool_max_size: Option,
    tcp_nodelay: bool,
    tcp_reuse_address: bool,
    tcp_keepalive: Option,
    tcp_keepalive_interval: Option,
    tcp_keepalive_retries: Option,
    #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))]
    tcp_user_timeout: Option,
    tcp_send_buffer_size: Option,
    tcp_recv_buffer_size: Option,
    tcp_happy_eyeballs_timeout: Option,
    socket_bind_options: SocketBindOptions,
    proxies: Vec,
    // When true, `build()` appends the system proxy matcher.
    auto_sys_proxy: bool,
    retry_policy: retry::Policy,
    redirect_policy: redirect::Policy,
    referer: bool,
    timeout_options: TimeoutOptions,
    #[cfg(feature = "cookies")]
    cookie_store: Option>,
    #[cfg(feature = "hickory-dns")]
    hickory_dns: bool,
    dns_overrides: HashMap, Vec>,
    dns_resolver: Option>,
    http_version_pref: HttpVersionPref,
    https_only: bool,
    // User-supplied tower layers applied around the core service stack.
    layers: Vec,
    connector_layers: Vec,
    tls_keylog: Option,
    tls_info: bool,
    tls_sni: bool,
    tls_verify_hostname: bool,
    tls_identity: Option,
    tls_cert_store: CertStore,
    tls_cert_verification: bool,
    tls_min_version: Option,
    tls_max_version: Option,
    tls_session_cache: Option>,
    tls_options: Option,
    http1_options: Option,
    http2_options: Option,
}

// ===== impl Client =====

impl Default for Client {
    fn default() -> Self {
        Self::new()
    }
}

impl Client {
    /// Constructs a new [`Client`].
    ///
    /// # Panics
    ///
    /// This method panics if a TLS backend cannot be initialized, or the resolver
    /// cannot load the system configuration.
    ///
    /// Use [`Client::builder()`] if you wish to handle the failure as an [`Error`]
    /// instead of panicking.
    #[inline]
    pub fn new() -> Client {
        Client::builder().build().expect("Client::new()")
    }

    /// Creates a [`ClientBuilder`] to configure a [`Client`].
// Returns a builder pre-populated with the library's defaults (see the
// individual setter methods below for what each default means).
    pub fn builder() -> ClientBuilder {
        ClientBuilder {
            config: Config {
                error: None,
                headers: HeaderMap::new(),
                orig_headers: OrigHeaderMap::new(),
                #[cfg(any(
                    feature = "gzip",
                    feature = "zstd",
                    feature = "brotli",
                    feature = "deflate",
                ))]
                accept_encoding: AcceptEncoding::default(),
                connect_timeout: None,
                connection_verbose: false,
                pool_idle_timeout: Some(Duration::from_secs(90)),
                pool_max_idle_per_host: usize::MAX,
                pool_max_size: None,
                tcp_keepalive: Some(Duration::from_secs(15)),
                tcp_keepalive_interval: Some(Duration::from_secs(15)),
                tcp_keepalive_retries: Some(3),
                #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))]
                tcp_user_timeout: Some(Duration::from_secs(30)),
                tcp_nodelay: true,
                tcp_reuse_address: false,
                tcp_send_buffer_size: None,
                tcp_recv_buffer_size: None,
                tcp_happy_eyeballs_timeout: Some(Duration::from_millis(300)),
                socket_bind_options: SocketBindOptions::default(),
                proxies: Vec::new(),
                // System proxy detection is on until an explicit proxy is set.
                auto_sys_proxy: true,
                retry_policy: retry::Policy::default(),
                redirect_policy: redirect::Policy::none(),
                referer: true,
                timeout_options: TimeoutOptions::default(),
                #[cfg(feature = "hickory-dns")]
                hickory_dns: cfg!(feature = "hickory-dns"),
                #[cfg(feature = "cookies")]
                cookie_store: None,
                dns_overrides: HashMap::new(),
                dns_resolver: None,
                http_version_pref: HttpVersionPref::All,
                https_only: false,
                http1_options: None,
                http2_options: None,
                layers: Vec::new(),
                connector_layers: Vec::new(),
                tls_keylog: None,
                tls_info: false,
                tls_sni: true,
                tls_verify_hostname: true,
                tls_identity: None,
                tls_cert_store: CertStore::default(),
                tls_cert_verification: true,
                tls_min_version: None,
                tls_max_version: None,
                tls_session_cache: None,
                tls_options: None,
            },
        }
    }

    /// Convenience method to make a `GET` request to a URI.
    ///
    /// # Errors
    ///
    /// This method fails whenever the supplied `Uri` cannot be parsed.
    // NOTE(review): generic parameter lists on these convenience methods (e.g.
    // `<U: IntoUri>`) appear stripped by extraction; tokens kept verbatim.
    #[inline]
    pub fn get(&self, uri: U) -> RequestBuilder {
        self.request(Method::GET, uri)
    }

    /// Convenience method to make a `POST` request to a URI.
    ///
    /// # Errors
    ///
    /// This method fails whenever the supplied `Uri` cannot be parsed.
    #[inline]
    pub fn post(&self, uri: U) -> RequestBuilder {
        self.request(Method::POST, uri)
    }

    /// Convenience method to make a `PUT` request to a URI.
    ///
    /// # Errors
    ///
    /// This method fails whenever the supplied `Uri` cannot be parsed.
    #[inline]
    pub fn put(&self, uri: U) -> RequestBuilder {
        self.request(Method::PUT, uri)
    }

    /// Convenience method to make a `PATCH` request to a URI.
    ///
    /// # Errors
    ///
    /// This method fails whenever the supplied `Uri` cannot be parsed.
    #[inline]
    pub fn patch(&self, uri: U) -> RequestBuilder {
        self.request(Method::PATCH, uri)
    }

    /// Convenience method to make a `DELETE` request to a URI.
    ///
    /// # Errors
    ///
    /// This method fails whenever the supplied `Uri` cannot be parsed.
    #[inline]
    pub fn delete(&self, uri: U) -> RequestBuilder {
        self.request(Method::DELETE, uri)
    }

    /// Convenience method to make a `HEAD` request to a URI.
    ///
    /// # Errors
    ///
    /// This method fails whenever the supplied `Uri` cannot be parsed.
    #[inline]
    pub fn head(&self, uri: U) -> RequestBuilder {
        self.request(Method::HEAD, uri)
    }

    /// Convenience method to make a `OPTIONS` request to a URI.
    ///
    /// # Errors
    ///
    /// This method fails whenever the supplied `Uri` cannot be parsed.
    #[inline]
    pub fn options(&self, uri: U) -> RequestBuilder {
        self.request(Method::OPTIONS, uri)
    }

    /// Start building a `Request` with the `Method` and `Uri`.
    ///
    /// Returns a `RequestBuilder`, which will allow setting headers and
    /// the request body before sending.
    ///
    /// # Errors
    ///
    /// This method fails whenever the supplied `Uri` cannot be parsed.
    // URI parse failures are deferred into the builder rather than returned
    // here, so the call chain stays fluent.
    pub fn request(&self, method: Method, uri: U) -> RequestBuilder {
        let req = uri.into_uri().map(move |uri| Request::new(method, uri));
        RequestBuilder::new(self.clone(), req)
    }

    /// Upgrades the [`RequestBuilder`] to perform a websocket handshake.
    /// This returns a wrapped type, so you must do this after you set up
    /// your request, and just before you send the request.
    #[inline]
    #[cfg(feature = "ws")]
    #[cfg_attr(docsrs, doc(cfg(feature = "ws")))]
    pub fn websocket(&self, uri: U) -> WebSocketRequestBuilder {
        WebSocketRequestBuilder::new(self.request(Method::GET, uri))
    }

    /// Executes a `Request`.
    ///
    /// A `Request` can be built manually with `Request::new()` or obtained
    /// from a RequestBuilder with `RequestBuilder::build()`.
    ///
    /// You should prefer to use the `RequestBuilder` and
    /// `RequestBuilder::send()`.
    ///
    /// # Errors
    ///
    /// This method fails if there was an error while sending request,
    /// redirect loop was detected or redirect limit was exhausted.
    pub fn execute(&self, request: Request) -> Pending {
        // NOTE(review): `http::Request::::from` lost its type argument to
        // extraction; kept verbatim.
        let req = http::Request::::from(request);
        Pending::Request {
            uri: Some(req.uri().clone()),
            // Drive the inner (Arc-shared) service stack to completion for
            // this single request.
            fut: Box::pin(Oneshot::new((*self.0).clone(), req)),
        }
    }
}

// `Client` is itself a tower `Service`, always ready (the inner stack applies
// its own backpressure per request via `Oneshot`).
impl tower::Service for Client {
    type Response = Response;
    type Error = Error;
    type Future = Pending;

    #[inline(always)]
    fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> {
        Poll::Ready(Ok(()))
    }

    #[inline(always)]
    fn call(&mut self, req: Request) -> Self::Future {
        self.execute(req)
    }
}

// Same impl for `&Client`, so a shared reference can be used as a `Service`.
impl tower::Service for &'_ Client {
    type Response = Response;
    type Error = Error;
    type Future = Pending;

    #[inline(always)]
    fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> {
        Poll::Ready(Ok(()))
    }

    #[inline(always)]
    fn call(&mut self, req: Request) -> Self::Future {
        self.execute(req)
    }
}

// ===== impl ClientBuilder =====

impl ClientBuilder {
    /// Returns a [`Client`] that uses this [`ClientBuilder`] configuration.
    ///
    /// # Errors
    ///
    /// This method fails if a TLS backend cannot be initialized, or the resolver
    /// cannot load the system configuration.
// Consumes the builder and assembles the full middleware stack:
// resolver -> connector (TLS + TCP) -> pooled HTTP transport -> retry ->
// redirect -> (decompression) -> body timeout + default config -> user
// layers -> total timeout. Layer order is behavior-critical; do not reorder.
    pub fn build(self) -> crate::Result {
        let mut config = self.config;

        // Surface the first configuration error recorded by any setter.
        if let Some(err) = config.error {
            return Err(err);
        }

        // Prepare proxies
        if config.auto_sys_proxy {
            config.proxies.push(ProxyMatcher::system());
        }

        // Create base client service
        let service = {
            // Choose the DNS resolver: explicit > hickory (feature-gated) >
            // system getaddrinfo; then wrap with static overrides if any.
            let resolver = {
                let mut resolver: Arc = match config.dns_resolver {
                    Some(dns_resolver) => dns_resolver,
                    #[cfg(feature = "hickory-dns")]
                    None if config.hickory_dns => Arc::new(HickoryDnsResolver::new()),
                    None => Arc::new(GaiResolver::new()),
                };
                if !config.dns_overrides.is_empty() {
                    resolver = Arc::new(DnsResolverWithOverrides::new(
                        resolver,
                        config.dns_overrides,
                    ));
                }
                DynResolver::new(resolver)
            };

            let connector = Connector::builder(config.proxies, resolver)
                .timeout(config.connect_timeout)
                .tls_info(config.tls_info)
                .tcp_nodelay(config.tcp_nodelay)
                .verbose(config.connection_verbose)
                .with_tls(|tls| {
                    // ALPN is pinned only when the client is restricted to a
                    // single HTTP version; otherwise let negotiation decide.
                    tls.alpn_protocol(match config.http_version_pref {
                        HttpVersionPref::Http1 => Some(AlpnProtocol::HTTP1),
                        HttpVersionPref::Http2 => Some(AlpnProtocol::HTTP2),
                        _ => None,
                    })
                    .keylog(config.tls_keylog)
                    .cert_store(config.tls_cert_store)
                    .identity(config.tls_identity)
                    .max_version(config.tls_max_version)
                    .min_version(config.tls_min_version)
                    .tls_sni(config.tls_sni)
                    .verify_hostname(config.tls_verify_hostname)
                    .cert_verification(config.tls_cert_verification)
                    .session_store(config.tls_session_cache)
                })
                .with_http(|http| {
                    // Plain-HTTP URIs are allowed; `https_only` is enforced
                    // higher up in the stack (ConfigServiceLayer).
                    http.enforce_http(false);
                    http.set_keepalive(config.tcp_keepalive);
                    http.set_keepalive_interval(config.tcp_keepalive_interval);
                    http.set_keepalive_retries(config.tcp_keepalive_retries);
                    http.set_reuse_address(config.tcp_reuse_address);
                    http.set_connect_timeout(config.connect_timeout);
                    http.set_nodelay(config.tcp_nodelay);
                    http.set_send_buffer_size(config.tcp_send_buffer_size);
                    http.set_recv_buffer_size(config.tcp_recv_buffer_size);
                    http.set_happy_eyeballs_timeout(config.tcp_happy_eyeballs_timeout);
                    #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))]
                    http.set_tcp_user_timeout(config.tcp_user_timeout);
                    #[cfg(any(
                        target_os = "android",
                        target_os = "fuchsia",
                        target_os = "illumos",
                        target_os = "ios",
                        target_os = "linux",
                        target_os = "macos",
                        target_os = "solaris",
                        target_os = "tvos",
                        target_os = "visionos",
                        target_os = "watchos",
                    ))]
                    if let Some(interface) = config.socket_bind_options.interface {
                        http.set_interface(interface);
                    }
                    http.set_local_addresses(
                        config.socket_bind_options.ipv4_address,
                        config.socket_bind_options.ipv6_address,
                    );
                })
                .build(config.tls_options, config.connector_layers)?;

            #[allow(unused_mut)]
            let mut builder = HttpClient::builder(TokioExecutor::new());
            #[cfg(feature = "cookies")]
            {
                builder = builder.cookie_store(config.cookie_store);
            }
            builder
                .http1_options(config.http1_options)
                .http2_options(config.http2_options)
                .http2_only(matches!(config.http_version_pref, HttpVersionPref::Http2))
                .http2_timer(TokioTimer::new())
                .pool_timer(TokioTimer::new())
                .pool_idle_timeout(config.pool_idle_timeout)
                .pool_max_idle_per_host(config.pool_max_idle_per_host)
                .pool_max_size(config.pool_max_size)
                .build(connector)
        };

        // Configured client service with layers
        let client = {
            // Retry wraps redirect-following so retried requests re-run the
            // redirect logic from scratch.
            let service = ServiceBuilder::new()
                .layer(RetryLayer::new(RetryPolicy::new(config.retry_policy)))
                .layer({
                    let policy = FollowRedirectPolicy::new(config.redirect_policy)
                        .with_referer(config.referer)
                        .with_https_only(config.https_only);
                    FollowRedirectLayer::with_policy(policy)
                })
                .service(service);

            #[cfg(any(
                feature = "gzip",
                feature = "zstd",
                feature = "brotli",
                feature = "deflate",
            ))]
            let service = ServiceBuilder::new()
                .layer(DecompressionLayer::new(config.accept_encoding))
                .service(service);

            let service = ServiceBuilder::new()
                .layer(ResponseBodyTimeoutLayer::new(
                    TokioTimer::new(),
                    config.timeout_options,
                ))
                .layer(ConfigServiceLayer::new(
                    config.https_only,
                    config.headers,
                    config.orig_headers,
                ))
                .service(service);

            // Fast path: no user layers means no boxing of the service stack.
            if config.layers.is_empty() {
                let service = ServiceBuilder::new()
                    .layer(TimeoutLayer::new(config.timeout_options))
                    .service(service);
                Either::Left(service)
            } else {
                // User layers are folded innermost-first around a boxed stack;
                // the total timeout still wraps everything.
                let service = config
                    .layers
                    .into_iter()
                    .fold(BoxCloneSyncService::new(service), |service, layer| {
                        ServiceBuilder::new().layer(layer).service(service)
                    });
                let service = ServiceBuilder::new()
                    .layer(TimeoutLayer::new(config.timeout_options))
                    .service(service)
                    .map_err(error::map_timeout_to_request_error);
                Either::Right(BoxCloneSyncService::new(service))
            }
        };

        Ok(Client(Arc::new(client)))
    }

    // Higher-level options

    /// Sets the `User-Agent` header to be used by this client.
    ///
    /// # Example
    ///
    /// ```rust
    /// # async fn doc() -> wreq::Result<()> {
    /// // Name your user agent after your app?
    /// static APP_USER_AGENT: &str = concat!(env!("CARGO_PKG_NAME"), "/", env!("CARGO_PKG_VERSION"),);
    ///
    /// let client = wreq::Client::builder().user_agent(APP_USER_AGENT).build()?;
    /// let res = client.get("https://www.rust-lang.org").send().await?;
    /// # Ok(())
    /// # }
    /// ```
    // An invalid header value is recorded in `config.error` and reported by
    // `build()` rather than panicking here.
    pub fn user_agent(mut self, value: V) -> ClientBuilder
    where
        V: TryInto,
        V::Error: Into,
    {
        match value.try_into() {
            Ok(value) => {
                self.config.headers.insert(USER_AGENT, value);
            }
            Err(err) => {
                self.config.error = Some(Error::builder(err.into()));
            }
        };
        self
    }

    /// Sets the default headers for every request.
    ///
    /// # Example
    ///
    /// ```rust
    /// use wreq::header;
    /// # async fn doc() -> wreq::Result<()> {
    /// let mut headers = header::HeaderMap::new();
    /// headers.insert("X-MY-HEADER", header::HeaderValue::from_static("value"));
    ///
    /// // Consider marking security-sensitive headers with `set_sensitive`.
/// let mut auth_value = header::HeaderValue::from_static("secret"); /// auth_value.set_sensitive(true); /// headers.insert(header::AUTHORIZATION, auth_value); /// /// // get a client builder /// let client = wreq::Client::builder().default_headers(headers).build()?; /// let res = client.get("https://www.rust-lang.org").send().await?; /// # Ok(()) /// # } /// ``` /// /// Override the default headers: /// /// ```rust /// use wreq::header; /// # async fn doc() -> wreq::Result<()> { /// let mut headers = header::HeaderMap::new(); /// headers.insert("X-MY-HEADER", header::HeaderValue::from_static("value")); /// /// // get a client builder /// let client = wreq::Client::builder().default_headers(headers).build()?; /// let res = client /// .get("https://www.rust-lang.org") /// .header("X-MY-HEADER", "new_value") /// .send() /// .await?; /// # Ok(()) /// # } /// ``` #[inline] pub fn default_headers(mut self, headers: HeaderMap) -> ClientBuilder { crate::util::replace_headers(&mut self.config.headers, headers); self } /// Sets the original headers for every request. #[inline] pub fn orig_headers(mut self, orig_headers: OrigHeaderMap) -> ClientBuilder { self.config.orig_headers.extend(orig_headers); self } /// Enable a persistent cookie store for the client. /// /// Cookies received in responses will be preserved and included in /// additional requests. /// /// By default, no cookie store is used. /// /// # Optional /// /// This requires the optional `cookies` feature to be enabled. #[inline] #[cfg(feature = "cookies")] #[cfg_attr(docsrs, doc(cfg(feature = "cookies")))] pub fn cookie_store(mut self, enable: bool) -> ClientBuilder { if enable { self.cookie_provider(Arc::new(cookie::Jar::default())) } else { self.config.cookie_store = None; self } } /// Set the persistent cookie store for the client. /// /// Cookies received in responses will be passed to this store, and /// additional requests will query this store for cookies. /// /// By default, no cookie store is used. 
/// /// # Optional /// /// This requires the optional `cookies` feature to be enabled. #[inline] #[cfg(feature = "cookies")] #[cfg_attr(docsrs, doc(cfg(feature = "cookies")))] pub fn cookie_provider(mut self, cookie_store: C) -> ClientBuilder { self.config.cookie_store = Some(cookie_store.into_shared()); self } /// Enable auto gzip decompression by checking the `Content-Encoding` response header. /// /// If auto gzip decompression is turned on: /// /// - When sending a request and if the request's headers do not already contain an /// `Accept-Encoding` **and** `Range` values, the `Accept-Encoding` header is set to `gzip`. /// The request body is **not** automatically compressed. /// - When receiving a response, if its headers contain a `Content-Encoding` value of `gzip`, /// both `Content-Encoding` and `Content-Length` are removed from the headers' set. The /// response body is automatically decompressed. /// /// If the `gzip` feature is turned on, the default option is enabled. /// /// # Optional /// /// This requires the optional `gzip` feature to be enabled #[inline] #[cfg(feature = "gzip")] #[cfg_attr(docsrs, doc(cfg(feature = "gzip")))] pub fn gzip(mut self, enable: bool) -> ClientBuilder { self.config.accept_encoding.gzip = enable; self } /// Enable auto brotli decompression by checking the `Content-Encoding` response header. /// /// If auto brotli decompression is turned on: /// /// - When sending a request and if the request's headers do not already contain an /// `Accept-Encoding` **and** `Range` values, the `Accept-Encoding` header is set to `br`. The /// request body is **not** automatically compressed. /// - When receiving a response, if its headers contain a `Content-Encoding` value of `br`, both /// `Content-Encoding` and `Content-Length` are removed from the headers' set. The response /// body is automatically decompressed. /// /// If the `brotli` feature is turned on, the default option is enabled. 
/// /// # Optional /// /// This requires the optional `brotli` feature to be enabled #[inline] #[cfg(feature = "brotli")] #[cfg_attr(docsrs, doc(cfg(feature = "brotli")))] pub fn brotli(mut self, enable: bool) -> ClientBuilder { self.config.accept_encoding.brotli = enable; self } /// Enable auto zstd decompression by checking the `Content-Encoding` response header. /// /// If auto zstd decompression is turned on: /// /// - When sending a request and if the request's headers do not already contain an /// `Accept-Encoding` **and** `Range` values, the `Accept-Encoding` header is set to `zstd`. /// The request body is **not** automatically compressed. /// - When receiving a response, if its headers contain a `Content-Encoding` value of `zstd`, /// both `Content-Encoding` and `Content-Length` are removed from the headers' set. The /// response body is automatically decompressed. /// /// If the `zstd` feature is turned on, the default option is enabled. /// /// # Optional /// /// This requires the optional `zstd` feature to be enabled #[inline] #[cfg(feature = "zstd")] #[cfg_attr(docsrs, doc(cfg(feature = "zstd")))] pub fn zstd(mut self, enable: bool) -> ClientBuilder { self.config.accept_encoding.zstd = enable; self } /// Enable auto deflate decompression by checking the `Content-Encoding` response header. /// /// If auto deflate decompression is turned on: /// /// - When sending a request and if the request's headers do not already contain an /// `Accept-Encoding` **and** `Range` values, the `Accept-Encoding` header is set to /// `deflate`. The request body is **not** automatically compressed. /// - When receiving a response, if it's headers contain a `Content-Encoding` value that equals /// to `deflate`, both values `Content-Encoding` and `Content-Length` are removed from the /// headers' set. The response body is automatically decompressed. /// /// If the `deflate` feature is turned on, the default option is enabled. 
/// /// # Optional /// /// This requires the optional `deflate` feature to be enabled #[inline] #[cfg(feature = "deflate")] #[cfg_attr(docsrs, doc(cfg(feature = "deflate")))] pub fn deflate(mut self, enable: bool) -> ClientBuilder { self.config.accept_encoding.deflate = enable; self } /// Disable auto response body zstd decompression. /// /// This method exists even if the optional `zstd` feature is not enabled. /// This can be used to ensure a `Client` doesn't use zstd decompression /// even if another dependency were to enable the optional `zstd` feature. #[inline] pub fn no_zstd(self) -> ClientBuilder { #[cfg(feature = "zstd")] { self.zstd(false) } #[cfg(not(feature = "zstd"))] { self } } /// Disable auto response body gzip decompression. /// /// This method exists even if the optional `gzip` feature is not enabled. /// This can be used to ensure a `Client` doesn't use gzip decompression /// even if another dependency were to enable the optional `gzip` feature. #[inline] pub fn no_gzip(self) -> ClientBuilder { #[cfg(feature = "gzip")] { self.gzip(false) } #[cfg(not(feature = "gzip"))] { self } } /// Disable auto response body brotli decompression. /// /// This method exists even if the optional `brotli` feature is not enabled. /// This can be used to ensure a `Client` doesn't use brotli decompression /// even if another dependency were to enable the optional `brotli` feature. #[inline] pub fn no_brotli(self) -> ClientBuilder { #[cfg(feature = "brotli")] { self.brotli(false) } #[cfg(not(feature = "brotli"))] { self } } /// Disable auto response body deflate decompression. /// /// This method exists even if the optional `deflate` feature is not enabled. /// This can be used to ensure a `Client` doesn't use deflate decompression /// even if another dependency were to enable the optional `deflate` feature. 
#[inline] pub fn no_deflate(self) -> ClientBuilder { #[cfg(feature = "deflate")] { self.deflate(false) } #[cfg(not(feature = "deflate"))] { self } } // Redirect options /// Set a `RedirectPolicy` for this client. /// /// Default will follow redirects up to a maximum of 10. #[inline] pub fn redirect(mut self, policy: redirect::Policy) -> ClientBuilder { self.config.redirect_policy = policy; self } /// Enable or disable automatic setting of the `Referer` header. /// /// Default is `true`. #[inline] pub fn referer(mut self, enable: bool) -> ClientBuilder { self.config.referer = enable; self } // Retry options /// Set a request retry policy. pub fn retry(mut self, policy: retry::Policy) -> ClientBuilder { self.config.retry_policy = policy; self } // Proxy options /// Add a `Proxy` to the list of proxies the `Client` will use. /// /// # Note /// /// Adding a proxy will disable the automatic usage of the "system" proxy. /// /// # Example /// ``` /// use wreq::{Client, Proxy}; /// /// let proxy = Proxy::http("http://proxy:8080").unwrap(); /// let client = Client::builder().proxy(proxy).build().unwrap(); /// ``` #[inline] pub fn proxy(mut self, proxy: Proxy) -> ClientBuilder { self.config.proxies.push(proxy.into_matcher()); self.config.auto_sys_proxy = false; self } /// Clear all `Proxies`, so `Client` will use no proxy anymore. /// /// # Note /// To add a proxy exclusion list, use [crate::proxy::Proxy::no_proxy()] /// on all desired proxies instead. /// /// This also disables the automatic usage of the "system" proxy. #[inline] pub fn no_proxy(mut self) -> ClientBuilder { self.config.proxies.clear(); self.config.auto_sys_proxy = false; self } // Timeout options /// Enables a request timeout. /// /// The timeout is applied from when the request starts connecting until the /// response body has finished. /// /// Default is no timeout. 
// NOTE(review): this region was whitespace-collapsed during extraction, and the
// extractor appears to have stripped generic parameter lists (e.g. `val: D ...
// where D: Into>` — presumably `D: Into<Option<Duration>>`; likewise for the
// `http1_options`/`http2_options` setters — confirm against the upstream crate
// before editing). Code is left byte-identical.
// Region contents: ClientBuilder request/read/connect timeouts, connection
// verbosity, pool options (idle timeout, max idle per host, max pool size),
// HTTPS-only mode, HTTP/1-vs-HTTP/2 preference, HTTP/1 and HTTP/2 option
// setters, and TCP socket options (nodelay, keepalive time/interval/retries,
// Linux-family TCP_USER_TIMEOUT, SO_REUSEADDR, send/recv buffer sizes, and the
// RFC 6555 happy-eyeballs timeout).
#[inline] pub fn timeout(mut self, timeout: Duration) -> ClientBuilder { self.config.timeout_options.total_timeout(timeout); self } /// Set a timeout for only the read phase of a `Client`. /// /// Default is `None`. #[inline] pub fn read_timeout(mut self, timeout: Duration) -> ClientBuilder { self.config.timeout_options.read_timeout(timeout); self } /// Set a timeout for only the connect phase of a `Client`. /// /// Default is `None`. /// /// # Note /// /// This **requires** the futures be executed in a tokio runtime with /// a tokio timer enabled. #[inline] pub fn connect_timeout(mut self, timeout: Duration) -> ClientBuilder { self.config.connect_timeout = Some(timeout); self } /// Set whether connections should emit verbose logs. /// /// Enabling this option will emit [log][] messages at the `TRACE` level /// for read and write operations on connections. /// /// [log]: https://crates.io/crates/log #[inline] pub fn connection_verbose(mut self, verbose: bool) -> ClientBuilder { self.config.connection_verbose = verbose; self } // HTTP options /// Set an optional timeout for idle sockets being kept-alive. /// /// Pass `None` to disable timeout. /// /// Default is 90 seconds. #[inline] pub fn pool_idle_timeout(mut self, val: D) -> ClientBuilder where D: Into>, { self.config.pool_idle_timeout = val.into(); self } /// Sets the maximum idle connection per host allowed in the pool. #[inline] pub fn pool_max_idle_per_host(mut self, max: usize) -> ClientBuilder { self.config.pool_max_idle_per_host = max; self } /// Sets the maximum number of connections in the pool. #[inline] pub fn pool_max_size(mut self, max: usize) -> ClientBuilder { self.config.pool_max_size = NonZeroUsize::new(max); self } /// Restrict the Client to be used with HTTPS only requests. /// /// Defaults to false. #[inline] pub fn https_only(mut self, enabled: bool) -> ClientBuilder { self.config.https_only = enabled; self } /// Only use HTTP/1.
#[inline] pub fn http1_only(mut self) -> ClientBuilder { self.config.http_version_pref = HttpVersionPref::Http1; self } /// Only use HTTP/2. #[inline] pub fn http2_only(mut self) -> ClientBuilder { self.config.http_version_pref = HttpVersionPref::Http2; self } /// Sets the HTTP/1 options for the client. #[inline] pub fn http1_options(mut self, options: T) -> ClientBuilder where T: Into>, { self.config.http1_options = options.into(); self } /// Sets the HTTP/2 options for the client. #[inline] pub fn http2_options(mut self, options: T) -> ClientBuilder where T: Into>, { self.config.http2_options = options.into(); self } // TCP options /// Set whether sockets have `TCP_NODELAY` enabled. /// /// Default is `true`. #[inline] pub fn tcp_nodelay(mut self, enabled: bool) -> ClientBuilder { self.config.tcp_nodelay = enabled; self } /// Set that all sockets have `SO_KEEPALIVE` set with the supplied duration. /// /// If `None`, the option will not be set. /// /// Default is 15 seconds. #[inline] pub fn tcp_keepalive(mut self, val: D) -> ClientBuilder where D: Into>, { self.config.tcp_keepalive = val.into(); self } /// Set that all sockets have `SO_KEEPALIVE` set with the supplied interval. /// /// If `None`, the option will not be set. /// /// Default is 15 seconds. #[inline] pub fn tcp_keepalive_interval(mut self, val: D) -> ClientBuilder where D: Into>, { self.config.tcp_keepalive_interval = val.into(); self } /// Set that all sockets have `SO_KEEPALIVE` set with the supplied retry count. /// /// If `None`, the option will not be set. /// /// Default is 3 retries. #[inline] pub fn tcp_keepalive_retries(mut self, retries: C) -> ClientBuilder where C: Into>, { self.config.tcp_keepalive_retries = retries.into(); self } /// Set that all sockets have `TCP_USER_TIMEOUT` set with the supplied duration. /// /// This option controls how long transmitted data may remain unacknowledged before /// the connection is force-closed. /// /// If `None`, the option will not be set.
/// /// Default is 30 seconds. #[inline] #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] #[cfg_attr( docsrs, doc(cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))) )] pub fn tcp_user_timeout(mut self, val: D) -> ClientBuilder where D: Into>, { self.config.tcp_user_timeout = val.into(); self } /// Set whether sockets have `SO_REUSEADDR` enabled. #[inline] pub fn tcp_reuse_address(mut self, enabled: bool) -> ClientBuilder { self.config.tcp_reuse_address = enabled; self } /// Sets the size of the TCP send buffer on this client socket. /// /// On most operating systems, this sets the `SO_SNDBUF` socket option. #[inline] pub fn tcp_send_buffer_size(mut self, size: S) -> ClientBuilder where S: Into>, { self.config.tcp_send_buffer_size = size.into(); self } /// Sets the size of the TCP receive buffer on this client socket. /// /// On most operating systems, this sets the `SO_RCVBUF` socket option. #[inline] pub fn tcp_recv_buffer_size(mut self, size: S) -> ClientBuilder where S: Into>, { self.config.tcp_recv_buffer_size = size.into(); self } /// Set timeout for [RFC 6555 (Happy Eyeballs)][RFC 6555] algorithm. /// /// If hostname resolves to both IPv4 and IPv6 addresses and connection /// cannot be established using preferred address family before timeout /// elapses, then connector will in parallel attempt connection using other /// address family. /// /// If `None`, parallel connection attempts are disabled. /// /// Default is 300 milliseconds. /// /// [RFC 6555]: https://tools.ietf.org/html/rfc6555 #[inline] pub fn tcp_happy_eyeballs_timeout(mut self, val: D) -> ClientBuilder where D: Into>, { self.config.tcp_happy_eyeballs_timeout = val.into(); self } /// Bind to a local IP Address.
// NOTE(review): whitespace-collapsed region with stripped generic bounds
// (e.g. `addr: T ... where T: Into>` — presumably `T: Into<Option<IpAddr>>`;
// confirm upstream). Code left byte-identical.
// Region contents: socket bind options (single local address, dual IPv4/IPv6
// local addresses, platform-gated network-interface binding via
// SO_BINDTODEVICE / IP_BOUND_IF) and the first TLS setters (client identity,
// certificate store, and the start of the certificate-verification docs).
/// /// # Example /// /// ``` /// use std::net::IpAddr; /// let local_addr = IpAddr::from([12, 4, 1, 8]); /// let client = wreq::Client::builder() /// .local_address(local_addr) /// .build() /// .unwrap(); /// ``` #[inline] pub fn local_address(mut self, addr: T) -> ClientBuilder where T: Into>, { self.config .socket_bind_options .set_local_address(addr.into()); self } /// Set that all sockets are bound to the configured IPv4 or IPv6 address (depending on host's /// preferences) before connection. /// /// # Example /// /// /// ``` /// use std::net::{Ipv4Addr, Ipv6Addr}; /// let ipv4 = Ipv4Addr::new(127, 0, 0, 1); /// let ipv6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); /// let client = wreq::Client::builder() /// .local_addresses(ipv4, ipv6) /// .build() /// .unwrap(); /// ``` #[inline] pub fn local_addresses(mut self, ipv4: V4, ipv6: V6) -> ClientBuilder where V4: Into>, V6: Into>, { self.config .socket_bind_options .set_local_addresses(ipv4, ipv6); self } /// Bind connections only on the specified network interface. /// /// This option is only available on the following operating systems: /// /// - Android /// - Fuchsia /// - Linux, /// - macOS and macOS-like systems (iOS, tvOS, watchOS and visionOS) /// - Solaris and illumos /// /// On Android, Linux, and Fuchsia, this uses the /// [`SO_BINDTODEVICE`][man-7-socket] socket option. On macOS and macOS-like /// systems, Solaris, and illumos, this instead uses the [`IP_BOUND_IF` and /// `IPV6_BOUND_IF`][man-7p-ip] socket options (as appropriate). /// /// Note that connections will fail if the provided interface name is not a /// network interface that currently exists when a connection is established.
/// /// # Example /// /// ``` /// # fn doc() -> Result<(), wreq::Error> { /// let interface = "lo"; /// let client = wreq::Client::builder() /// .interface(interface) /// .build()?; /// # Ok(()) /// # } /// ``` /// /// [man-7-socket]: https://man7.org/linux/man-pages/man7/socket.7.html /// [man-7p-ip]: https://docs.oracle.com/cd/E86824_01/html/E54777/ip-7p.html #[inline] #[cfg(any( target_os = "android", target_os = "fuchsia", target_os = "illumos", target_os = "ios", target_os = "linux", target_os = "macos", target_os = "solaris", target_os = "tvos", target_os = "visionos", target_os = "watchos", ))] #[cfg_attr( docsrs, doc(cfg(any( target_os = "android", target_os = "fuchsia", target_os = "illumos", target_os = "ios", target_os = "linux", target_os = "macos", target_os = "solaris", target_os = "tvos", target_os = "visionos", target_os = "watchos", ))) )] pub fn interface(mut self, interface: T) -> ClientBuilder where T: Into>, { self.config.socket_bind_options.set_interface(interface); self } // TLS options /// Sets the identity to be used for client certificate authentication. #[inline] pub fn tls_identity(mut self, identity: Identity) -> ClientBuilder { self.config.tls_identity = Some(identity); self } /// Sets the verify certificate store for the client. /// /// This method allows you to specify a custom verify certificate store to be used /// for TLS connections. By default, the system's verify certificate store is used. #[inline] pub fn tls_cert_store(mut self, store: CertStore) -> ClientBuilder { self.config.tls_cert_store = store; self } /// Controls the use of certificate validation. /// /// Defaults to `true`. /// /// # Warning /// /// You should think very carefully before using this method. If /// invalid certificates are trusted, *any* certificate for *any* site /// will be trusted for use. This includes expired certificates. This /// introduces significant vulnerabilities, and should only be used /// as a last resort.
// NOTE(review): whitespace-collapsed region; several generic bounds stripped
// (e.g. `store: S` on `tls_session_cache` — presumably `S: IntoTlsSessionCache`
// or similar `into_shared()`-providing trait; `domain: D ... where D: Into>` —
// presumably `D: Into<Cow<'static, str>>`. Confirm upstream). Code left
// byte-identical.
// Region contents: TLS toggles (cert verification, hostname verification, SNI,
// keylog, min/max protocol version, TlsInfo response extension, session cache,
// bulk TLS options) and DNS options (hickory-dns opt-out, per-domain resolve
// overrides).
#[inline] pub fn tls_cert_verification(mut self, cert_verification: bool) -> ClientBuilder { self.config.tls_cert_verification = cert_verification; self } /// Configures the use of hostname verification when connecting. /// /// Defaults to `true`. /// # Warning /// /// You should think very carefully before you use this method. If hostname verification is not /// used, *any* valid certificate for *any* site will be trusted for use from any other. This /// introduces a significant vulnerability to man-in-the-middle attacks. #[inline] pub fn tls_verify_hostname(mut self, verify_hostname: bool) -> ClientBuilder { self.config.tls_verify_hostname = verify_hostname; self } /// Configures the use of Server Name Indication (SNI) when connecting. /// /// Defaults to `true`. #[inline] pub fn tls_sni(mut self, tls_sni: bool) -> ClientBuilder { self.config.tls_sni = tls_sni; self } /// Configures TLS key logging for the client. #[inline] pub fn tls_keylog(mut self, keylog: KeyLog) -> ClientBuilder { self.config.tls_keylog = Some(keylog); self } /// Set the minimum required TLS version for connections. /// /// By default the TLS backend's own default is used. #[inline] pub fn tls_min_version(mut self, version: TlsVersion) -> ClientBuilder { self.config.tls_min_version = Some(version); self } /// Set the maximum allowed TLS version for connections. /// /// By default there's no maximum. #[inline] pub fn tls_max_version(mut self, version: TlsVersion) -> ClientBuilder { self.config.tls_max_version = Some(version); self } /// Add TLS information as `TlsInfo` extension to responses. /// /// # Optional /// /// feature to be enabled. #[inline] pub fn tls_info(mut self, tls_info: bool) -> ClientBuilder { self.config.tls_info = tls_info; self } /// Sets the TLS session cache. /// /// By default, an in-memory LRU cache is used. Use this method to provide /// a custom [`TlsSessionCache`] implementation (e.g., file-based or distributed).
#[inline] pub fn tls_session_cache(mut self, store: S) -> ClientBuilder { self.config.tls_session_cache = Some(store.into_shared()); self } /// Sets the TLS options for the client. #[inline] pub fn tls_options(mut self, options: T) -> ClientBuilder where T: Into>, { self.config.tls_options = options.into(); self } // DNS options /// Disables the hickory-dns async resolver. /// /// This method exists even if the optional `hickory-dns` feature is not enabled. /// This can be used to ensure a `Client` doesn't use the hickory-dns async resolver /// even if another dependency were to enable the optional `hickory-dns` feature. #[inline] #[cfg(feature = "hickory-dns")] #[cfg_attr(docsrs, doc(cfg(feature = "hickory-dns")))] pub fn no_hickory_dns(mut self) -> ClientBuilder { self.config.hickory_dns = false; self } /// Override DNS resolution for specific domains to a particular IP address. /// /// # Warning /// /// Since the DNS protocol has no notion of ports, if you wish to send /// traffic to a particular port you must include this port in the URI /// itself, any port in the overridden addr will be ignored and traffic sent /// to the conventional port for the given scheme (e.g. 80 for http). #[inline] pub fn resolve(self, domain: D, addr: SocketAddr) -> ClientBuilder where D: Into>, { self.resolve_to_addrs(domain, std::iter::once(addr)) } /// Override DNS resolution for specific domains to particular IP addresses. /// /// # Warning /// /// Since the DNS protocol has no notion of ports, if you wish to send /// traffic to a particular port you must include this port in the URI /// itself, any port in the overridden addresses will be ignored and traffic sent /// to the conventional port for the given scheme (e.g. 80 for http). #[inline] pub fn resolve_to_addrs(mut self, domain: D, addrs: A) -> ClientBuilder where D: Into>, A: IntoIterator, { self.config .dns_overrides .insert(domain.into(), addrs.into_iter().collect()); self } /// Override the DNS resolver implementation.
// NOTE(review): whitespace-collapsed region; the Tower layer bounds lost their
// type arguments to extraction (e.g. `L: Layer + Clone ...` — presumably
// `L: Layer<BoxedClientService>` / `L: Layer<BoxedConnectorService>` with
// request/response/error types spelled out; confirm upstream). Code left
// byte-identical. Two doc typos fixed in place ("establishment.a",
// "to emulation the").
// Region contents: custom DNS resolver hook, Tower request-service layers,
// Tower connector layers, the `emulation` convenience method (which fans out
// into tls/http1/http2/default-header/orig-header setters), the end of the
// ClientBuilder impl, and the start of `src/config.rs` module documentation.
/// /// Pass any type implementing `IntoResolve`. /// Overrides for specific names passed to `resolve` and `resolve_to_addrs` will /// still be applied on top of this resolver. #[inline] pub fn dns_resolver(mut self, resolver: R) -> ClientBuilder { self.config.dns_resolver = Some(resolver.into_shared()); self } // Tower middleware options /// Adds a new Tower [`Layer`](https://docs.rs/tower/latest/tower/trait.Layer.html) to the /// request [`Service`](https://docs.rs/tower/latest/tower/trait.Service.html) which is responsible /// for request processing. /// /// Each subsequent invocation of this function will wrap previous layers. /// /// If configured, the `timeout` will be the outermost layer. /// /// Example usage: /// ``` /// use std::time::Duration; /// /// let client = wreq::Client::builder() /// .timeout(Duration::from_millis(200)) /// .layer(tower::timeout::TimeoutLayer::new(Duration::from_millis(50))) /// .build() /// .unwrap(); /// ``` #[inline] pub fn layer(mut self, layer: L) -> ClientBuilder where L: Layer + Clone + Send + Sync + 'static, L::Service: Service, Response = http::Response, Error = BoxError> + Clone + Send + Sync + 'static, >>::Future: Send + 'static, { let layer = BoxCloneSyncServiceLayer::new(layer); self.config.layers.push(layer); self } /// Adds a new Tower [`Layer`](https://docs.rs/tower/latest/tower/trait.Layer.html) to the /// base connector [`Service`](https://docs.rs/tower/latest/tower/trait.Service.html) which /// is responsible for connection establishment. /// /// Each subsequent invocation of this function will wrap previous layers. /// /// If configured, the `connect_timeout` will be the outermost layer.
/// /// Example usage: /// ``` /// use std::time::Duration; /// /// let client = wreq::Client::builder() /// // resolved to outermost layer, meaning while we are waiting on concurrency limit /// .connect_timeout(Duration::from_millis(200)) /// // underneath the concurrency check, so only after concurrency limit lets us through /// .connector_layer(tower::timeout::TimeoutLayer::new(Duration::from_millis(50))) /// .connector_layer(tower::limit::concurrency::ConcurrencyLimitLayer::new(2)) /// .build() /// .unwrap(); /// ``` #[inline] pub fn connector_layer(mut self, layer: L) -> ClientBuilder where L: Layer + Clone + Send + Sync + 'static, L::Service: Service + Clone + Send + Sync + 'static, >::Future: Send + 'static, { let layer = BoxCloneSyncServiceLayer::new(layer); self.config.connector_layers.push(layer); self } // TLS/HTTP2 emulation options /// Configures the client builder to emulate the specified HTTP context. /// /// This method sets the necessary headers, HTTP/1 and HTTP/2 options configurations, and TLS /// options config to use the specified HTTP context. It allows the client to mimic the /// behavior of different versions or setups, which can be useful for testing or ensuring /// compatibility with various environments. /// /// # Note /// This will overwrite the existing configuration. /// You must set emulation before you can perform subsequent HTTP1/HTTP2/TLS fine-tuning. #[inline] pub fn emulation(self, emulation: T) -> ClientBuilder { let emulation = emulation.into_emulation(); self.tls_options(emulation.tls_options) .http1_options(emulation.http1_options) .http2_options(emulation.http2_options) .default_headers(emulation.headers) .orig_headers(emulation.orig_headers) } } ================================================ FILE: src/config.rs ================================================ //! The `config` module provides a generic mechanism for loading and managing //! request-scoped configuration. //! //! # Design Overview //! //!
// NOTE(review): whitespace-collapsed region. The first words below belong to a
// `//!` module-doc line whose marker landed at the end of the previous
// collapsed line — an extraction artifact, not real code. Generic parameters
// are stripped throughout (e.g. `struct RequestConfig(Option);` — presumably
// `struct RequestConfig<T: RequestConfigValue>(Option<T::Value>)`, with
// `impl<T> ...` blocks to match; confirm upstream). Code left byte-identical.
// Region contents: the `RequestConfigValue` marker trait (key type -> value
// type association), the `RequestConfig` newtype over an optional value, its
// `Default`, and accessors `new`/`as_ref`/`fetch` (request value falling back
// to client value)/`store` (insert-if-absent into Extensions)/`load`
// (move value out of Extensions into self).
This module is centered around two abstractions: //! //! - The [`RequestConfigValue`] trait, used to associate a config key type with its value type. //! - The [`RequestConfig`] struct, which wraps an optional value of the type linked via //! [`RequestConfigValue`]. //! //! Under the hood, the [`RequestConfig`] struct holds a single value for the associated config //! type. This value can be conveniently accessed, inserted, or mutated using [`http::Extensions`], //! enabling type-safe configuration storage and retrieval on a per-request basis. //! //! # Motivation //! //! The key design benefit is the ability to store multiple config types—potentially even with the //! same value type (e.g., [`std::time::Duration`])—without code duplication or ambiguity. By //! leveraging trait association, each config key is distinct at the type level, while code for //! storage and access remains totally generic. //! //! # Usage //! //! Implement [`RequestConfigValue`] for any marker type you wish to use as a config key, //! specifying the associated value type. Then use [`RequestConfig`] in [`Extensions`] //! to set or retrieve config values for each key type in a uniform way. use http::Extensions; /// Associate a marker key type with its associated value type stored in [`http::Extensions`]. /// Implement this trait for unit/marker types to declare the concrete `Value` used for that key. pub(crate) trait RequestConfigValue: Clone + 'static { type Value: Clone + Send + Sync + 'static; } /// Typed wrapper that holds an optional configuration value for a given marker key `T`. /// Instances of [`RequestConfig`] are intended to be inserted into [`http::Extensions`]. #[derive(Clone, Copy)] pub(crate) struct RequestConfig(Option); impl Default for RequestConfig { #[inline] fn default() -> Self { RequestConfig(None) } } impl RequestConfig where T: RequestConfigValue, { /// Creates a new `RequestConfig` with the provided value.
#[inline] pub(crate) const fn new(v: Option) -> Self { RequestConfig(v) } /// Returns a reference to the inner value of this request-scoped configuration. #[inline] pub(crate) const fn as_ref(&self) -> Option<&T::Value> { self.0.as_ref() } /// Retrieve the value from the request-scoped configuration. /// /// If the request specifies a value, use that value; otherwise, attempt to retrieve it from the /// current instance (typically a client instance). #[inline] pub(crate) fn fetch<'a>(&'a self, ext: &'a Extensions) -> Option<&'a T::Value> { ext.get::>() .and_then(Self::as_ref) .or(self.as_ref()) } /// Stores this value into the given [`http::Extensions`], if a value of the same type is not /// already present. /// /// This method checks whether the provided [`http::Extensions`] contains a /// [`RequestConfig`]. If not, it clones the current value and inserts it into the /// extensions. If a value already exists, the method does nothing. #[inline] pub(crate) fn store<'a>(&'a self, ext: &'a mut Extensions) -> &'a mut Option { &mut ext.get_or_insert_with(|| self.clone()).0 } /// Loads the internal value from the provided [`http::Extensions`], if present. /// /// This method attempts to remove a value of type [`RequestConfig`] from the provided /// [`http::Extensions`]. If such a value exists, the current internal value is replaced with /// the removed value. If not, the internal value remains unchanged. #[inline] pub(crate) fn load(&mut self, ext: &mut Extensions) -> Option<&T::Value> { if let Some(value) = RequestConfig::::remove(ext) { self.0.replace(value); } self.as_ref() } /// Returns an immutable reference to the stored value from the given [`http::Extensions`], if /// present. /// /// Internally fetches [`RequestConfig`] and returns a reference to its inner value, if set.
// NOTE(review): whitespace-collapsed region spanning the tail of src/config.rs
// (accessors `get`/`get_mut`/`remove` and the `impl_request_config_value!`
// macro, whose turbofish type arguments were stripped by extraction — e.g.
// `ext.get::>()` is presumably `ext.get::<RequestConfig<T>>()`; confirm
// upstream) and the start of src/cookie.rs (imports, the `Cookies` enum with
// Compressed/Uncompressed/Empty variants, and the `CookieStore` trait's
// `set_cookies` method plus the opening doc of `cookies`). Code left
// byte-identical.
#[inline] pub(crate) fn get(ext: &Extensions) -> Option<&T::Value> { ext.get::>()?.0.as_ref() } /// Returns a mutable reference to the inner value in [`http::Extensions`], inserting a default /// if missing. /// /// This ensures a [`RequestConfig`] exists and returns a mutable reference to its inner /// `Option`. #[inline] pub(crate) fn get_mut(ext: &mut Extensions) -> &mut Option { &mut ext.get_or_insert_default::>().0 } /// Removes and returns the stored value from the given [`http::Extensions`], if present. /// /// This consumes the [`RequestConfig`] entry and extracts its inner value. #[inline] pub(crate) fn remove(ext: &mut Extensions) -> Option { ext.remove::>()?.0 } } /// Implements [`RequestConfigValue`] for a given type. macro_rules! impl_request_config_value { ($type:ty) => { impl crate::config::RequestConfigValue for $type { type Value = Self; } }; ($type:ty, $value:ty) => { impl crate::config::RequestConfigValue for $type { type Value = $value; } }; } ================================================ FILE: src/cookie.rs ================================================ //! HTTP Cookies use std::{collections::HashMap, convert::TryInto, fmt, sync::Arc, time::SystemTime}; use bytes::Bytes; use cookie::{Cookie as RawCookie, CookieJar, Expiration, SameSite, time::Duration}; use http::{Uri, Version}; use crate::{IntoUri, error::Error, ext::UriExt, header::HeaderValue, sync::RwLock}; /// Cookie header values in two forms. #[derive(Debug, Clone)] #[non_exhaustive] pub enum Cookies { /// All cookies combined into one header (compressed). Compressed(HeaderValue), /// Each cookie sent as its own header (uncompressed). Uncompressed(Vec), /// No cookies. Empty, } /// Actions for a persistent cookie store providing session support. pub trait CookieStore: Send + Sync { /// Store a set of Set-Cookie header values received from `uri` fn set_cookies(&self, cookie_headers: &mut dyn Iterator, uri: &Uri); /// Returns cookies for the given URI and HTTP version.
// NOTE(review): whitespace-collapsed region; angle-bracketed type arguments are
// stripped (e.g. `Jar(RwLock>>)` — presumably a nested
// domain -> path -> CookieJar map such as
// `RwLock<HashMap<String, HashMap<String, CookieJar>>>`; confirm upstream).
// Code left byte-identical.
// Region contents: the `CookieStore::cookies` contract (single Cookie header
// for HTTP/1.1 per RFC 9112 §5.6.3, one header per cookie for HTTP/2+ per
// RFC 9113 §8.1.2.5), the `IntoCookieStore` shared-Arc conversion trait, the
// `IntoCookie` trait and its impls for `Cookie`/`RawCookie`/`&str` (the `&str`
// impl returns None on parse failure), the `Cookie` wrapper over
// `cookie::Cookie` with parse/accessors (`expires` maps `Expiration::Session`
// to None), `Display`/`From` impls, the `into_uri!` early-return macro, the
// default `Jar` store, and `Jar::get` (domain/path/name lookup under a read
// lock).
/// /// Following [RFC 9112 §5.6.3], HTTP/1.1 combines all cookies into a single header. /// For [HTTP/2] and above, cookies are sent as separate header fields /// as per [RFC 9113 §8.1.2.5]. /// /// [RFC 9112 §5.6.3]: https://www.rfc-editor.org/rfc/rfc9112#section-5.6.3 /// [RFC 9113 §8.1.2.5]: https://www.rfc-editor.org/rfc/rfc9113#section-8.1.2.5 /// [HTTP/2]: https://datatracker.ietf.org/doc/html/rfc9113 fn cookies(&self, uri: &Uri, version: Version) -> Cookies; } impl_into_shared!( /// Trait for converting types into a shared cookie store ([`Arc`]). /// /// Implemented for any [`CookieStore`] type, [`Arc`] where `T: CookieStore`, and [`Arc`]. Enables ergonomic conversion to a trait object for use in APIs without manual /// boxing. pub trait IntoCookieStore => CookieStore ); impl_request_config_value!(Arc); /// Trait for converting types into an owned cookie ([`Cookie<'static>`]). pub trait IntoCookie { /// Converts the implementor into a optional owned [`Cookie<'static>`]. fn into_cookie(self) -> Option>; } /// A single HTTP cookie. #[derive(Debug, Clone)] pub struct Cookie<'a>(RawCookie<'a>); /// A good default `CookieStore` implementation. /// /// This is the implementation used when simply calling `cookie_store(true)`. /// This type is exposed to allow creating one and filling it with some /// existing cookies more easily, before creating a [`crate::Client`].
#[derive(Debug, Default)] pub struct Jar(RwLock>>); // ===== impl IntoCookie ===== impl IntoCookie for Cookie<'_> { #[inline] fn into_cookie(self) -> Option> { Some(self.into_owned()) } } impl IntoCookie for RawCookie<'_> { #[inline] fn into_cookie(self) -> Option> { Some(Cookie(self.into_owned())) } } impl IntoCookie for &str { #[inline] fn into_cookie(self) -> Option> { RawCookie::parse(self).map(|c| Cookie(c.into_owned())).ok() } } // ===== impl Cookie ===== impl<'a> Cookie<'a> { pub(crate) fn parse(value: &'a HeaderValue) -> crate::Result> { std::str::from_utf8(value.as_bytes()) .map_err(cookie::ParseError::from) .and_then(cookie::Cookie::parse) .map_err(Error::decode) .map(Cookie) } /// The name of the cookie. #[inline] pub fn name(&self) -> &str { self.0.name() } /// The value of the cookie. #[inline] pub fn value(&self) -> &str { self.0.value() } /// Returns true if the 'HttpOnly' directive is enabled. #[inline] pub fn http_only(&self) -> bool { self.0.http_only().unwrap_or(false) } /// Returns true if the 'Secure' directive is enabled. #[inline] pub fn secure(&self) -> bool { self.0.secure().unwrap_or(false) } /// Returns true if 'SameSite' directive is 'Lax'. #[inline] pub fn same_site_lax(&self) -> bool { self.0.same_site() == Some(SameSite::Lax) } /// Returns true if 'SameSite' directive is 'Strict'. #[inline] pub fn same_site_strict(&self) -> bool { self.0.same_site() == Some(SameSite::Strict) } /// Returns the path directive of the cookie, if set. #[inline] pub fn path(&self) -> Option<&str> { self.0.path() } /// Returns the domain directive of the cookie, if set. #[inline] pub fn domain(&self) -> Option<&str> { self.0.domain() } /// Get the Max-Age information. #[inline] pub fn max_age(&self) -> Option { self.0.max_age().and_then(|d| d.try_into().ok()) } /// The cookie expiration time.
#[inline] pub fn expires(&self) -> Option { match self.0.expires() { Some(Expiration::DateTime(offset)) => Some(SystemTime::from(offset)), None | Some(Expiration::Session) => None, } } /// Converts `self` into a `Cookie` with a static lifetime with as few /// allocations as possible. #[inline] pub fn into_owned(self) -> Cookie<'static> { Cookie(self.0.into_owned()) } } impl fmt::Display for Cookie<'_> { #[inline] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.0.fmt(f) } } impl<'c> From> for Cookie<'c> { #[inline] fn from(cookie: RawCookie<'c>) -> Cookie<'c> { Cookie(cookie) } } impl<'c> From> for RawCookie<'c> { #[inline] fn from(cookie: Cookie<'c>) -> RawCookie<'c> { cookie.0 } } // ===== impl Jar ===== macro_rules! into_uri { ($expr:expr) => { match $expr.into_uri() { Ok(u) => u, Err(_) => return, } }; } impl Jar { /// Get a cookie by name for a given Uri. /// /// Returns the cookie with the specified name for the domain and path /// derived from the given Uri, if it exists. /// /// # Example /// ``` /// use wreq::cookie::Jar; /// let jar = Jar::default(); /// jar.add("foo=bar; Path=/foo; Domain=example.com", "http://example.com/foo"); /// let cookie = jar.get("foo", "http://example.com/foo").unwrap(); /// assert_eq!(cookie.value(), "bar"); /// ``` pub fn get(&self, name: &str, uri: U) -> Option> { let uri = uri.into_uri().ok()?; let host = normalize_domain(uri.host()?); let cookie = self .0 .read() .get(host)? .get(uri.path())? .get(name)? .clone() .into_owned(); Some(Cookie(cookie)) } /// Get all cookies in this jar. /// /// Returns an iterator over all cookies currently stored in the jar, /// regardless of domain or path.
// NOTE(review): whitespace-collapsed region; generic bounds stripped (e.g.
// `pub fn add(&self, cookie: C, uri: U) where C: IntoCookie, U: IntoUri` is
// intact, but collection turbofishes like `.collect::>()` lost their type
// arguments; confirm upstream). RFC 6265 insertion/matching logic here is
// order-sensitive, so code is left byte-identical.
// Region contents: `Jar::get_all` (snapshots the jar, backfilling missing
// Domain/Path from the storage keys), `Jar::add` (RFC 6265 §5.3 host/domain
// match rejection, §5.1.4 default-path computation, and removal of cookies
// that arrive already expired via Max-Age=0 or a past Expires), `Jar::remove`,
// `Jar::clear`, the `CookieStore` impl (parsing Set-Cookie headers; `cookies`
// filters by domain/path match, drops Secure cookies on plain http and expired
// cookies, then serializes one-header-per-cookie for HTTP/2+ or a single
// "; "-joined header otherwise), the `DEFAULT_PATH` constant, and
// `domain_match` (exact match or dot-boundary suffix match per §5.1.3).
/// /// # Example /// ``` /// use wreq::cookie::Jar; /// let jar = Jar::default(); /// jar.add("foo=bar; Domain=example.com", "http://example.com"); /// for cookie in jar.get_all() { /// println!("{}={}", cookie.name(), cookie.value()); /// } /// ``` pub fn get_all(&self) -> impl Iterator> { self.0 .read() .iter() .flat_map(|(domain, path_map)| { path_map.iter().flat_map(|(path, name_map)| { name_map.iter().map(|cookie| { let mut cookie = cookie.clone().into_owned(); if cookie.domain().is_none() { cookie.set_domain(domain.to_owned()); } if cookie.path().is_none() { cookie.set_path(path.to_owned()); } Cookie(cookie) }) }) }) .collect::>() .into_iter() } /// Add a cookie to this jar. /// /// # Example /// /// ``` /// use wreq::cookie::Jar; /// use cookie::CookieBuilder; /// let jar = Jar::default(); /// let cookie = CookieBuilder::new("foo", "bar") /// .domain("example.com") /// .path("/") /// .build(); /// jar.add(cookie, "http://example.com"); /// /// let cookie = CookieBuilder::new("foo", "bar") /// .domain("example.com") /// .path("/") /// .build(); /// jar.add(cookie, "http://example.com"); /// ``` pub fn add(&self, cookie: C, uri: U) where C: IntoCookie, U: IntoUri, { if let Some(cookie) = cookie.into_cookie() { let uri = into_uri!(uri); let mut cookie: RawCookie<'static> = cookie.into(); // If the request-uri contains no host component: let Some(host) = uri.host() else { return; }; // If the canonicalized request-host does not domain-match the // domain-attribute: // Ignore the cookie entirely and abort these steps.
// // RFC 6265 §5.3 + §5.1.3: // https://datatracker.ietf.org/doc/html/rfc6265#section-5.3 // https://datatracker.ietf.org/doc/html/rfc6265#section-5.1.3 let domain = if let Some(domain) = cookie.domain() { let domain = normalize_domain(domain); if domain.is_empty() || !domain_match(normalize_domain(host), domain) { return; } domain } else { normalize_domain(host) }; // If the request-uri contains no path component or if the first character of the // path component of the request-uri is not a %x2F ("/") OR if the cookie's path- // attribute is missing or does not start with a %x2F ("/"): // Let cookie-path be the default-path of the request-uri. // Otherwise: // Let cookie-path be the substring of the request-uri's path from the first // character up to, not including, the right-most %x2F ("/"). // // RFC 6265 §5.2.4 + §5.1.4: // https://datatracker.ietf.org/doc/html/rfc6265#section-5.2.4 // https://datatracker.ietf.org/doc/html/rfc6265#section-5.1.4 let path = cookie .path() .filter(|path| path.starts_with(DEFAULT_PATH)) .unwrap_or_else(|| normalize_path(uri.path())); let mut inner = self.0.write(); let name_map = inner .entry(domain.to_owned()) .or_default() .entry(path.to_owned()) .or_default(); // RFC 6265: If Max-Age=0 or Expires in the past, remove the cookie let expired = cookie .expires_datetime() .is_some_and(|dt| dt <= SystemTime::now()) || cookie.max_age().is_some_and(Duration::is_zero); if expired { name_map.remove(cookie); } else { cookie.set_path(path.to_owned()); name_map.add(cookie); } } } /// Remove a cookie by name for a given Uri. /// /// Removes the cookie with the specified name for the domain and path /// derived from the given Uri, if it exists.
/// /// # Example /// ``` /// use wreq::cookie::Jar; /// let jar = Jar::default(); /// jar.add("foo=bar; Path=/foo; Domain=example.com", "http://example.com/foo"); /// assert!(jar.get("foo", "http://example.com/foo").is_some()); /// jar.remove("foo", "http://example.com/foo"); /// assert!(jar.get("foo", "http://example.com/foo").is_none()); /// ``` pub fn remove(&self, cookie: C, uri: U) where C: Into>, U: IntoUri, { let uri = into_uri!(uri); if let Some(host) = uri.host() { let host = normalize_domain(host); let mut inner = self.0.write(); if let Some(path_map) = inner.get_mut(host) { if let Some(name_map) = path_map.get_mut(uri.path()) { name_map.remove(cookie.into()); } } } } /// Clear all cookies from this jar. /// /// Removes all cookies from the jar, leaving it empty. /// /// # Example /// ``` /// use wreq::cookie::Jar; /// let jar = Jar::default(); /// jar.add("foo=bar; Domain=example.com", "http://example.com"); /// assert_eq!(jar.get_all().count(), 1); /// jar.clear(); /// assert_eq!(jar.get_all().count(), 0); /// ``` pub fn clear(&self) { self.0.write().clear(); } } impl CookieStore for Jar { fn set_cookies(&self, cookie_headers: &mut dyn Iterator, uri: &Uri) { let cookies = cookie_headers .map(Cookie::parse) .filter_map(Result::ok) .map(|cookie| cookie.0.into_owned()); for cookie in cookies { self.add(cookie, uri); } } fn cookies(&self, uri: &Uri, version: Version) -> Cookies { let host = match uri.host() { Some(h) => normalize_domain(h), None => return Cookies::Empty, }; let store = self.0.read(); let iter = store .iter() .filter(|(domain, _)| domain_match(host, domain)) .flat_map(|(_, path_map)| { path_map .iter() .filter(|(path, _)| path_match(uri.path(), path)) .flat_map(|(_, name_map)| { name_map.iter().filter(|cookie| { if cookie.secure() == Some(true) && uri.is_http() { return false; } if cookie .expires_datetime() .is_some_and(|dt| dt <= SystemTime::now()) { return false; } true }) }) }); if matches!(version, Version::HTTP_2 | Version::HTTP_3) {
let cookies = iter .map(|cookie| { let name = cookie.name(); let value = cookie.value(); let mut cookie_str = String::with_capacity(name.len() + 1 + value.len()); cookie_str.push_str(name); cookie_str.push('='); cookie_str.push_str(value); HeaderValue::from_maybe_shared(Bytes::from(cookie_str)) }) .filter_map(Result::ok) .collect(); Cookies::Uncompressed(cookies) } else { let cookies = iter.fold(String::new(), |mut cookies, cookie| { if !cookies.is_empty() { cookies.push_str("; "); } cookies.push_str(cookie.name()); cookies.push('='); cookies.push_str(cookie.value()); cookies }); if cookies.is_empty() { return Cookies::Empty; } HeaderValue::from_maybe_shared(Bytes::from(cookies)) .map(Cookies::Compressed) .unwrap_or(Cookies::Empty) } } } const DEFAULT_PATH: &str = "/"; /// Determines if the given `host` matches the cookie `domain` according to /// [RFC 6265 section 5.1.3](https://datatracker.ietf.org/doc/html/rfc6265#section-5.1.3). /// /// - Returns true if the host and domain are identical. /// - Returns true if the host is a subdomain of the domain (host ends with ".domain"). /// - Returns false otherwise. fn domain_match(host: &str, domain: &str) -> bool { if domain.is_empty() { return false; } if host == domain { return true; } host.len() > domain.len() && host.as_bytes()[host.len() - domain.len() - 1] == b'.' && host.ends_with(domain) } /// Determines if the request path matches the cookie path according to /// [RFC 6265 section 5.1.4](https://datatracker.ietf.org/doc/html/rfc6265#section-5.1.4). /// /// - Returns true if the request path and cookie path are identical. /// - Returns true if the request path starts with the cookie path, and /// - the cookie path ends with '/', or /// - the next character in the request path after the cookie path is '/'. /// - Returns false otherwise.
fn path_match(req_path: &str, cookie_path: &str) -> bool {
    // Identical paths always match.
    if req_path == cookie_path {
        return true;
    }
    // Otherwise the cookie path must be a prefix of the request path, and the
    // boundary must fall on a "/" — either the cookie path itself ends with
    // one, or the remainder of the request path begins with one.
    match req_path.strip_prefix(cookie_path) {
        Some(remainder) => {
            cookie_path.ends_with(DEFAULT_PATH) || remainder.starts_with(DEFAULT_PATH)
        }
        None => false,
    }
}

/// Normalizes a domain by stripping any port information.
///
/// According to [RFC 6265 section 5.2.3](https://datatracker.ietf.org/doc/html/rfc6265#section-5.2.3),
/// the domain attribute of a cookie must not include a port. If a port is present (non-standard),
/// it will be ignored for domain matching purposes.
fn normalize_domain(domain: &str) -> &str {
    // Keep only the part before the first ':' — a non-standard port suffix.
    let host = match domain.find(':') {
        Some(colon) => &domain[..colon],
        None => domain,
    };
    // Trim one leading dot (e.g. ".example.com") and one trailing dot
    // (a fully-qualified form like "example.com.").
    let host = host.strip_prefix('.').unwrap_or(host);
    host.strip_suffix('.').unwrap_or(host)
}

/// Computes the normalized default path for a cookie as specified in
/// [RFC 6265 section 5.1.4](https://datatracker.ietf.org/doc/html/rfc6265#section-5.1.4).
///
/// This function normalizes the path for a cookie, ensuring it matches
/// browser and server expectations for default cookie scope.
fn normalize_path(path: &str) -> &str { if !path.starts_with(DEFAULT_PATH) { return DEFAULT_PATH; } if let Some(pos) = path.rfind(DEFAULT_PATH) { if pos == 0 { return DEFAULT_PATH; } return &path[..pos]; } DEFAULT_PATH } #[cfg(test)] mod tests { use http::{Uri, Version}; use super::{CookieStore, Cookies, Jar}; #[test] fn jar_get_all_backfills_domain_and_path() { let jar = Jar::default(); jar.add("session=abc", "http://example.com/foo/bar"); let cookies = jar.get_all().collect::>(); assert_eq!(cookies.len(), 1); let cookie = &cookies[0]; assert_eq!(cookie.name(), "session"); assert_eq!(cookie.value(), "abc"); assert_eq!(cookie.domain(), Some("example.com")); assert_eq!(cookie.path(), Some("/foo")); } #[test] fn jar_get_all_keeps_existing_domain_and_path() { let jar = Jar::default(); jar.add( "session=abc; Domain=example.com; Path=/custom", "http://example.com/foo/bar", ); let cookies = jar.get_all().collect::>(); assert_eq!(cookies.len(), 1); let cookie = &cookies[0]; assert_eq!(cookie.name(), "session"); assert_eq!(cookie.value(), "abc"); assert_eq!(cookie.domain(), Some("example.com")); assert_eq!(cookie.path(), Some("/custom")); } #[test] fn jar_get_all_backfills_only_missing_field() { let jar = Jar::default(); jar.add("a=1; Domain=example.com", "http://example.com/foo/bar"); jar.add("b=2; Path=/fixed", "http://example.com/foo/bar"); let mut cookies = jar.get_all().collect::>(); cookies.sort_by(|left, right| left.name().cmp(right.name())); let a = &cookies[0]; assert_eq!(a.name(), "a"); assert_eq!(a.domain(), Some("example.com")); assert_eq!(a.path(), Some("/foo")); let b = &cookies[1]; assert_eq!(b.name(), "b"); assert_eq!(b.domain(), Some("example.com")); assert_eq!(b.path(), Some("/fixed")); } #[test] fn jar_add_rejects_mismatched_domain() { let jar = Jar::default(); jar.add("session=abc; Domain=other.com", "http://example.com/foo"); assert_eq!(jar.get_all().count(), 0); } #[test] fn jar_add_accepts_matching_parent_domain() { let jar = Jar::default(); jar.add( 
"session=abc; Domain=example.com", "http://api.example.com/foo", ); let cookies = jar.get_all().collect::>(); assert_eq!(cookies.len(), 1); assert_eq!(cookies[0].domain(), Some("example.com")); } #[test] fn jar_get_all_export_import_keeps_effective_path() { let source = Jar::default(); source.add("session=abc", "http://example.com/foo/bar"); let exported = source.get_all().collect::>(); assert_eq!(exported.len(), 1); assert_eq!(exported[0].path(), Some("/foo")); let target = Jar::default(); for cookie in exported { target.add(cookie, "http://example.com/another/deeper"); } let imported = target.get_all().collect::>(); assert_eq!(imported.len(), 1); assert_eq!(imported[0].path(), Some("/foo")); } #[test] fn cookie_store_invalid_explicit_path_falls_back_to_default_path() { let jar = Jar::default(); jar.add("key=val; Path=noslash", "http://example.com/foo/bar"); assert!(jar.get("key", "http://example.com/foo").is_some()); assert!(jar.get("key", "http://example.com/noslash").is_none()); let cookies = jar.get_all().collect::>(); assert_eq!(cookies.len(), 1); assert_eq!(cookies[0].path(), Some("/foo")); } #[test] fn jar_sends_parent_domain_cookie_to_subdomain() { let jar = Jar::default(); jar.add( "session=abc; Domain=example.com; Path=/", "http://example.com/login", ); let should_receive = [ "http://example.com/dashboard", "http://api.example.com/dashboard", "http://sub.api.example.com/dashboard", ]; for uri_str in &should_receive { let uri = Uri::from_static(uri_str); match jar.cookies(&uri, Version::HTTP_11) { Cookies::Compressed(v) => assert_eq!( v.to_str().unwrap(), "session=abc", "expected cookie to be sent to {uri_str}" ), other => panic!("expected Compressed cookie for {uri_str}, got {other:?}"), } } let should_not_receive = [ "http://notexample.com/dashboard", "http://fakeexample.com/dashboard", ]; for uri_str in &should_not_receive { let uri = Uri::from_static(uri_str); assert!( matches!(jar.cookies(&uri, Version::HTTP_11), Cookies::Empty), "cookie must NOT be 
sent to {uri_str}" ); } } #[test] fn jar_subdomain_cookie_does_not_leak_to_parent_or_sibling() { let jar = Jar::default(); jar.add( "token=xyz; Domain=api.example.com; Path=/", "http://api.example.com/", ); let uri = Uri::from_static("http://api.example.com/"); assert!( matches!(jar.cookies(&uri, Version::HTTP_11), Cookies::Compressed(_)), "cookie must be sent to api.example.com" ); let must_not_receive = [ "http://example.com/", "http://other.example.com/", "http://notapi.example.com/", ]; for uri_str in &must_not_receive { let uri = Uri::from_static(uri_str); assert!( matches!(jar.cookies(&uri, Version::HTTP_11), Cookies::Empty), "cookie must NOT leak to {uri_str}" ); } } } ================================================ FILE: src/dns/gai.rs ================================================ use std::{ future::Future, io, net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6, ToSocketAddrs}, pin::Pin, task::{self, Poll}, vec, }; use tokio::task::JoinHandle; use tower::Service; use super::{Addrs, Name, Resolve, Resolving}; /// A resolver using blocking `getaddrinfo` calls in a threadpool. #[derive(Clone, Default)] pub struct GaiResolver { _priv: (), } /// An iterator of IP addresses returned from `getaddrinfo`. pub struct GaiAddrs { inner: SocketAddrs, } /// A future to resolve a name returned by `GaiResolver`. pub struct GaiFuture { inner: JoinHandle>, } /// A wrapper around `SocketAddrs` to implement the `Iterator` trait. pub(crate) struct SocketAddrs { iter: vec::IntoIter, } // ==== impl GaiResolver ==== impl GaiResolver { /// Creates a new [`GaiResolver`]. 
pub fn new() -> Self { GaiResolver { _priv: () } } } impl Service for GaiResolver { type Response = GaiAddrs; type Error = io::Error; type Future = GaiFuture; fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> Poll> { Poll::Ready(Ok(())) } fn call(&mut self, name: Name) -> Self::Future { let blocking = tokio::task::spawn_blocking(move || { debug!("resolving {}", name); (name.as_str(), 0) .to_socket_addrs() .map(|i| SocketAddrs { iter: i }) }); GaiFuture { inner: blocking } } } impl Resolve for GaiResolver { fn resolve(&self, name: Name) -> Resolving { let mut this = self.clone(); Box::pin(async move { this.call(name) .await .map(|addrs| Box::new(addrs) as Addrs) .map_err(Into::into) }) } } // ==== impl GaiFuture ==== impl Future for GaiFuture { type Output = Result; fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { Pin::new(&mut self.inner).poll(cx).map(|res| match res { Ok(Ok(addrs)) => Ok(GaiAddrs { inner: addrs }), Ok(Err(err)) => Err(err), Err(join_err) => { if join_err.is_cancelled() { Err(io::Error::new(io::ErrorKind::Interrupted, join_err)) } else { panic!("gai background task failed: {join_err:?}") } } }) } } impl Drop for GaiFuture { fn drop(&mut self) { self.inner.abort(); } } // ==== impl GaiAddrs ==== impl Iterator for GaiAddrs { type Item = SocketAddr; fn next(&mut self) -> Option { self.inner.next() } } // ==== impl SocketAddrs ==== impl SocketAddrs { pub(crate) fn new(addrs: Vec) -> Self { SocketAddrs { iter: addrs.into_iter(), } } pub(crate) fn try_parse(host: &str, port: u16) -> Option { if let Ok(addr) = host.parse::() { let addr = SocketAddrV4::new(addr, port); return Some(SocketAddrs { iter: vec![SocketAddr::V4(addr)].into_iter(), }); } if let Ok(addr) = host.parse::() { let addr = SocketAddrV6::new(addr, port, 0, 0); return Some(SocketAddrs { iter: vec![SocketAddr::V6(addr)].into_iter(), }); } None } #[inline] fn filter(self, predicate: impl FnMut(&SocketAddr) -> bool) -> SocketAddrs { 
SocketAddrs::new(self.iter.filter(predicate).collect()) } pub(crate) fn split_by_preference( self, local_addr_ipv4: Option, local_addr_ipv6: Option, ) -> (SocketAddrs, SocketAddrs) { match (local_addr_ipv4, local_addr_ipv6) { (Some(_), None) => (self.filter(SocketAddr::is_ipv4), SocketAddrs::new(vec![])), (None, Some(_)) => (self.filter(SocketAddr::is_ipv6), SocketAddrs::new(vec![])), _ => { let preferring_v6 = self .iter .as_slice() .first() .map(SocketAddr::is_ipv6) .unwrap_or(false); let (preferred, fallback) = self .iter .partition::, _>(|addr| addr.is_ipv6() == preferring_v6); (SocketAddrs::new(preferred), SocketAddrs::new(fallback)) } } } pub(crate) fn is_empty(&self) -> bool { self.iter.as_slice().is_empty() } pub(crate) fn len(&self) -> usize { self.iter.as_slice().len() } } impl Iterator for SocketAddrs { type Item = SocketAddr; #[inline] fn next(&mut self) -> Option { self.iter.next() } } #[cfg(test)] mod tests { use std::net::{Ipv4Addr, Ipv6Addr}; use super::*; #[test] fn test_ip_addrs_split_by_preference() { let ip_v4 = Ipv4Addr::new(127, 0, 0, 1); let ip_v6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); let v4_addr = (ip_v4, 80).into(); let v6_addr = (ip_v6, 80).into(); let (mut preferred, mut fallback) = SocketAddrs { iter: vec![v4_addr, v6_addr].into_iter(), } .split_by_preference(None, None); assert!(preferred.next().unwrap().is_ipv4()); assert!(fallback.next().unwrap().is_ipv6()); let (mut preferred, mut fallback) = SocketAddrs { iter: vec![v6_addr, v4_addr].into_iter(), } .split_by_preference(None, None); assert!(preferred.next().unwrap().is_ipv6()); assert!(fallback.next().unwrap().is_ipv4()); let (mut preferred, mut fallback) = SocketAddrs { iter: vec![v4_addr, v6_addr].into_iter(), } .split_by_preference(Some(ip_v4), Some(ip_v6)); assert!(preferred.next().unwrap().is_ipv4()); assert!(fallback.next().unwrap().is_ipv6()); let (mut preferred, mut fallback) = SocketAddrs { iter: vec![v6_addr, v4_addr].into_iter(), } .split_by_preference(Some(ip_v4), 
Some(ip_v6)); assert!(preferred.next().unwrap().is_ipv6()); assert!(fallback.next().unwrap().is_ipv4()); let (mut preferred, fallback) = SocketAddrs { iter: vec![v4_addr, v6_addr].into_iter(), } .split_by_preference(Some(ip_v4), None); assert!(preferred.next().unwrap().is_ipv4()); assert!(fallback.is_empty()); let (mut preferred, fallback) = SocketAddrs { iter: vec![v4_addr, v6_addr].into_iter(), } .split_by_preference(None, Some(ip_v6)); assert!(preferred.next().unwrap().is_ipv6()); assert!(fallback.is_empty()); } #[test] fn test_name_from_str() { const DOMAIN: &str = "test.example.com"; let name = Name::from(DOMAIN); assert_eq!(name.as_str(), DOMAIN); assert_eq!(name.to_string(), DOMAIN); } } ================================================ FILE: src/dns/hickory.rs ================================================ //! DNS resolution via the [hickory-resolver](https://github.com/hickory-dns/hickory-dns) crate use std::{net::SocketAddr, sync::LazyLock}; use hickory_resolver::{ TokioResolver, config::{self, LookupIpStrategy, ResolverConfig}, net::runtime::TokioRuntimeProvider, }; use super::{Addrs, Name, Resolve, Resolving}; /// Wrapper around an [`TokioResolver`], which implements the `Resolve` trait. #[derive(Debug, Clone)] pub struct HickoryDnsResolver { /// Shared, lazily-initialized Tokio-based DNS resolver. /// /// Backed by [`LazyLock`] to guarantee thread-safe, one-time creation. /// On initialization, it attempts to load the system's DNS configuration; /// if unavailable, it falls back to sensible default settings. resolver: &'static LazyLock, } impl HickoryDnsResolver { /// Create a new resolver with the default configuration, /// which reads from `/etc/resolve.conf`. The options are /// overridden to look up both IPv4 and IPv6 addresses /// to support the "happy eyeballs" algorithm. /// /// SAFETY: `build` only fails if DNS-over-TLS is enabled and default TLS config creation fails. 
pub fn new() -> HickoryDnsResolver { static RESOLVER: LazyLock = LazyLock::new(|| { let mut builder = match TokioResolver::builder_tokio() { Ok(resolver) => { debug!("using system DNS configuration"); resolver } Err(_err) => { debug!("error reading DNS system conf: {}, using defaults", _err); TokioResolver::builder_with_config( ResolverConfig::udp_and_tcp(&config::GOOGLE), TokioRuntimeProvider::default(), ) } }; builder.options_mut().ip_strategy = LookupIpStrategy::Ipv4AndIpv6; builder.build().expect("failed to create DNS resolver") }); HickoryDnsResolver { resolver: &RESOLVER, } } } impl Resolve for HickoryDnsResolver { fn resolve(&self, name: Name) -> Resolving { let resolver = self.clone(); Box::pin(async move { let lookup = resolver.resolver.lookup_ip(name.as_str()).await?; let addrs: Addrs = Box::new( lookup .iter() .map(|ip_addr| SocketAddr::new(ip_addr, 0)) .collect::>() .into_iter(), ); Ok(addrs) }) } } ================================================ FILE: src/dns/resolve.rs ================================================ use std::{ borrow::Cow, collections::HashMap, fmt, future::Future, net::SocketAddr, pin::Pin, sync::Arc, task::{Context, Poll}, }; use tower::{BoxError, Service}; /// A domain name to resolve into IP addresses. #[derive(Clone, Hash, Eq, PartialEq)] pub struct Name { host: Box, } impl Name { /// Creates a new [`Name`] from a string slice. #[inline] pub fn new(host: Box) -> Name { Name { host } } /// View the hostname as a string slice. #[inline] pub fn as_str(&self) -> &str { &self.host } } impl From<&str> for Name { fn from(value: &str) -> Self { Name::new(value.into()) } } impl fmt::Debug for Name { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Debug::fmt(&self.host, f) } } impl fmt::Display for Name { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Display::fmt(&self.host, f) } } /// Alias for an `Iterator` trait object over `SocketAddr`. 
pub type Addrs = Box + Send>; /// Alias for the `Future` type returned by a DNS resolver. pub type Resolving = Pin> + Send>>; /// Trait for customizing DNS resolution in wreq. pub trait Resolve: Send + Sync { /// Performs DNS resolution on a `Name`. /// The return type is a future containing an iterator of `SocketAddr`. /// /// It differs from `tower::Service` in several ways: /// * It is assumed that `resolve` will always be ready to poll. /// * It does not need a mutable reference to `self`. /// * Since trait objects cannot make use of associated types, it requires wrapping the /// returned `Future` and its contained `Iterator` with `Box`. /// /// Explicitly specified port in the URI will override any port in the resolved `SocketAddr`s. /// Otherwise, port `0` will be replaced by the conventional port for the given scheme (e.g. 80 /// for http). fn resolve(&self, name: Name) -> Resolving; } impl_into_shared!( /// Trait for converting types into a shared DNS resolver ([`Arc`]). /// /// Implemented for any [`Resolve`] type, [`Arc`] where `T: Resolve`, and [`Arc`]. /// Enables ergonomic conversion to a trait object for use in APIs without manual Arc wrapping. pub trait IntoResolve => Resolve ); /// Adapter that wraps a [`Resolve`] trait object to work with Tower's `Service` trait. /// /// This allows custom DNS resolvers implementing `Resolve` to be used in contexts /// that expect a `Service` implementation. #[derive(Clone)] pub(crate) struct DynResolver { resolver: Arc, } impl DynResolver { /// Creates a new [`DynResolver`] with the provided resolver. pub(crate) fn new(resolver: Arc) -> Self { Self { resolver } } } impl Service for DynResolver { type Response = Addrs; type Error = BoxError; type Future = Resolving; fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) } fn call(&mut self, name: Name) -> Self::Future { self.resolver.resolve(name) } } /// DNS resolver that supports hostname overrides. 
/// /// This resolver first checks for manual hostname-to-IP mappings before /// falling back to the underlying DNS resolver. Useful for testing or /// bypassing DNS for specific domains. pub(crate) struct DnsResolverWithOverrides { dns_resolver: Arc, overrides: Arc, Vec>>, } impl DnsResolverWithOverrides { /// Creates a new [`DnsResolverWithOverrides`] with the provided DNS resolver and overrides. pub(crate) fn new( dns_resolver: Arc, overrides: HashMap, Vec>, ) -> Self { DnsResolverWithOverrides { dns_resolver, overrides: Arc::new(overrides), } } } impl Resolve for DnsResolverWithOverrides { fn resolve(&self, name: Name) -> Resolving { match self.overrides.get(name.as_str()) { Some(dest) => { let addrs: Addrs = Box::new(dest.clone().into_iter()); Box::pin(std::future::ready(Ok(addrs))) } None => self.dns_resolver.resolve(name), } } } ================================================ FILE: src/dns.rs ================================================ //! DNS resolution pub(crate) mod gai; #[cfg(feature = "hickory-dns")] pub(crate) mod hickory; pub(crate) mod resolve; pub use resolve::{Addrs, IntoResolve, Name, Resolve, Resolving}; pub(crate) use self::{ gai::{GaiResolver, SocketAddrs}, resolve::{DnsResolverWithOverrides, DynResolver}, sealed::{InternalResolve, resolve}, }; mod sealed { use std::{ future::Future, net::SocketAddr, task::{self, Poll}, }; use tower::{BoxError, Service}; use super::Name; /// Internal adapter trait for DNS resolvers. /// /// This trait provides a unified interface for different resolver implementations, /// allowing both custom [`super::Resolve`] types and Tower [`Service`] implementations /// to be used interchangeably within the connector. pub trait InternalResolve { type Addrs: Iterator; type Error: Into; type Future: Future>; fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll>; fn resolve(&mut self, name: Name) -> Self::Future; } /// Automatic implementation for any Tower [`Service`] that resolves names to socket addresses. 
impl InternalResolve for S where S: Service, S::Response: Iterator, S::Error: Into, { type Addrs = S::Response; type Error = S::Error; type Future = S::Future; fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { Service::poll_ready(self, cx) } fn resolve(&mut self, name: Name) -> Self::Future { Service::call(self, name) } } pub async fn resolve(resolver: &mut R, name: Name) -> Result where R: InternalResolve, { std::future::poll_fn(|cx| resolver.poll_ready(cx)).await?; resolver.resolve(name).await } } ================================================ FILE: src/error.rs ================================================ use std::{error::Error as StdError, fmt, io}; use http::Uri; use crate::{StatusCode, client::http1::ext::ReasonPhrase, util::Escape}; /// A `Result` alias where the `Err` case is `wreq::Error`. pub type Result = std::result::Result; /// A boxed error type that can be used for dynamic error handling. pub type BoxError = Box; /// The Errors that may occur when processing a `Request`. /// /// Note: Errors may include the full URI used to make the `Request`. If the URI /// contains sensitive information (e.g. 
an API key as a query parameter), be /// sure to remove it ([`without_uri`](Error::without_uri)) pub struct Error { inner: Box, } struct Inner { kind: Kind, source: Option, uri: Option, } #[derive(Debug)] enum Kind { Builder, Request, Tls, Redirect, Status(StatusCode, Option), Body, Decode, Upgrade, #[cfg(feature = "ws")] WebSocket, } impl Error { fn new(kind: Kind, source: Option) -> Error where E: Into, { Error { inner: Box::new(Inner { kind, source: source.map(Into::into), uri: None, }), } } #[inline] pub(crate) fn builder>(e: E) -> Error { Error::new(Kind::Builder, Some(e)) } #[inline] pub(crate) fn body>(e: E) -> Error { Error::new(Kind::Body, Some(e)) } #[inline] pub(crate) fn tls>(e: E) -> Error { Error::new(Kind::Tls, Some(e)) } #[inline] pub(crate) fn decode>(e: E) -> Error { Error::new(Kind::Decode, Some(e)) } #[inline] pub(crate) fn request>(e: E) -> Error { Error::new(Kind::Request, Some(e)) } #[inline] pub(crate) fn redirect>(e: E, uri: Uri) -> Error { Error::new(Kind::Redirect, Some(e)).with_uri(uri) } #[inline] pub(crate) fn upgrade>(e: E) -> Error { Error::new(Kind::Upgrade, Some(e)) } #[inline] #[cfg(feature = "ws")] pub(crate) fn websocket>(e: E) -> Error { Error::new(Kind::WebSocket, Some(e)) } #[inline] pub(crate) fn status_code(uri: Uri, status: StatusCode, reason: Option) -> Error { Error::new(Kind::Status(status, reason), None::).with_uri(uri) } #[inline] pub(crate) fn uri_bad_scheme(uri: Uri) -> Error { Error::new(Kind::Builder, Some(BadScheme)).with_uri(uri) } } impl Error { /// Returns a possible URI related to this error. 
    ///
    /// # Examples
    ///
    /// ```
    /// # async fn run() {
    /// // displays last stop of a redirect loop
    /// let response = wreq::get("http://site.with.redirect.loop")
    ///     .send()
    ///     .await;
    /// if let Err(e) = response {
    ///     if e.is_redirect() {
    ///         if let Some(final_stop) = e.uri() {
    ///             println!("redirect loop at {}", final_stop);
    ///         }
    ///     }
    /// }
    /// # }
    /// ```
    #[inline]
    pub fn uri(&self) -> Option<&Uri> {
        self.inner.uri.as_ref()
    }

    /// Returns a mutable reference to the URI related to this error
    ///
    /// This is useful if you need to remove sensitive information from the URI
    /// (e.g. an API key in the query), but do not want to remove the URI
    /// entirely.
    #[inline]
    pub fn uri_mut(&mut self) -> Option<&mut Uri> {
        self.inner.uri.as_mut()
    }

    /// Add a uri related to this error (overwriting any existing)
    #[inline]
    pub fn with_uri(mut self, uri: Uri) -> Self {
        self.inner.uri = Some(uri);
        self
    }

    /// Strip the related uri from this error (if, for example, it contains
    /// sensitive information)
    #[inline]
    pub fn without_uri(mut self) -> Self {
        self.inner.uri = None;
        self
    }

    /// Returns true if the error is from a type Builder.
    #[inline]
    pub fn is_builder(&self) -> bool {
        matches!(self.inner.kind, Kind::Builder)
    }

    /// Returns true if the error is from a `RedirectPolicy`.
    #[inline]
    pub fn is_redirect(&self) -> bool {
        matches!(self.inner.kind, Kind::Redirect)
    }

    /// Returns true if the error is from `Response::error_for_status`.
    /// Matches any `Kind::Status`, regardless of the code or reason phrase.
    #[inline]
    pub fn is_status(&self) -> bool {
        matches!(self.inner.kind, Kind::Status(_, _))
    }

    /// Returns true if the error is related to a timeout.
    // NOTE(review): turbofish type arguments in this region (e.g. `is::()`,
    // `downcast_ref::()`) appear to have been lost in this extract; they
    // should name the timeout / core-error / `io::Error` types being probed.
    // Confirm against the upstream source before relying on this text.
    pub fn is_timeout(&self) -> bool {
        // Walk the full `source()` chain: a timeout may be wrapped several
        // layers deep (request error -> core error -> io error).
        let mut source = self.source();
        while let Some(err) = source {
            if err.is::() {
                return true;
            }
            if let Some(core_err) = err.downcast_ref::() {
                if core_err.is_timeout() {
                    return true;
                }
            }
            if let Some(io) = err.downcast_ref::() {
                if io.kind() == io::ErrorKind::TimedOut {
                    return true;
                }
            }
            source = err.source();
        }
        false
    }

    /// Returns true if the error is related to the request
    #[inline]
    pub fn is_request(&self) -> bool {
        matches!(self.inner.kind, Kind::Request)
    }

    /// Returns true if the error is related to connect
    pub fn is_connect(&self) -> bool {
        use crate::client::layer::client::Error;

        // Search the source chain for a client-layer connect error.
        let mut source = self.source();
        while let Some(err) = source {
            if let Some(err) = err.downcast_ref::() {
                if err.is_connect() {
                    return true;
                }
            }
            source = err.source();
        }
        false
    }

    /// Returns true if the error is related to proxy connect
    pub fn is_proxy_connect(&self) -> bool {
        use crate::client::layer::client::Error;

        let mut source = self.source();
        while let Some(err) = source {
            if let Some(err) = err.downcast_ref::() {
                if err.is_proxy_connect() {
                    return true;
                }
            }
            source = err.source();
        }
        false
    }

    /// Returns true if the error is related to a connection reset.
    pub fn is_connection_reset(&self) -> bool {
        // Look for an `io::Error` with `ConnectionReset` anywhere in the chain.
        let mut source = self.source();
        while let Some(err) = source {
            if let Some(io) = err.downcast_ref::() {
                if io.kind() == io::ErrorKind::ConnectionReset {
                    return true;
                }
            }
            source = err.source();
        }
        false
    }

    /// Returns true if the error is related to the request or response body
    #[inline]
    pub fn is_body(&self) -> bool {
        matches!(self.inner.kind, Kind::Body)
    }

    /// Returns true if the error is related to TLS
    #[inline]
    pub fn is_tls(&self) -> bool {
        matches!(self.inner.kind, Kind::Tls)
    }

    /// Returns true if the error is related to decoding the response's body
    #[inline]
    pub fn is_decode(&self) -> bool {
        matches!(self.inner.kind, Kind::Decode)
    }

    /// Returns true if the error is related to upgrading the connection
    #[inline]
    pub fn is_upgrade(&self) -> bool {
        matches!(self.inner.kind, Kind::Upgrade)
    }

    /// Returns true if the error is related to WebSocket operations
    #[inline]
    #[cfg(feature = "ws")]
    pub fn is_websocket(&self) -> bool {
        matches!(self.inner.kind, Kind::WebSocket)
    }

    /// Returns the status code, if the error was generated from a response.
    pub fn status(&self) -> Option {
        match self.inner.kind {
            Kind::Status(code, _) => Some(code),
            _ => None,
        }
    }
}

/// Maps external timeout errors (such as `tower::timeout::error::Elapsed`)
/// to the internal `TimedOut` error type used for connector operations.
/// Returns the original error if it is not a timeout.
#[inline]
pub(crate) fn map_timeout_to_connector_error(error: BoxError) -> BoxError {
    if error.is::() {
        Box::new(TimedOut)
    } else {
        error
    }
}

/// Maps external timeout errors (such as `tower::timeout::error::Elapsed`)
/// to the internal request-level `Error` type.
/// Returns the original error if it is not a timeout.
// NOTE(review): the `is::()` turbofish below lost its type argument in this
// extract; per the doc comment above it should test for the external
// timeout type (e.g. `tower::timeout::error::Elapsed`) — confirm upstream.
#[inline]
pub(crate) fn map_timeout_to_request_error(error: BoxError) -> BoxError {
    if error.is::() {
        // Wrap in a request-level `Error` so callers see a wreq timeout.
        Box::new(Error::request(TimedOut))
    } else {
        error
    }
}

impl fmt::Debug for Error {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Only include `uri`/`source` fields when present.
        let mut builder = f.debug_struct("wreq::Error");

        builder.field("kind", &self.inner.kind);

        if let Some(ref uri) = self.inner.uri {
            builder.field("uri", uri);
        }

        if let Some(ref source) = self.inner.source {
            builder.field("source", source);
        }

        builder.finish()
    }
}

impl fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Kind-specific prefix, then optional " for uri (..)" and ": source".
        match self.inner.kind {
            Kind::Builder => f.write_str("builder error")?,
            Kind::Request => f.write_str("error sending request")?,
            Kind::Body => f.write_str("request or response body error")?,
            Kind::Tls => f.write_str("tls error")?,
            Kind::Decode => f.write_str("error decoding response body")?,
            Kind::Redirect => f.write_str("error following redirect")?,
            Kind::Upgrade => f.write_str("error upgrading connection")?,
            #[cfg(feature = "ws")]
            Kind::WebSocket => f.write_str("websocket error")?,
            Kind::Status(ref code, ref reason) => {
                let prefix = if code.is_client_error() {
                    "HTTP status client error"
                } else {
                    // Only 4xx/5xx codes are turned into status errors.
                    debug_assert!(code.is_server_error());
                    "HTTP status server error"
                };
                if let Some(reason) = reason {
                    // Escape the reason phrase: it may contain raw bytes.
                    write!(
                        f,
                        "{prefix} ({} {})",
                        code.as_str(),
                        Escape::new(reason.as_ref())
                    )?;
                } else {
                    write!(f, "{prefix} ({code})")?;
                }
            }
        };

        if let Some(uri) = &self.inner.uri {
            write!(f, " for uri ({})", uri)?;
        }

        if let Some(e) = &self.inner.source {
            write!(f, ": {e}")?;
        }

        Ok(())
    }
}

impl StdError for Error {
    #[inline]
    fn source(&self) -> Option<&(dyn StdError + 'static)> {
        self.inner.source.as_ref().map(|e| &**e as _)
    }
}

// Marker error for timed-out operations.
#[derive(Debug)]
pub(crate) struct TimedOut;

// Marker error for a URI scheme that is not allowed.
#[derive(Debug)]
pub(crate) struct BadScheme;

// Wrapper marking an underlying error as a proxy-connect failure.
#[derive(Debug)]
pub(crate) struct ProxyConnect(pub(crate) BoxError);

// ==== impl TimedOut ====

impl StdError for TimedOut {}

impl fmt::Display for TimedOut {
    fn fmt(&self, f: &mut fmt::Formatter) ->
fmt::Result { f.write_str("operation timed out") } } // ==== impl BadScheme ==== impl StdError for BadScheme {} impl fmt::Display for BadScheme { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_str("URI scheme is not allowed") } } // ==== impl ProxyConnect ==== impl StdError for ProxyConnect { #[inline] fn source(&self) -> Option<&(dyn StdError + 'static)> { Some(&*self.0) } } impl fmt::Display for ProxyConnect { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "proxy connect error: {}", self.0) } } #[cfg(test)] mod tests { use super::*; fn assert_send() {} fn assert_sync() {} impl super::Error { fn into_io(self) -> io::Error { io::Error::other(self) } } fn decode_io(e: io::Error) -> Error { if e.get_ref().map(|r| r.is::()).unwrap_or(false) { *e.into_inner() .expect("io::Error::get_ref was Some(_)") .downcast::() .expect("StdError::is() was true") } else { Error::decode(e) } } #[test] fn test_source_chain() { let root = Error::new(Kind::Request, None::); assert!(root.source().is_none()); let link = Error::body(root); assert!(link.source().is_some()); assert_send::(); assert_sync::(); } #[test] fn mem_size_of() { use std::mem::size_of; assert_eq!(size_of::(), size_of::()); } #[test] fn roundtrip_io_error() { let orig = Error::request("orig"); // Convert wreq::Error into an io::Error... let io = orig.into_io(); // Convert that io::Error back into a wreq::Error... let err = decode_io(io); // It should have pulled out the original, not nested it... 
match err.inner.kind { Kind::Request => (), _ => panic!("{err:?}"), } } #[test] fn from_unknown_io_error() { let orig = io::Error::other("orly"); let err = decode_io(orig); match err.inner.kind { Kind::Decode => (), _ => panic!("{err:?}"), } } #[test] fn is_timeout() { let err = Error::request(super::TimedOut); assert!(err.is_timeout()); let io = io::Error::from(io::ErrorKind::TimedOut); let nested = Error::request(io); assert!(nested.is_timeout()); } #[test] fn is_connection_reset() { let err = Error::request(io::Error::new( io::ErrorKind::ConnectionReset, "connection reset", )); assert!(err.is_connection_reset()); let io = io::Error::other(err); let nested = Error::request(io); assert!(nested.is_connection_reset()); } } ================================================ FILE: src/ext.rs ================================================ //! Extension utilities. use bytes::Bytes; use http::uri::{Authority, Scheme, Uri}; use percent_encoding::{AsciiSet, CONTROLS}; use crate::Body; /// See: const FRAGMENT: &AsciiSet = &CONTROLS.add(b' ').add(b'"').add(b'<').add(b'>').add(b'`'); /// See: const PATH: &AsciiSet = &FRAGMENT.add(b'#').add(b'?').add(b'{').add(b'}'); /// See: const USERINFO: &AsciiSet = &PATH .add(b'/') .add(b':') .add(b';') .add(b'=') .add(b'@') .add(b'[') .add(b'\\') .add(b']') .add(b'^') .add(b'|'); macro_rules! 
// Macro generating an `IntoShared`-style conversion trait for a target trait:
// the generated trait converts `self` into a shared `Arc`.
// NOTE(review): the generic/trait-object parts of the three blanket impls
// below (e.g. `Arc<dyn $target>`, an `impl<T: $target>` head) appear to have
// been stripped from this extract — confirm against upstream before editing.
impl_into_shared {
    ($(#[$meta:meta])* $vis:vis trait $name:ident => $target:path) => {
        $(#[$meta])*
        $vis trait $name {
            #[doc = concat!("Converts this type into a shared [`", stringify!($target), "`].")]
            fn into_shared(self) -> Arc;
        }

        // Identity conversion: an `Arc` is already shared.
        impl $name for Arc {
            #[inline]
            fn into_shared(self) -> Arc {
                self
            }
        }

        impl $name for Arc {
            #[inline]
            fn into_shared(self) -> Arc {
                self
            }
        }

        // Any concrete implementor is wrapped in a fresh `Arc`.
        impl $name for R {
            #[inline]
            fn into_shared(self) -> Arc {
                Arc::new(self)
            }
        }
    };
}

/// Extension trait for http::Response objects
///
/// Provides methods to extract URI information from HTTP responses
pub trait ResponseExt {
    /// Returns a reference to the `Uri` associated with this response, if available.
    fn uri(&self) -> Option<&Uri>;
}

/// Extension trait for http::response::Builder objects
///
/// Allows the user to add a `Uri` to the http::Response
pub trait ResponseBuilderExt {
    /// A builder method for the `http::response::Builder` type that allows the user to add a `Uri`
    /// to the `http::Response`
    fn uri(self, uri: Uri) -> Self;
}

/// Extension type to store the request URI in a response's extensions.
#[derive(Clone)]
pub(crate) struct RequestUri(pub Uri);

/// Extension trait for `Uri` helpers.
pub(crate) trait UriExt {
    /// Returns true if the URI scheme is HTTP.
    fn is_http(&self) -> bool;

    /// Returns true if the URI scheme is HTTPS.
    fn is_https(&self) -> bool;

    /// Returns the port of the URI, or the default port for the scheme if none is specified.
    fn port_or_default(&self) -> u16;

    /// Sets the query component of the URI, replacing any existing query.
    #[cfg(feature = "query")]
    fn set_query(&mut self, query: String);

    /// Returns the username and password from the URI's userinfo, if present.
    fn userinfo(&self) -> (Option<&str>, Option<&str>);

    /// Sets the username and password in the URI's userinfo component.
fn set_userinfo(&mut self, username: &str, password: Option<&str>); } // ===== impl ResponseExt ===== impl ResponseExt for http::Response { fn uri(&self) -> Option<&Uri> { self.extensions().get::().map(|r| &r.0) } } // ===== impl ResponseBuilderExt ===== impl ResponseBuilderExt for http::response::Builder { fn uri(self, uri: Uri) -> Self { self.extension(RequestUri(uri)) } } // ===== impl UriExt ===== impl UriExt for Uri { #[inline] fn is_http(&self) -> bool { self.scheme() == Some(&Scheme::HTTP) } #[inline] fn is_https(&self) -> bool { self.scheme() == Some(&Scheme::HTTPS) } fn port_or_default(&self) -> u16 { match Uri::port(self) { Some(p) => p.as_u16(), None if self.is_https() => 443u16, _ => 80u16, } } #[cfg(feature = "query")] fn set_query(&mut self, query: String) { use http::uri::PathAndQuery; if query.is_empty() { return; } let path = self.path(); let parts = match PathAndQuery::from_maybe_shared(Bytes::from(format!("{path}?{query}"))) { Ok(path_and_query) => { let mut parts = self.clone().into_parts(); parts.path_and_query.replace(path_and_query); parts } Err(_err) => { debug!("Failed to set query in URI: {_err}"); return; } }; if let Ok(uri) = Uri::from_parts(parts) { *self = uri; } } fn userinfo(&self) -> (Option<&str>, Option<&str>) { self.authority() .and_then(|auth| auth.as_str().rsplit_once('@')) .map_or((None, None), |(userinfo, _)| { match userinfo.split_once(':') { Some((u, p)) => ((!u.is_empty()).then_some(u), (!p.is_empty()).then_some(p)), None => (Some(userinfo), None), } }) } fn set_userinfo(&mut self, username: &str, password: Option<&str>) { let mut parts = self.clone().into_parts(); let authority = match self.authority() { Some(authority) => authority, None => return, }; let host_and_port = authority .as_str() .rsplit_once('@') .map(|(_, host)| host) .unwrap_or_else(|| authority.as_str()); let authority = match (username.is_empty(), password) { (true, None) => Bytes::from(host_and_port.to_owned()), (true, Some(password)) => { let pass = 
percent_encoding::utf8_percent_encode(password, USERINFO); Bytes::from(format!(":{pass}@{host_and_port}")) } (false, Some(password)) => { let username = percent_encoding::utf8_percent_encode(username, USERINFO); let password = percent_encoding::utf8_percent_encode(password, USERINFO); Bytes::from(format!("{username}:{password}@{host_and_port}")) } (false, None) => { let username = percent_encoding::utf8_percent_encode(username, USERINFO); Bytes::from(format!("{username}@{host_and_port}")) } }; match Authority::from_maybe_shared(authority) { Ok(authority) => { parts.authority.replace(authority); } Err(_err) => { debug!("Failed to set userinfo in URI: {_err}"); return; } }; if let Ok(uri) = Uri::from_parts(parts) { *self = uri; } } } #[cfg(test)] mod tests { use http::{Uri, response::Builder}; use super::{RequestUri, ResponseBuilderExt, ResponseExt, UriExt}; use crate::Body; #[test] fn test_uri_ext_is_https() { let https_uri: Uri = "https://example.com".parse().unwrap(); let http_uri: Uri = "http://example.com".parse().unwrap(); assert!(https_uri.is_https()); assert!(!http_uri.is_https()); assert!(http_uri.is_http()); assert!(!https_uri.is_http()); } #[test] fn test_userinfo_with_username_and_password() { let uri: Uri = "http://user:pass@example.com".parse().unwrap(); let (username, password) = uri.userinfo(); assert_eq!(username, Some("user")); assert_eq!(password, Some("pass")); } #[test] fn test_userinfo_with_empty_username() { let uri: Uri = "http://:pass@example.com".parse().unwrap(); let (username, password) = uri.userinfo(); assert_eq!(username, None); assert_eq!(password, Some("pass")); } #[test] fn test_userinfo_with_empty_password() { let uri: Uri = "http://user:@example.com".parse().unwrap(); let (username, password) = uri.userinfo(); assert_eq!(username, Some("user")); assert_eq!(password, None); let uri: Uri = "http://user@example.com".parse().unwrap(); let (username, password) = uri.userinfo(); assert_eq!(username, Some("user")); assert_eq!(password, 
None); } #[test] fn test_userinfo_without_colon() { let uri: Uri = "http://something@example.com".parse().unwrap(); let (username, password) = uri.userinfo(); assert_eq!(username, Some("something")); assert_eq!(password, None); } #[test] fn test_userinfo_without_at() { let uri: Uri = "http://example.com".parse().unwrap(); let (username, password) = uri.userinfo(); assert_eq!(username, None); assert_eq!(password, None); } #[test] fn test_set_userinfo_both() { let mut uri: Uri = "http://example.com/path".parse().unwrap(); uri.set_userinfo("user", Some("pass")); let (username, password) = uri.userinfo(); assert_eq!(username, Some("user")); assert_eq!(password, Some("pass")); assert_eq!(uri.to_string(), "http://user:pass@example.com/path"); } #[test] fn test_set_userinfo_empty_username() { let mut uri: Uri = "http://user:pass@example.com/path".parse().unwrap(); uri.set_userinfo("", Some("pass")); let (username, password) = uri.userinfo(); assert_eq!(username, None); assert_eq!(password, Some("pass")); assert_eq!(uri.to_string(), "http://:pass@example.com/path"); } #[test] fn test_set_userinfo_none_password() { let mut uri: Uri = "http://user:pass@example.com/path".parse().unwrap(); uri.set_userinfo("user", None); let (username, password) = uri.userinfo(); assert_eq!(username, Some("user")); assert_eq!(password, None); assert_eq!(uri.to_string(), "http://user@example.com/path"); } #[test] fn test_set_userinfo_empty_username_and_password() { let mut uri: Uri = "http://user:pass@example.com/path".parse().unwrap(); uri.set_userinfo("", None); let (username, password) = uri.userinfo(); assert_eq!(username, None); assert_eq!(password, None); assert_eq!(uri.to_string(), "http://example.com/path"); } #[test] fn test_set_userinfo_with_encoding() { use http::Uri; use crate::ext::UriExt; let mut uri: Uri = "http://example.com/path".parse().unwrap(); uri.set_userinfo("us er", Some("p@ss:word!")); let (username, password) = uri.userinfo(); assert_eq!(username, Some("us%20er")); 
assert_eq!(password, Some("p%40ss%3Aword!")); assert_eq!( uri.to_string(), "http://us%20er:p%40ss%3Aword!@example.com/path" ); } #[test] fn test_set_userinfo_only_username_with_encoding() { use http::Uri; use crate::ext::UriExt; let mut uri: Uri = "http://example.com/".parse().unwrap(); uri.set_userinfo("user name", None); let (username, password) = uri.userinfo(); assert_eq!(username, Some("user%20name")); assert_eq!(password, None); assert_eq!(uri.to_string(), "http://user%20name@example.com/"); } #[test] fn test_set_userinfo_only_password_with_encoding() { use http::Uri; use crate::ext::UriExt; let mut uri: Uri = "http://example.com/".parse().unwrap(); uri.set_userinfo("", Some("p@ss word")); let (username, password) = uri.userinfo(); assert_eq!(username, None); assert_eq!(password, Some("p%40ss%20word")); assert_eq!(uri.to_string(), "http://:p%40ss%20word@example.com/"); } #[cfg(feature = "query")] #[test] fn test_set_query() { let mut uri: Uri = "http://example.com/path".parse().unwrap(); uri.set_query("key=value&foo=bar".to_string()); assert_eq!(uri.to_string(), "http://example.com/path?key=value&foo=bar"); let mut uri: Uri = "http://example.com/path?existing=param".parse().unwrap(); uri.set_query("newkey=newvalue".to_string()); assert_eq!(uri.to_string(), "http://example.com/path?newkey=newvalue"); let mut uri: Uri = "http://example.com/path".parse().unwrap(); uri.set_query("".to_string()); assert_eq!(uri.to_string(), "http://example.com/path"); } #[test] fn test_response_builder_ext() { let uri = Uri::try_from("http://example.com").unwrap(); let response = Builder::new() .status(200) .uri(uri.clone()) .body(Body::empty()) .unwrap(); assert_eq!(response.uri(), Some(&uri)); } #[test] fn test_response_ext() { let uri = Uri::try_from("http://example.com").unwrap(); let response = http::Response::builder() .status(200) .extension(RequestUri(uri.clone())) .body(Body::empty()) .unwrap(); assert_eq!(response.uri(), Some(&uri)); } } 
================================================
FILE: src/header.rs
================================================

//! HTTP header types
//!
//! This module provides [`HeaderName`], [`HeaderMap`], [`OrigHeaderMap`], [`OrigHeaderName`], and a
//! number of types used for interacting with `HeaderMap`. These types allow representing both
//! HTTP/1 and HTTP/2 headers.

pub use http::header::*;
pub use name::OrigHeaderName;

/// Trait for types that can be converted into an [`OrigHeaderName`] (case-preserved header).
///
/// This trait is sealed, so only known types can implement it.
/// Supported types:
/// - `&'static str`
/// - `String`
/// - `Bytes`
/// - `HeaderName`
/// - `&HeaderName`
/// - `OrigHeaderName`
/// - `&OrigHeaderName`
pub trait IntoOrigHeaderName: sealed::Sealed {
    /// Converts the type into an [`OrigHeaderName`].
    fn into_orig_header_name(self) -> OrigHeaderName;
}

/// A map from header names to their original casing as received in an HTTP message.
///
/// [`OrigHeaderMap`] not only preserves the original case of each header name as it appeared
/// in the request or response, but also maintains the insertion order of headers. This makes
/// it suitable for use cases where the order of headers matters, such as HTTP/1.x message
/// serialization, proxying, or reproducing requests/responses exactly as received.
// NOTE(review): generic parameters in this copy appear stripped by extraction
// (e.g. `HeaderMap` without its value-type parameter, `insert(&mut self, orig: N)`
// without `<N>`, `Option` without `<HeaderName>`). Code is kept token-for-token
// as found — restore generics from upstream.
#[derive(Debug, Clone, Default)]
pub struct OrigHeaderMap(HeaderMap);

// ===== impl OrigHeaderMap =====

impl OrigHeaderMap {
    /// Creates a new, empty [`OrigHeaderMap`].
    #[inline]
    pub fn new() -> Self {
        Self(HeaderMap::default())
    }

    /// Creates an empty [`OrigHeaderMap`] with the specified capacity.
    #[inline]
    pub fn with_capacity(size: usize) -> Self {
        Self(HeaderMap::with_capacity(size))
    }

    /// Insert a new header name into the collection.
    ///
    /// If the map did not previously have this key present, then `false` is
    /// returned.
    ///
    /// If the map did have this key present, the new value is pushed to the end
    /// of the list of values currently associated with the key. The key is not
    /// updated, though; this matters for types that can be `==` without being
    /// identical.
    #[inline]
    pub fn insert(&mut self, orig: N) -> bool
    where
        N: IntoOrigHeaderName,
    {
        let orig_header_name = orig.into_orig_header_name();
        match &orig_header_name.kind {
            // A raw cased name must first validate as a legal header name;
            // invalid names are silently dropped (returns `false`).
            name::Kind::Cased(bytes) => HeaderName::from_bytes(bytes)
                .map(|name| self.0.append(name, orig_header_name))
                .unwrap_or(false),
            name::Kind::Standard(header_name) => {
                self.0.append(header_name.clone(), orig_header_name)
            }
        }
    }

    /// Extends the map with all entries from another [`OrigHeaderMap`], preserving order.
    #[inline]
    pub fn extend(&mut self, iter: OrigHeaderMap) {
        self.0.extend(iter.0);
    }

    /// Returns the number of headers stored in the map.
    ///
    /// This number represents the total number of **values** stored in the map.
    /// This number can be greater than or equal to the number of **keys**
    /// stored given that a single key may have more than one associated value.
    #[inline]
    pub fn len(&self) -> usize {
        self.0.len()
    }

    /// Returns true if the map contains no elements.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }

    /// Returns an iterator over all header names and their original spellings, in insertion order.
    #[inline]
    pub fn iter(&self) -> impl Iterator {
        self.0.iter()
    }
}

impl OrigHeaderMap {
    /// Sorts headers by this map, preserving original casing.
    /// Headers in the map come first, others follow.
    pub(crate) fn sort_headers(&self, headers: &mut HeaderMap) {
        // Nothing to reorder for 0/1 headers or an empty ordering map.
        if headers.len() <= 1 || self.0.is_empty() {
            return;
        }

        // Create a new header map to store the sorted headers
        let mut sorted_headers = HeaderMap::with_capacity(headers.keys_len());

        // First insert headers in the specified order
        for name in self.0.keys() {
            for value in headers.get_all(name) {
                sorted_headers.append(name.clone(), value.clone());
            }
            headers.remove(name);
        }

        // Then insert any remaining headers that were not ordered.
        // `HeaderMap::drain` yields `None` for subsequent values of the same
        // key, so the last seen name is carried in `prev_name`.
        let mut prev_name: Option = None;
        for (name, value) in headers.drain() {
            match (name, &prev_name) {
                (Some(name), _) => {
                    prev_name.replace(name.clone());
                    sorted_headers.insert(name, value);
                }
                (None, Some(prev_name)) => {
                    sorted_headers.append(prev_name, value);
                }
                _ => {}
            }
        }

        std::mem::swap(headers, &mut sorted_headers);
    }

    /// Calls the given function for each header in this map's order, preserving original casing.
    /// Headers in the map are processed first, others follow.
    pub(crate) fn sort_headers_for_each(&self, headers: &mut HeaderMap, mut dst: F)
    where
        F: FnMut(&[u8], &HeaderValue),
    {
        // First, sort headers according to the order defined in this map
        for (name, orig_name) in self.iter() {
            for value in headers.get_all(name) {
                dst(orig_name.as_ref(), value);
            }
            headers.remove(name);
        }

        // After processing all ordered headers, append any remaining headers
        let mut prev_name: Option = None;
        for (name, value) in headers.drain() {
            match (name, &prev_name) {
                (Some(name), _) => {
                    dst(name.as_ref(), &value);
                    prev_name.replace(name.into_orig_header_name());
                }
                (None, Some(prev_name)) => {
                    dst(prev_name.as_ref(), &value);
                }
                _ => (),
            };
        }
    }
}

impl<'a> IntoIterator for &'a OrigHeaderMap {
    type Item = (&'a HeaderName, &'a OrigHeaderName);
    type IntoIter = <&'a HeaderMap as IntoIterator>::IntoIter;

    #[inline]
    fn into_iter(self) -> Self::IntoIter {
        self.0.iter()
    }
}

impl IntoIterator for OrigHeaderMap {
    type Item = (Option, OrigHeaderName);
    // NOTE(review): the qualified type here lost its leading `HeaderMap<...>`
    // segment to extraction; kept as found.
    type IntoIter = as IntoIterator>::IntoIter;

    #[inline]
    fn into_iter(self) -> Self::IntoIter {
        self.0.into_iter()
    }
}

impl_request_config_value!(OrigHeaderMap);

mod name {
    use bytes::Bytes;
    use http::HeaderName;

    use super::IntoOrigHeaderName;

    /// An HTTP header name with both normalized and original casing.
    ///
    /// While HTTP headers are case-insensitive, this type stores both
    /// the canonical `HeaderName` and the original casing as received,
    /// useful for preserving header order and formatting in proxies,
    /// debugging, or exact HTTP message reproduction.
    #[derive(Debug, Clone, PartialEq, Eq)]
    pub struct OrigHeaderName {
        pub(super) kind: Kind,
    }

    #[derive(Debug, Clone, PartialEq, Eq)]
    pub(super) enum Kind {
        /// The original casing of the header name as received.
        Cased(Bytes),
        /// The canonical (normalized, lowercased) header name.
        Standard(HeaderName),
    }

    impl AsRef<[u8]> for OrigHeaderName {
        #[inline]
        fn as_ref(&self) -> &[u8] {
            match &self.kind {
                Kind::Standard(name) => name.as_ref(),
                Kind::Cased(orig) => orig.as_ref(),
            }
        }
    }

    impl IntoOrigHeaderName for &'static str {
        #[inline]
        fn into_orig_header_name(self) -> OrigHeaderName {
            Bytes::from_static(self.as_bytes()).into_orig_header_name()
        }
    }

    impl IntoOrigHeaderName for String {
        #[inline]
        fn into_orig_header_name(self) -> OrigHeaderName {
            Bytes::from(self).into_orig_header_name()
        }
    }

    impl IntoOrigHeaderName for Bytes {
        #[inline]
        fn into_orig_header_name(self) -> OrigHeaderName {
            OrigHeaderName {
                kind: Kind::Cased(self),
            }
        }
    }

    impl IntoOrigHeaderName for &HeaderName {
        #[inline]
        fn into_orig_header_name(self) -> OrigHeaderName {
            OrigHeaderName {
                kind: Kind::Standard(self.clone()),
            }
        }
    }

    impl IntoOrigHeaderName for HeaderName {
        #[inline]
        fn into_orig_header_name(self) -> OrigHeaderName {
            OrigHeaderName {
                kind: Kind::Standard(self),
            }
        }
    }

    impl IntoOrigHeaderName for OrigHeaderName {
        #[inline]
        fn into_orig_header_name(self) -> OrigHeaderName {
            self
        }
    }

    impl IntoOrigHeaderName for &OrigHeaderName {
        #[inline]
        fn into_orig_header_name(self) -> OrigHeaderName {
            self.clone()
        }
    }
}

mod sealed {
    use bytes::Bytes;
    use http::HeaderName;

    use crate::header::OrigHeaderName;

    pub trait Sealed {}

    impl Sealed for &'static str {}
    impl Sealed for String {}
    impl Sealed for Bytes {}
    impl Sealed for &HeaderName {}
    impl Sealed for HeaderName {}
    impl Sealed for &OrigHeaderName {}
    impl Sealed for OrigHeaderName {}
}

#[cfg(test)]
mod test {
    use http::{HeaderMap, HeaderName, HeaderValue};

    use super::OrigHeaderMap;

    /// Returns a view of all spellings associated with that header name,
    /// in the order they were found.
    // NOTE(review): return type garbled by extraction; kept as found.
    #[inline]
    pub(crate) fn get_all<'a>(
        orig_headers: &'a OrigHeaderMap,
        name: &HeaderName,
    ) -> impl Iterator + 'a> + 'a {
        orig_headers.0.get_all(name).into_iter()
    }

    #[test]
    fn test_header_order() {
        let mut headers = OrigHeaderMap::new();

        // Insert headers with different cases and order
        headers.insert("X-Test");
        headers.insert("X-Another");
        headers.insert("x-test2");

        // Check order and case
        let mut iter = headers.iter();
        assert_eq!(iter.next().unwrap().1.as_ref(), b"X-Test");
        assert_eq!(iter.next().unwrap().1.as_ref(), b"X-Another");
        assert_eq!(iter.next().unwrap().1.as_ref(), b"x-test2");
    }

    #[test]
    fn test_extend_preserves_order() {
        use super::OrigHeaderMap;

        let mut map1 = OrigHeaderMap::new();
        map1.insert("A-Header");
        map1.insert("B-Header");

        let mut map2 = OrigHeaderMap::new();
        map2.insert("C-Header");
        map2.insert("D-Header");

        map1.extend(map2);

        let names: Vec<_> = map1.iter().map(|(_, orig)| orig.as_ref()).collect();
        assert_eq!(
            names,
            vec![b"A-Header", b"B-Header", b"C-Header", b"D-Header"]
        );
    }

    #[test]
    fn test_header_case() {
        let mut headers = OrigHeaderMap::new();

        // Insert headers with different cases
        headers.insert("X-Test");
        headers.insert("x-test");

        // Check that both headers are stored
        let all_x_test: Vec<_> = get_all(&headers, &"X-Test".parse().unwrap()).collect();
        assert_eq!(all_x_test.len(), 2);
        assert!(all_x_test.iter().any(|v| v.as_ref() == b"X-Test"));
        assert!(all_x_test.iter().any(|v| v.as_ref() == b"x-test"));
    }

    #[test]
    fn test_header_multiple_cases() {
        let mut headers = OrigHeaderMap::new();

        // Insert multiple headers with the same name but different cases
        headers.insert("X-test");
        headers.insert("x-test");
        headers.insert("X-test");

        // Check that all variations are stored
        let all_x_test: Vec<_> = get_all(&headers, &"x-test".parse().unwrap()).collect();
        assert_eq!(all_x_test.len(), 3);
        assert!(all_x_test.iter().any(|v| v.as_ref() == b"X-test"));
        assert!(all_x_test.iter().any(|v| v.as_ref() == b"x-test"));
        assert!(all_x_test.iter().any(|v| v.as_ref() == b"X-test"));
    }

    #[test]
    fn test_sort_headers_preserves_multiple_cookie_values() {
        // Create original header map for ordering
        let mut orig_headers = OrigHeaderMap::new();
        orig_headers.insert("Cookie");
        orig_headers.insert("User-Agent");
        orig_headers.insert("Accept");

        // Create headers with multiple Cookie values
        let mut headers = HeaderMap::new();

        // Add multiple Cookie headers (this simulates how cookies are often sent)
        headers.append("cookie", HeaderValue::from_static("session=abc123"));
        headers.append("cookie", HeaderValue::from_static("theme=dark"));
        headers.append("cookie", HeaderValue::from_static("lang=en"));

        // Add other headers
        headers.insert("user-agent", HeaderValue::from_static("Mozilla/5.0"));
        headers.insert("accept", HeaderValue::from_static("text/html"));
        headers.insert("host", HeaderValue::from_static("example.com"));

        // Record original cookie values for comparison
        let original_cookies: Vec<_> = headers
            .get_all("cookie")
            .iter()
            .map(|v| v.to_str().unwrap().to_string())
            .collect();

        // Sort headers according to orig_headers order
        orig_headers.sort_headers(&mut headers);

        // Verify all cookie values are preserved
        let sorted_cookies: Vec<_> = headers
            .get_all("cookie")
            .iter()
            .map(|v| v.to_str().unwrap().to_string())
            .collect();

        assert_eq!(
            original_cookies.len(),
            sorted_cookies.len(),
            "Cookie count should be preserved"
        );
        assert_eq!(original_cookies.len(), 3, "Should have 3 cookie values");

        // Verify all original cookies are still present (order might change but content preserved)
        for original_cookie in &original_cookies {
            assert!(
                sorted_cookies.contains(original_cookie),
                "Cookie '{original_cookie}' should be preserved"
            );
        }

        // Verify header ordering - Cookie should come first
        let header_names: Vec<_> = headers.keys().collect();
        assert_eq!(
            header_names[0].as_str(),
            "cookie",
            "Cookie should be first header"
        );

        // Verify all headers are preserved
        assert_eq!(
            headers.len(),
            6,
            "Should have 6 total header values (3 cookies + 3 others)"
        );
        assert!(headers.contains_key("user-agent"));
        assert!(headers.contains_key("accept"));
        assert!(headers.contains_key("host"));
    }

    #[test]
    fn test_sort_headers_multiple_values_different_headers() {
        let mut orig_headers = OrigHeaderMap::new();
        orig_headers.insert("Accept");
        orig_headers.insert("Cookie");

        let mut headers = HeaderMap::new();

        // Multiple Accept headers
        headers.append("accept", HeaderValue::from_static("text/html"));
        headers.append("accept", HeaderValue::from_static("application/json"));

        // Multiple Cookie headers
        headers.append("cookie", HeaderValue::from_static("a=1"));
        headers.append("cookie", HeaderValue::from_static("b=2"));

        // Single header
        headers.insert("host", HeaderValue::from_static("example.com"));

        let total_before = headers.len();
        orig_headers.sort_headers(&mut headers);

        // Verify all values preserved
        assert_eq!(
            headers.len(),
            total_before,
            "Total header count should be preserved"
        );
        assert_eq!(
            headers.get_all("accept").iter().count(),
            2,
            "Accept headers should be preserved"
        );
        assert_eq!(
            headers.get_all("cookie").iter().count(),
            2,
            "Cookie headers should be preserved"
        );
        assert_eq!(
            headers.get_all("host").iter().count(),
            1,
            "Host header should be preserved"
        );
    }
}

================================================
FILE: src/into_uri.rs
================================================

//! URI conversion utilities.
//!
//! This module provides the [`IntoUri`] trait, allowing various types
//!
//! (such as `&str`, `String`, `Vec`, etc.) to be fallibly converted into an [`http::Uri`].
//! The conversion is based on `TryFrom for Uri` and ensures the resulting URI is valid and
//! contains a host.
//!
//! Internally, the trait is sealed to prevent
//! (NOTE(review): tail of this sentence was truncated in this extraction.)

use bytes::Bytes;
use http::Uri;

use crate::{Error, Result};

// NOTE(review): generic parameters in this file appear stripped by extraction
// (e.g. `Vec` without `<u8>`, `Result` without `<Uri>`); kept token-for-token.

/// Converts a value into a [`Uri`] with error handling.
///
/// This trait is implemented for common types such as [`Uri`], [`String`], [`&str`], and byte
/// slices, as well as any type that can be fallibly converted into a [`Uri`] via [`TryFrom`].
pub trait IntoUri: IntoUriSealed {}

impl IntoUri for Uri {}
impl IntoUri for &Uri {}
impl IntoUri for &str {}
impl IntoUri for String {}
impl IntoUri for &String {}
impl IntoUri for Vec {}
impl IntoUri for &[u8] {}

pub trait IntoUriSealed {
    // Besides parsing as a valid `Uri`.
    fn into_uri(self) -> Result;
}

impl IntoUriSealed for &[u8] {
    fn into_uri(self) -> Result {
        // Try the strict `http::Uri` parser first; fall back to the more
        // lenient WHATWG `url` parser (which percent-encodes) on failure.
        Uri::try_from(self)
            .or_else(|_| internal::parse(internal::Kind::Bytes(self)))
            .and_then(IntoUriSealed::into_uri)
    }
}

impl IntoUriSealed for Vec {
    fn into_uri(self) -> Result {
        let bytes = Bytes::from(self);
        Uri::from_maybe_shared(bytes.clone())
            .or_else(|_| internal::parse(internal::Kind::Bytes(&bytes)))
            .and_then(IntoUriSealed::into_uri)
    }
}

impl IntoUriSealed for &str {
    fn into_uri(self) -> Result {
        Uri::try_from(self)
            .or_else(|_| internal::parse(internal::Kind::Str(self)))
            .and_then(IntoUriSealed::into_uri)
    }
}

impl IntoUriSealed for String {
    #[inline]
    fn into_uri(self) -> Result {
        self.into_bytes().into_uri()
    }
}

impl IntoUriSealed for &String {
    #[inline]
    fn into_uri(self) -> Result {
        IntoUriSealed::into_uri(self.as_str())
    }
}

impl IntoUriSealed for Uri {
    fn into_uri(self) -> Result {
        // A usable request URI must carry both a scheme and an authority (host).
        match (self.scheme(), self.authority()) {
            (Some(_), Some(_)) => Ok(self),
            _ => Err(Error::uri_bad_scheme(self)),
        }
    }
}

impl IntoUriSealed for &Uri {
    fn into_uri(self) -> Result {
        match (self.scheme(), self.authority()) {
            (Some(_), Some(_)) => Ok(self.clone()),
            _ => Err(Error::uri_bad_scheme(self.clone())),
        }
    }
}

mod internal {
    use http::Uri;
    use url::Url;

    use crate::{Error, Result};

    pub(super) enum Kind<'a> {
        Bytes(&'a [u8]),
        Str(&'a str),
    }

    pub(super) fn parse(s: Kind) -> Result {
        let s = match s {
            Kind::Bytes(bytes) => std::str::from_utf8(bytes).map_err(Error::decode),
            Kind::Str(s) => Ok(s),
        }?;
        // Normalize through the WHATWG `url` parser (percent-encoding as
        // needed), then re-parse the serialized form as an `http::Uri`.
        Url::parse(s)
            .map(String::from)
            .map_err(Error::builder)
            .and_then(|s| Uri::try_from(s).map_err(Error::builder))
    }
}

#[cfg(test)]
mod tests {
    use super::IntoUriSealed;

    #[test]
    fn into_uri_bad_scheme() {
        let err = "/hello/world".into_uri().unwrap_err();
        assert_eq!(
            err.to_string(),
            "builder error for uri (/hello/world): URI scheme is not allowed"
        );

        let err = "127.0.0.1".into_uri().unwrap_err();
        assert_eq!(
            err.to_string(),
            "builder error for uri (127.0.0.1): URI scheme is not allowed"
        );
    }

    #[test]
    fn into_uri_with_space_in_path() {
        let uri = "http://example.com/hello world".into_uri().unwrap();
        assert_eq!(uri, "http://example.com/hello%20world");
    }

    #[test]
    fn into_uri_with_unicode_in_path() {
        let uri = "http://example.com/文件/测试".into_uri().unwrap();
        assert_eq!(uri, "http://example.com/文件/测试");
    }

    #[test]
    fn into_uri_with_special_chars_in_path() {
        let uri = "http://example.com/path<>{}".into_uri().unwrap();
        assert_eq!(uri, "http://example.com/path%3C%3E%7B%7D");
    }

    #[test]
    fn into_uri_with_query_preserved() {
        let uri = "http://example.com/path?key=value&foo=bar"
            .into_uri()
            .unwrap();
        assert_eq!(uri, "http://example.com/path?key=value&foo=bar");
    }

    #[test]
    fn into_uri_bytes_with_encoding() {
        let bytes = b"http://example.com/hello world";
        let uri = bytes.into_uri().unwrap();
        assert_eq!(uri, "http://example.com/hello%20world");
    }

    #[test]
    fn test_bytes_with_query() {
        let bytes = b"http://example.com/path?key=hello%20world";
        let uri = bytes.into_uri().unwrap();
        assert_eq!(uri.to_string(), "http://example.com/path?key=hello%20world");
    }

    #[test]
    fn test_bytes_with_unicode() {
        let bytes = b"http://example.com/\xE6\xB5\x8B\xE8\xAF\x95";
        let uri = bytes.into_uri().unwrap();
        assert_eq!(uri, "http://example.com/测试");
    }

    #[test]
    fn test_bytes_minimal() {
        let bytes = b"http://example.com";
        let uri = bytes.into_uri().unwrap();
        assert_eq!(uri, "http://example.com");
    }
}

================================================
FILE: src/lib.rs
================================================

#![deny(unused)]
#![deny(unsafe_code)]
#![deny(missing_docs)]
#![cfg_attr(test, deny(warnings))]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]

//! # wreq
//!
//! An ergonomic and modular Rust HTTP Client for high-fidelity protocol matching, featuring
//! customizable TLS, JA3/JA4, and HTTP/2 signature capabilities.
//!
//! - Plain bodies, [JSON](#json), [urlencoded](#forms), [multipart]
//! - HTTP Trailer
//! - Cookie Store
//! - [Redirect Policy](#redirect-policies)
//! - Original Header
//! - Rotating [Proxies](#proxies)
//! - [Tower](https://docs.rs/tower/latest/tower) Middleware
//! - [WebSocket](#websocket) Upgrade
//! - HTTPS via [BoringSSL](#tls)
//! - HTTP/2 over TLS Parity
//! - [Certificate Store (CAs & mTLS)](#certificate-store)
//!
//! Additional learning resources include:
//!
//! - [The Rust Cookbook](https://doc.rust-lang.org/stable/book/ch00-00-introduction.html)
//! - [Repository Examples](https://github.com/0x676e67/wreq/tree/main/examples)
//!
//! ## Making a GET request
//!
//! Making a GET request is simple.
//!
//! ```rust
//! # async fn run() -> wreq::Result<()> {
//! let body = wreq::get("https://www.rust-lang.org")
//!     .send()
//!     .await?
//!     .text()
//!     .await?;
//!
//! println!("body = {:?}", body);
//! # Ok(())
//! # }
//! ```
//!
//! **NOTE**: If you plan to perform multiple requests, it is best to create a
//! [`Client`][client] and reuse it, taking advantage of keep-alive connection
//! pooling.
//!
//! ## Making POST requests (or setting request bodies)
//!
//! There are several ways you can set the body of a request. The basic one is
//!
by using the `body()` method of a [`RequestBuilder`][builder]. This lets you set the //! exact raw bytes of what the body should be. It accepts various types, //! including `String` and `Vec`. If you wish to pass a custom //! type, you can use the `wreq::Body` constructors. //! //! ```rust //! # use wreq::Error; //! # //! # async fn run() -> Result<(), Error> { //! let client = wreq::Client::new(); //! let res = client //! .post("http://httpbin.org/post") //! .body("the exact body that is sent") //! .send() //! .await?; //! # Ok(()) //! # } //! ``` //! //! ### Forms //! //! It's very common to want to send form data in a request body. This can be //! done with any type that can be serialized into form data. //! //! This can be an array of tuples, or a `HashMap`, or a custom type that //! implements [`Serialize`][serde]. //! //! The feature `form` is required. //! //! ```rust //! # use wreq::Error; //! # #[cfg(feature = "form")] //! # async fn run() -> Result<(), Error> { //! // This will POST a body of `foo=bar&baz=quux` //! let params = [("foo", "bar"), ("baz", "quux")]; //! let client = wreq::Client::new(); //! let res = client //! .post("http://httpbin.org/post") //! .form(¶ms) //! .send() //! .await?; //! # Ok(()) //! # } //! ``` //! //! ### JSON //! //! There is also a `json` method helper on the [`RequestBuilder`][builder] that works in //! a similar fashion the `form` method. It can take any value that can be //! serialized into JSON. The feature `json` is required. //! //! ```rust //! # use wreq::Error; //! # use std::collections::HashMap; //! # //! # #[cfg(feature = "json")] //! # async fn run() -> Result<(), Error> { //! // This will POST a body of `{"lang":"rust","body":"json"}` //! let mut map = HashMap::new(); //! map.insert("lang", "rust"); //! map.insert("body", "json"); //! //! let client = wreq::Client::new(); //! let res = client //! .post("http://httpbin.org/post") //! .json(&map) //! .send() //! .await?; //! # Ok(()) //! # } //! ``` //! //! 
## Websocket //! //! The `websocket` module provides a way to upgrade a connection to a websocket. //! //! ```rust,no_run //! use futures_util::{SinkExt, StreamExt, TryStreamExt}; //! use wreq::{header, ws::message::Message}; //! //! #[tokio::main] //! async fn main() -> wreq::Result<()> { //! // Use the API you're already familiar with //! let websocket = wreq::websocket("wss://echo.websocket.org") //! .header(header::USER_AGENT, env!("CARGO_PKG_NAME")) //! .send() //! .await?; //! //! assert_eq!(websocket.version(), http::Version::HTTP_11); //! //! let (mut tx, mut rx) = websocket.into_websocket().await?.split(); //! //! tokio::spawn(async move { //! for i in 1..11 { //! if let Err(err) = tx.send(Message::text(format!("Hello, World! {i}"))).await { //! eprintln!("failed to send message: {err}"); //! } //! } //! }); //! //! while let Some(message) = rx.try_next().await? { //! if let Message::Text(text) = message { //! println!("received: {text}"); //! } //! } //! //! Ok(()) //! } //! ``` //! //! ## Redirect Policies //! //! By default, the client does not handle HTTP redirects. //! To customize this behavior, you can use [`redirect::Policy`][redirect] with ClientBuilder. //! //! ## Cookies //! //! The automatic storing and sending of session cookies can be enabled with //! the [`cookie_store`][ClientBuilder::cookie_store] method on `ClientBuilder`. //! //! ## Proxies //! //! **NOTE**: System proxies are enabled by default. //! //! System proxies look in environment variables to set HTTP or HTTPS proxies. //! //! `HTTP_PROXY` or `http_proxy` provide HTTP proxies for HTTP connections while //! `HTTPS_PROXY` or `https_proxy` provide HTTPS proxies for HTTPS connections. //! `ALL_PROXY` or `all_proxy` provide proxies for both HTTP and HTTPS connections. //! If both the all proxy and HTTP or HTTPS proxy variables are set the more specific //! HTTP or HTTPS proxies take precedence. //! //! These can be overwritten by adding a [`Proxy`] to `ClientBuilder` //! i.e. 
`let proxy = wreq::Proxy::http("https://secure.example")?;` //! or disabled by calling `ClientBuilder::no_proxy()`. //! //! `socks` feature is required if you have configured socks proxy like this: //! //! ```bash //! export https_proxy=socks5://127.0.0.1:1086 //! ``` //! //! * `http://` is the scheme for http proxy //! * `https://` is the scheme for https proxy //! * `socks4://` is the scheme for socks4 proxy //! * `socks4a://` is the scheme for socks4a proxy //! * `socks5://` is the scheme for socks5 proxy //! * `socks5h://` is the scheme for socks5h proxy //! //! ## TLS //! //! By default, clients will utilize BoringSSL transport layer security to connect to HTTPS targets. //! //! - Various parts of TLS can also be configured or even disabled on the `ClientBuilder`. //! //! ## Certificate Store //! //! By default, wreq uses Mozilla's root certificates through the webpki-roots crate. //! This static root certificate bundle is not automatically updated and ignores any root //! certificates installed on the host. You can disable default-features to use the system's default //! certificate path. Additionally, wreq provides a certificate store for users to customize and //! update certificates. //! //! Custom Certificate Store verification supports Root CA certificates, peer certificates, and //! self-signed certificate SSL pinning. //! //! ## Optional Features //! //! The following are a list of [Cargo features][cargo-features] that can be //! enabled or disabled: //! //! - **cookies**: Provides cookie session support. //! - **gzip**: Provides response body gzip decompression. //! - **brotli**: Provides response body brotli decompression. //! - **zstd**: Provides response body zstd decompression. //! - **deflate**: Provides response body deflate decompression. //! - **query**: Provides query parameter serialization. //! - **form**: Provides form data serialization. //! - **json**: Provides serialization and deserialization for JSON bodies. //! 
//! - **multipart**: Provides functionality for multipart forms.
//! - **charset**: Improved support for decoding text.
//! - **stream**: Adds support for `futures::Stream`.
//! - **socks**: Provides SOCKS5 and SOCKS4 proxy support.
//! - **ws**: Provides websocket support.
//! - **hickory-dns**: Enables a hickory-dns async resolver instead of default threadpool using
//!   `getaddrinfo`.
//! - **webpki-roots** *(enabled by default)*: Use the webpki-roots crate for root certificates.
//! - **system-proxy**: Enable system proxy support.
//! - **tracing**: Enable tracing logging support.
//! - **prefix-symbols**: Prefix BoringSSL symbols to avoid OpenSSL conflicts.
//!
//! [client]: ./struct.Client.html
//! [response]: ./struct.Response.html
//! [get]: ./fn.get.html
//! [builder]: ./struct.RequestBuilder.html
//! [serde]: http://serde.rs
//! [redirect]: crate::redirect
//! [Proxy]: ./struct.Proxy.html
//! [cargo-features]: https://doc.rust-lang.org/stable/cargo/reference/manifest.html#the-features-section

// Macro-exporting modules must come first so their macros are visible crate-wide.
#[macro_use]
mod trace;
#[macro_use]
mod config;
#[macro_use]
mod ext;

// Private implementation modules.
mod client;
mod error;
mod into_uri;
mod proxy;
mod sync;
mod util;

// Public API modules.
#[cfg(feature = "cookies")]
pub mod cookie;
pub mod dns;
pub mod header;
pub mod redirect;
pub mod retry;
pub mod tls;

// Re-export the `http` crate types callers use with this API.
pub use http::{Method, StatusCode, Uri, Version};
// Referenced so the dependency is linked even when unused directly.
#[cfg(unix)]
use libc as _;

#[cfg(feature = "multipart")]
pub use self::client::multipart;
#[cfg(feature = "ws")]
pub use self::client::ws;
pub use self::{
    client::{
        Body, Client, ClientBuilder, Emulation, EmulationBuilder, Group, IntoEmulation, Request,
        RequestBuilder, Response, Upgraded,
    },
    error::{Error, Result},
    ext::{ResponseBuilderExt, ResponseExt},
    into_uri::IntoUri,
    proxy::{NoProxy, Proxy},
};

pub mod http1 {
    //! HTTP/1 protocol implementation and utilities.
    pub use super::client::http1::{Http1Options, Http1OptionsBuilder};
}

pub mod http2 {
    //! HTTP/2 protocol implementation and utilities.
pub use http2::frame::{
        Priorities, PrioritiesBuilder, Priority, PseudoId, PseudoOrder, Setting, SettingId,
        SettingsOrder, SettingsOrderBuilder, StreamDependency, StreamId,
    };

    pub use super::client::http2::{Http2Options, Http2OptionsBuilder};
}

// Compile-time assertions that the crate's core public types implement the
// auto traits callers rely on (Send/Sync/Clone). Never called at runtime.
// NOTE(review): the generic parameters of the helper fns and their call sites
// (e.g. `fn assert_send<T: Send>() {}` / `assert_send::<Client>()`) appear to
// have been lost in extraction — restore them from upstream before building.
fn _assert_impls() {
    fn assert_send() {}
    fn assert_sync() {}
    fn assert_clone() {}

    assert_send::();
    assert_sync::();
    assert_clone::();

    assert_send::();
    assert_send::();
    #[cfg(feature = "ws")]
    assert_send::();

    assert_send::();
    #[cfg(feature = "ws")]
    assert_send::();
    #[cfg(feature = "ws")]
    assert_send::();

    assert_send::();
    assert_sync::();
}

/// Shortcut method to quickly make a `GET` request.
///
/// See also the methods on the [`wreq::RequestBuilder`](./struct.RequestBuilder.html)
/// type.
///
/// **NOTE**: This function creates a new internal `Client` on each call,
/// and so should not be used if making many requests. Create a
/// [`Client`](./struct.Client.html) instead.
///
/// # Examples
///
/// ```rust
/// # async fn run() -> wreq::Result<()> {
/// let body = wreq::get("https://www.rust-lang.org")
///     .send()
///     .await?
///     .text()
///     .await?;
/// # Ok(())
/// # }
/// ```
#[inline]
pub fn get(uri: T) -> RequestBuilder {
    Client::new().get(uri)
}

/// Shortcut method to quickly make a `POST` request.
///
/// See also the methods on the [`wreq::RequestBuilder`](./struct.RequestBuilder.html)
/// type.
///
/// **NOTE**: This function creates a new internal `Client` on each call,
/// and so should not be used if making many requests. Create a
/// [`Client`](./struct.Client.html) instead.
///
/// # Examples
///
/// ```rust
/// # async fn run() -> wreq::Result<()> {
/// let res = wreq::post("https://httpbin.org/post")
///     .body("example body")
///     .send()
///     .await?;
/// # Ok(())
/// # }
/// ```
#[inline]
pub fn post(uri: T) -> RequestBuilder {
    Client::new().post(uri)
}

/// Shortcut method to quickly make a `PUT` request.
///
/// See also the methods on the [`wreq::RequestBuilder`](./struct.RequestBuilder.html)
/// type.
/// /// **NOTE**: This function creates a new internal `Client` on each call, /// and so should not be used if making many requests. Create a /// [`Client`](./struct.Client.html) instead. /// /// # Examples /// /// ```rust /// # async fn run() -> wreq::Result<()> { /// let res = wreq::put("https://httpbin.org/put") /// .body("update content") /// .send() /// .await?; /// # Ok(()) /// # } /// ``` #[inline] pub fn put(uri: T) -> RequestBuilder { Client::new().put(uri) } /// Shortcut method to quickly make a `DELETE` request. /// /// See also the methods on the [`wreq::RequestBuilder`](./struct.RequestBuilder.html) /// type. /// /// **NOTE**: This function creates a new internal `Client` on each call, /// and so should not be used if making many requests. Create a /// [`Client`](./struct.Client.html) instead. /// /// # Examples /// /// ```rust /// # async fn run() -> wreq::Result<()> { /// let res = wreq::delete("https://httpbin.org/delete") /// .send() /// .await?; /// # Ok(()) /// # } /// ``` #[inline] pub fn delete(uri: T) -> RequestBuilder { Client::new().delete(uri) } /// Shortcut method to quickly make a `HEAD` request. /// /// See also the methods on the [`wreq::RequestBuilder`](./struct.RequestBuilder.html) /// type. /// /// **NOTE**: This function creates a new internal `Client` on each call, /// and so should not be used if making many requests. Create a /// [`Client`](./struct.Client.html) instead. /// /// # Examples /// /// ```rust /// # async fn run() -> wreq::Result<()> { /// let res = wreq::head("https://httpbin.org/get") /// .send() /// .await?; /// # Ok(()) /// # } /// ``` #[inline] pub fn head(uri: T) -> RequestBuilder { Client::new().head(uri) } /// Shortcut method to quickly make a `PATCH` request. /// /// See also the methods on the [`wreq::RequestBuilder`](./struct.RequestBuilder.html) /// type. /// /// **NOTE**: This function creates a new internal `Client` on each call, /// and so should not be used if making many requests. 
Create a /// [`Client`](./struct.Client.html) instead. /// /// # Examples /// /// ```rust /// # async fn run() -> wreq::Result<()> { /// let res = wreq::patch("https://httpbin.org/patch") /// .body("patch content") /// .send() /// .await?; /// # Ok(()) /// # } /// ``` #[inline] pub fn patch(uri: T) -> RequestBuilder { Client::new().patch(uri) } /// Shortcut method to quickly make an `OPTIONS` request. /// /// See also the methods on the [`wreq::RequestBuilder`](./struct.RequestBuilder.html) /// type. /// /// **NOTE**: This function creates a new internal `Client` on each call, /// and so should not be used if making many requests. Create a /// [`Client`](./struct.Client.html) instead. /// /// # Examples /// /// ```rust /// # async fn run() -> wreq::Result<()> { /// let res = wreq::options("https://httpbin.org/get") /// .send() /// .await?; /// # Ok(()) /// # } /// ``` #[inline] pub fn options(uri: T) -> RequestBuilder { Client::new().options(uri) } /// Shortcut method to quickly make a request with a custom HTTP method. /// /// See also the methods on the [`wreq::RequestBuilder`](./struct.RequestBuilder.html) /// type. /// /// **NOTE**: This function creates a new internal `Client` on each call, /// and so should not be used if making many requests. Create a /// [`Client`](./struct.Client.html) instead. /// /// # Examples /// /// ```rust /// # async fn run() -> wreq::Result<()> { /// use http::Method; /// let res = wreq::request(Method::TRACE, "https://httpbin.org/trace") /// .send() /// .await?; /// # Ok(()) /// # } /// ``` #[inline] pub fn request(method: Method, uri: T) -> RequestBuilder { Client::new().request(method, uri) } /// Shortcut method to quickly make a WebSocket request. /// /// See also the methods on the /// [`wreq::ws::WebSocketRequestBuilder`](./ws/struct.WebSocketRequestBuilder.html) type. /// /// **NOTE**: This function creates a new internal `Client` on each call, /// and so should not be used if making many requests. 
Create a /// [`Client`](./struct.Client.html) instead. /// /// # Examples /// /// ```rust /// # async fn run() -> wreq::Result<()> { /// use futures_util::{SinkExt, StreamExt, TryStreamExt}; /// use wreq::{header, ws::message::Message}; /// /// let resp = wreq::websocket("wss://echo.websocket.org") /// .header(header::USER_AGENT, env!("CARGO_PKG_NAME")) /// .read_buffer_size(1024 * 1024) /// .send() /// .await?; /// /// assert_eq!(resp.version(), http::Version::HTTP_11); /// /// let websocket = resp.into_websocket().await?; /// if let Some(protocol) = websocket.protocol() { /// println!("WebSocket subprotocol: {:?}", protocol); /// } /// /// let (mut tx, mut rx) = websocket.split(); /// /// tokio::spawn(async move { /// for i in 1..11 { /// if let Err(err) = tx.send(Message::text(format!("Hello, World! {i}"))).await { /// eprintln!("failed to send message: {err}"); /// } /// } /// }); /// /// while let Some(message) = rx.try_next().await? { /// if let Message::Text(text) = message { /// println!("received: {text}"); /// } /// } /// # Ok(()) /// # } /// ``` #[inline] #[cfg(feature = "ws")] #[cfg_attr(docsrs, doc(cfg(feature = "ws")))] pub fn websocket(uri: T) -> ws::WebSocketRequestBuilder { Client::new().websocket(uri) } ================================================ FILE: src/proxy/mac.rs ================================================ use system_configuration::{ core_foundation::{ base::CFType, dictionary::CFDictionary, number::CFNumber, string::{CFString, CFStringRef}, }, dynamic_store::SCDynamicStoreBuilder, sys::schema_definitions::{ kSCPropNetProxiesHTTPEnable, kSCPropNetProxiesHTTPPort, kSCPropNetProxiesHTTPProxy, kSCPropNetProxiesHTTPSEnable, kSCPropNetProxiesHTTPSPort, kSCPropNetProxiesHTTPSProxy, }, }; #[allow(unsafe_code)] pub(super) fn with_system(builder: &mut super::matcher::Builder) { let Some(proxies_map) = SCDynamicStoreBuilder::new("") .build() .and_then(|store| store.get_proxies()) else { return; }; if builder.http.is_empty() { let 
http_proxy_config = parse_setting_from_dynamic_store(
            &proxies_map,
            unsafe { kSCPropNetProxiesHTTPEnable },
            unsafe { kSCPropNetProxiesHTTPProxy },
            unsafe { kSCPropNetProxiesHTTPPort },
        );
        if let Some(http) = http_proxy_config {
            builder.http = http;
        }
    }

    if builder.https.is_empty() {
        let https_proxy_config = parse_setting_from_dynamic_store(
            &proxies_map,
            unsafe { kSCPropNetProxiesHTTPSEnable },
            unsafe { kSCPropNetProxiesHTTPSProxy },
            unsafe { kSCPropNetProxiesHTTPSPort },
        );
        if let Some(https) = https_proxy_config {
            builder.https = https;
        }
    }
}

/// Extract one proxy setting (`host[:port]`) from the dynamic-store proxy
/// dictionary. Returns `None` when the proxy is disabled or no host is set;
/// a host without a port is returned as-is.
// NOTE(review): the generic/turbofish type parameters below were reconstructed
// from extraction-garbled text — confirm against upstream before relying on them.
fn parse_setting_from_dynamic_store(
    proxies_map: &CFDictionary<CFString, CFType>,
    enabled_key: CFStringRef,
    host_key: CFStringRef,
    port_key: CFStringRef,
) -> Option<String> {
    // The enable flag is a CFNumber; treat anything other than 1 as disabled.
    let proxy_enabled = proxies_map
        .find(enabled_key)
        .and_then(|flag| flag.downcast::<CFNumber>())
        .and_then(|flag| flag.to_i32())
        .unwrap_or(0)
        == 1;

    if proxy_enabled {
        let proxy_host = proxies_map
            .find(host_key)
            .and_then(|host| host.downcast::<CFString>())
            .map(|host| host.to_string());
        let proxy_port = proxies_map
            .find(port_key)
            .and_then(|port| port.downcast::<CFNumber>())
            .and_then(|port| port.to_i32());

        return match (proxy_host, proxy_port) {
            (Some(proxy_host), Some(proxy_port)) => Some(format!("{proxy_host}:{proxy_port}")),
            (Some(proxy_host), None) => Some(proxy_host),
            (None, Some(_)) => None,
            (None, None) => None,
        };
    }

    None
}


================================================
FILE: src/proxy/matcher.rs
================================================
//! Proxy matchers
//!
//! This module contains different matchers to configure rules for when a proxy
//! should be used, and if so, with what arguments.
//!
//! A [`Matcher`] can be constructed either using environment variables, or
//! a [`Matcher::builder()`].
//!
//! Once constructed, the `Matcher` can be asked if it intercepts a `Uri` by
//! calling [`Matcher::intercept()`].
//!
//! An [`Intercept`] includes the destination for the proxy, and any parsed
//! authentication to be used.
use std::net::IpAddr; #[cfg(unix)] use std::{path::Path, sync::Arc}; use bytes::Bytes; use http::{ HeaderMap, Uri, header::HeaderValue, uri::{Authority, Scheme}, }; use ipnet::IpNet; use percent_encoding::percent_decode_str; use self::builder::IntoValue; use super::{Extra, Intercepted}; use crate::ext::UriExt; /// A proxy matcher, usually built from environment variables. #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct Matcher { http: Option, https: Option, no: NoProxy, #[cfg(unix)] unix: Option>, } /// A matched proxy, /// /// This is returned by a matcher if a proxy should be used. #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct Intercept { uri: Uri, auth: Auth, extra: Extra, } /// A builder to create a [`Matcher`]. /// /// Construct with [`Matcher::builder()`]. #[derive(Default)] pub struct Builder { pub(super) is_cgi: bool, pub(super) all: String, pub(super) http: String, pub(super) https: String, pub(super) no: String, #[cfg(unix)] pub(super) unix: Option>, } #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub enum Auth { Empty, Basic(HeaderValue), Raw(Bytes, Bytes), } /// A filter for proxy matchers. /// /// This type is based off the `NO_PROXY` rules used by curl. #[derive(Clone, Debug, Default, PartialEq, Eq, Hash)] struct NoProxy { ips: IpMatcher, domains: DomainMatcher, } #[derive(Clone, Debug, Default, PartialEq, Eq, Hash)] struct DomainMatcher(Vec); #[derive(Clone, Debug, Default, PartialEq, Eq, Hash)] struct IpMatcher(Vec); #[derive(Clone, Debug, PartialEq, Eq, Hash)] enum Ip { Address(IpAddr), Network(IpNet), } // ===== impl Matcher ===== impl Matcher { /// Create a matcher from the environment or system. /// /// This checks the same environment variables as `from_env()`, and if not /// set, checks the system configuration for values for the OS. /// /// This constructor is always available, but if the `client-proxy-system` /// feature is enabled, it will check more configuration. 
Use this /// constructor if you want to allow users to optionally enable more, or /// use `from_env` if you do not want the values to change based on an /// enabled feature. pub fn from_system() -> Self { Builder::from_system().build(Extra::default()) } /// Start a builder to configure a matcher. pub fn builder() -> Builder { Builder::default() } /// Check if the destination should be intercepted by a proxy. /// /// If the proxy rules match the destination, a new `Uri` will be returned /// to connect to. pub fn intercept(&self, dst: &Uri) -> Option { // if unix sockets are configured, check them first #[cfg(unix)] if let Some(unix) = &self.unix { return Some(Intercepted::Unix(unix.clone())); } // TODO(perf): don't need to check `no` if below doesn't match... if self.no.contains(dst.host()?) { return None; } if dst.is_http() { return self.http.clone().map(Intercepted::Proxy); } if dst.is_https() { return self.https.clone().map(Intercepted::Proxy); } None } } // ===== impl Intercept ===== impl Intercept { #[inline] pub(crate) fn uri(&self) -> &Uri { &self.uri } pub(crate) fn basic_auth(&self) -> Option<&HeaderValue> { if let Some(ref val) = self.extra.auth { return Some(val); } if let Auth::Basic(ref val) = self.auth { Some(val) } else { None } } #[inline] pub(crate) fn custom_headers(&self) -> Option<&HeaderMap> { self.extra.misc.as_ref() } #[cfg(feature = "socks")] pub(crate) fn raw_auth(&self) -> Option<(Bytes, Bytes)> { if let Auth::Raw(ref u, ref p) = self.auth { Some((u.clone(), p.clone())) } else { None } } } // ===== impl Builder ===== impl Builder { fn from_env() -> Self { Builder { is_cgi: std::env::var_os("REQUEST_METHOD").is_some(), all: get_first_env(&["ALL_PROXY", "all_proxy"]), http: get_first_env(&["HTTP_PROXY", "http_proxy"]), https: get_first_env(&["HTTPS_PROXY", "https_proxy"]), no: get_first_env(&["NO_PROXY", "no_proxy"]), #[cfg(unix)] unix: None, } } fn from_system() -> Self { #[allow(unused_mut)] let mut builder = Self::from_env(); 
#[cfg(all(target_os = "macos", feature = "system-proxy"))]
        super::mac::with_system(&mut builder);
        #[cfg(all(windows, feature = "system-proxy"))]
        super::win::with_system(&mut builder);

        builder
    }

    /// Set the target proxy for all destinations.
    pub fn all<S>(mut self, val: S) -> Self
    where
        S: IntoValue,
    {
        self.all = val.into_value();
        self
    }

    /// Set the target proxy for HTTP destinations.
    pub fn http<S>(mut self, val: S) -> Self
    where
        S: IntoValue,
    {
        self.http = val.into_value();
        self
    }

    /// Set the target proxy for HTTPS destinations.
    pub fn https<S>(mut self, val: S) -> Self
    where
        S: IntoValue,
    {
        self.https = val.into_value();
        self
    }

    /// Set the "no" proxy filter.
    ///
    /// The rules are as follows:
    /// * Entries are expected to be comma-separated (whitespace between entries is ignored)
    /// * IP addresses (both IPv4 and IPv6) are allowed, as are optional subnet masks (by adding
    ///   /size, for example "`192.168.1.0/24`").
    /// * An entry "`*`" matches all hostnames (this is the only wildcard allowed)
    /// * Any other entry is considered a domain name (and may contain a leading dot, for example
    ///   `google.com` and `.google.com` are equivalent) and would match both that domain AND all
    ///   subdomains.
    ///
    /// For example, if `"NO_PROXY=google.com, 192.168.1.0/24"` was set, all of the following would
    /// match (and therefore would bypass the proxy):
    /// * `http://google.com/`
    /// * `http://www.google.com/`
    /// * `http://192.168.1.42/`
    ///
    /// The URI `http://notgoogle.com/` would not match.
    pub fn no<S>(mut self, val: S) -> Self
    where
        S: IntoValue,
    {
        self.no = val.into_value();
        self
    }

    /// Set the unix socket target proxy for all destinations.
    #[cfg(unix)]
    pub fn unix<S>(mut self, val: S) -> Self
    where
        S: super::uds::IntoUnixSocket,
    {
        self.unix = Some(val.unix_socket());
        self
    }

    /// Construct a [`Matcher`] using the configured values.
pub(super) fn build(self, extra: Extra) -> Matcher {
        // In a CGI context, never honor the proxy env vars (httpoxy).
        if self.is_cgi {
            return Matcher {
                http: None,
                https: None,
                no: NoProxy::empty(),
                #[cfg(unix)]
                unix: None,
            };
        }

        let mut all = parse_env_uri(&self.all);
        let mut http = parse_env_uri(&self.http);
        let mut https = parse_env_uri(&self.https);

        if let Some(http) = http.as_mut() {
            http.extra = extra.clone();
        }
        if let Some(https) = https.as_mut() {
            https.extra = extra.clone();
        }
        // Only attach the extras to `all` if it can still be used as a fallback.
        if http.is_none() || https.is_none() {
            if let Some(all) = all.as_mut() {
                all.extra = extra;
            }
        }

        Matcher {
            http: http.or_else(|| all.clone()),
            https: https.or(all),
            no: NoProxy::from_string(&self.no),
            #[cfg(unix)]
            unix: self.unix,
        }
    }
}

/// Return the first environment variable in `names` that is set, or an empty
/// string if none are.
fn get_first_env(names: &[&str]) -> String {
    for name in names {
        if let Ok(val) = std::env::var(name) {
            return val;
        }
    }

    String::new()
}

/// Parse a proxy env-var value into an [`Intercept`], extracting any userinfo
/// as auth and normalizing the URI. Returns `None` for unusable values.
fn parse_env_uri(val: &str) -> Option<Intercept> {
    let uri = val.parse::<Uri>().ok()?;
    let mut builder = Uri::builder();
    let mut is_httpish = false;
    let mut is_socks = false;
    let mut auth = Auth::Empty;

    builder = builder.scheme(match uri.scheme() {
        Some(s) => {
            if s == &Scheme::HTTP || s == &Scheme::HTTPS {
                is_httpish = true;
                s.clone()
            } else if matches!(s.as_str(), "socks4" | "socks4a" | "socks5" | "socks5h") {
                is_socks = true;
                s.clone()
            } else {
                // can't use this proxy scheme
                return None;
            }
        }
        // if no scheme provided, assume they meant 'http'
        None => {
            is_httpish = true;
            Scheme::HTTP
        }
    });

    let authority = {
        let authority = uri.authority()?;
        // default SOCKS port to 1080 if missing
        if is_socks && authority.port().is_none() {
            Authority::from_maybe_shared(Bytes::from(format!("{authority}:1080"))).ok()?
        } else {
            authority.clone()
        }
    };

    if let Some((userinfo, host_port)) = authority.as_str().rsplit_once('@') {
        let (user, pass) = match userinfo.split_once(':') {
            Some((user, pass)) => (user, Some(pass)),
            None => (userinfo, None),
        };
        let user = percent_decode_str(user).decode_utf8_lossy();
        let pass = pass.map(|pass| percent_decode_str(pass).decode_utf8_lossy());
        if is_httpish {
            auth = Auth::Basic(crate::util::basic_auth(&user, pass.as_deref()));
        } else {
            auth = Auth::Raw(
                Bytes::from(user.into_owned()),
                Bytes::from(pass.map_or_else(String::new, std::borrow::Cow::into_owned)),
            );
        }
        builder = builder.authority(host_port);
    } else {
        builder = builder.authority(authority);
    }

    // removing any path, but we MUST specify one or the builder errors
    builder = builder.path_and_query("/");

    Some(Intercept {
        auth,
        extra: Extra::default(),
        uri: builder.build().ok()?,
    })
}

impl NoProxy {
    fn empty() -> NoProxy {
        NoProxy {
            ips: IpMatcher(Vec::new()),
            domains: DomainMatcher(Vec::new()),
        }
    }

    /// Returns a new no-proxy configuration based on a `no_proxy` string (or `None` if no variables
    /// are set)
    /// The rules are as follows:
    /// * The environment variable `NO_PROXY` is checked, if it is not set, `no_proxy` is checked
    /// * If neither environment variable is set, `None` is returned
    /// * Entries are expected to be comma-separated (whitespace between entries is ignored)
    /// * IP addresses (both IPv4 and IPv6) are allowed, as are optional subnet masks (by adding
    ///   /size, for example "`192.168.1.0/24`").
    /// * An entry "`*`" matches all hostnames (this is the only wildcard allowed)
    /// * Any other entry is considered a domain name (and may contain a leading dot, for example
    ///   `google.com` and `.google.com` are equivalent) and would match both that domain AND all
    ///   subdomains.
/// /// For example, if `"NO_PROXY=google.com, 192.168.1.0/24"` was set, all of the following would /// match (and therefore would bypass the proxy): /// * `http://google.com/` /// * `http://www.google.com/` /// * `http://192.168.1.42/` /// /// The URI `http://notgoogle.com/` would not match. pub fn from_string(no_proxy_list: &str) -> Self { let mut ips = Vec::new(); let mut domains = Vec::new(); let parts = no_proxy_list.split(',').map(str::trim); for part in parts { match part.parse::() { // If we can parse an IP net or address, then use it, otherwise, assume it is a // domain Ok(ip) => ips.push(Ip::Network(ip)), Err(_) => match part.parse::() { Ok(addr) => ips.push(Ip::Address(addr)), Err(_) => { if !part.trim().is_empty() { domains.push(part.to_owned()) } } }, } } NoProxy { ips: IpMatcher(ips), domains: DomainMatcher(domains), } } /// Return true if this matches the host (domain or IP). pub fn contains(&self, host: &str) -> bool { // According to RFC3986, raw IPv6 hosts will be wrapped in []. 
So we need to strip those off // the end in order to parse correctly let host = if host.starts_with('[') { let x: &[_] = &['[', ']']; host.trim_matches(x) } else { host }; match host.parse::() { // If we can parse an IP addr, then use it, otherwise, assume it is a domain Ok(ip) => self.ips.contains(ip), Err(_) => self.domains.contains(host), } } } impl IpMatcher { fn contains(&self, addr: IpAddr) -> bool { for ip in &self.0 { match ip { Ip::Address(address) => { if &addr == address { return true; } } Ip::Network(net) => { if net.contains(&addr) { return true; } } } } false } } impl DomainMatcher { // The following links may be useful to understand the origin of these rules: // * https://curl.se/libcurl/c/CURLOPT_NOPROXY.html // * https://github.com/curl/curl/issues/1208 fn contains(&self, domain: &str) -> bool { let domain_len = domain.len(); for d in &self.0 { if d.eq_ignore_ascii_case(domain) || d.strip_prefix('.') .is_some_and(|s| s.eq_ignore_ascii_case(domain)) { return true; } else if domain .get(domain_len.saturating_sub(d.len())..) .is_some_and(|s| s.eq_ignore_ascii_case(d)) { if d.starts_with('.') { // If the first character of d is a dot, that means the first character of // domain must also be a dot, so we are looking at a // subdomain of d and that matches return true; } else if domain .as_bytes() .get(domain_len.saturating_sub(d.len() + 1)) == Some(&b'.') { // Given that d is a prefix of domain, if the prior character in domain is a dot // then that means we must be matching a subdomain of d, and that matches return true; } } else if d == "*" { return true; } } false } } mod builder { /// A type that can used as a `Builder` value. /// /// Private and sealed, only visible in docs. 
pub trait IntoValue {
        #[doc(hidden)]
        fn into_value(self) -> String;
    }

    impl IntoValue for String {
        #[doc(hidden)]
        fn into_value(self) -> String {
            self
        }
    }

    impl IntoValue for &String {
        #[doc(hidden)]
        fn into_value(self) -> String {
            self.into()
        }
    }

    impl IntoValue for &str {
        #[doc(hidden)]
        fn into_value(self) -> String {
            self.into()
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_domain_matcher() {
        let domains = vec![".foo.bar".into(), "bar.foo".into()];
        let matcher = DomainMatcher(domains);

        // domains match with leading `.`
        assert!(matcher.contains("foo.bar"));
        assert!(matcher.contains("FOO.BAR"));
        // subdomains match with leading `.`
        assert!(matcher.contains("www.foo.bar"));
        assert!(matcher.contains("WWW.FOO.BAR"));

        // domains match with no leading `.`
        assert!(matcher.contains("bar.foo"));
        assert!(matcher.contains("Bar.foo"));
        // subdomains match with no leading `.`
        assert!(matcher.contains("www.bar.foo"));
        assert!(matcher.contains("WWW.BAR.FOO"));

        // non-subdomain string prefixes don't match
        assert!(!matcher.contains("notfoo.bar"));
        assert!(!matcher.contains("notbar.foo"));
    }

    #[test]
    fn test_no_proxy_wildcard() {
        let no_proxy = NoProxy::from_string("*");
        assert!(no_proxy.contains("any.where"));
    }

    #[test]
    fn test_no_proxy_ip_ranges() {
        let no_proxy =
            NoProxy::from_string(".foo.bar, bar.baz,10.42.1.1/24,::1,10.124.7.8,2001::/17");

        let should_not_match = [
            // random uri, not in no_proxy
            "hyper.rs",
            // make sure that random non-subdomain string prefixes don't match
            "notfoo.bar",
            // make sure that random non-subdomain string prefixes don't match
            "notbar.baz",
            // ipv4 address out of range
            "10.43.1.1",
            // ipv4 address out of range
            "10.124.7.7",
            // ipv6 address out of range
            "[ffff:db8:a0b:12f0::1]",
            // ipv6 address out of range
            "[2005:db8:a0b:12f0::1]",
        ];

        for host in &should_not_match {
            assert!(!no_proxy.contains(host), "should not contain {host:?}");
        }

        let should_match = [
            // make sure subdomains (with leading .) match
            "hello.foo.bar",
            // make sure exact matches (without leading .) match (also makes sure spaces between
            // entries work)
            "bar.baz",
            // make sure subdomains (without leading . in no_proxy) match
            "foo.bar.baz",
            // make sure subdomains (without leading . in no_proxy) match - this differs from cURL
            "foo.bar",
            // ipv4 address match within range
            "10.42.1.100",
            // ipv6 address exact match
            "[::1]",
            // ipv6 address match within range
            "[2001:db8:a0b:12f0::1]",
            // ipv4 address exact match
            "10.124.7.8",
        ];

        for host in &should_match {
            assert!(no_proxy.contains(host), "should contain {host:?}");
        }
    }

    macro_rules! p {
        ($($n:ident = $v:expr,)*) => ({Builder {
            $($n: $v.into(),)*
            ..Builder::default()
        }.build(Extra::default())});
    }

    fn intercept(p: &Matcher, u: &str) -> Intercept {
        match p.intercept(&u.parse().unwrap()).unwrap() {
            Intercepted::Proxy(intercept) => intercept,
            #[cfg(unix)]
            Intercepted::Unix(path) => {
                unreachable!("should not intercept unix socket: {path:?}")
            }
        }
    }

    #[test]
    fn test_all_proxy() {
        let p = p! {
            all = "http://om.nom",
        };

        assert_eq!("http://om.nom", intercept(&p, "http://example.com").uri());
        assert_eq!("http://om.nom", intercept(&p, "https://example.com").uri());
    }

    #[test]
    fn test_specific_overrides_all() {
        let p = p! {
            all = "http://no.pe",
            http = "http://y.ep",
        };

        assert_eq!("http://no.pe", intercept(&p, "https://example.com").uri());

        // the http rule is "more specific" than the all rule
        assert_eq!("http://y.ep", intercept(&p, "http://example.com").uri());
    }

    #[test]
    fn test_parse_no_scheme_defaults_to_http() {
        let p = p! {
            https = "y.ep",
            http = "127.0.0.1:8887",
        };

        assert_eq!(intercept(&p, "https://example.local").uri(), "http://y.ep");
        assert_eq!(
            intercept(&p, "http://example.local").uri(),
            "http://127.0.0.1:8887"
        );
    }

    #[test]
    fn test_parse_http_auth() {
        let p = p! {
            all = "http://Aladdin:opensesame@y.ep",
        };

        let proxy = intercept(&p, "https://example.local");
        assert_eq!(proxy.uri(), "http://y.ep");
        assert_eq!(
            proxy.basic_auth().expect("basic_auth"),
            "Basic QWxhZGRpbjpvcGVuc2VzYW1l"
        );
    }

    #[test]
    fn test_parse_http_auth_without_password() {
        let p = p! {
            all = "http://Aladdin@y.ep",
        };

        let proxy = intercept(&p, "https://example.local");
        assert_eq!(proxy.uri(), "http://y.ep");
        assert_eq!(
            proxy.basic_auth().expect("basic_auth"),
            "Basic QWxhZGRpbjo="
        );
    }

    #[test]
    fn test_parse_http_auth_without_scheme() {
        let p = p! {
            all = "Aladdin:opensesame@y.ep",
        };

        let proxy = intercept(&p, "https://example.local");
        assert_eq!(proxy.uri(), "http://y.ep");
        assert_eq!(
            proxy.basic_auth().expect("basic_auth"),
            "Basic QWxhZGRpbjpvcGVuc2VzYW1l"
        );
    }

    #[test]
    fn test_dont_parse_http_when_is_cgi() {
        let mut builder = Matcher::builder();
        builder.is_cgi = true;
        builder.http = "http://never.gonna.let.you.go".into();
        let m = builder.build(Extra::default());

        assert!(m.intercept(&"http://rick.roll".parse().unwrap()).is_none());
    }

    fn test_parse_socks(uri: &str) {
        let p = p! {
            all = uri,
        };

        let proxy = intercept(&p, "https://example.local");
        assert_eq!(proxy.uri(), uri);
    }

    #[test]
    fn test_parse_socks4() {
        test_parse_socks("socks4://localhost:8887");
        test_parse_socks("socks4a://localhost:8887");
    }

    #[test]
    fn test_parse_socks5() {
        test_parse_socks("socks5://localhost:8887");
        test_parse_socks("socks5h://localhost:8887");
    }

    #[test]
    fn test_domain_matcher_case_insensitive() {
        let domains = vec![".foo.bar".into()];
        let matcher = DomainMatcher(domains);

        assert!(matcher.contains("foo.bar"));
        assert!(matcher.contains("FOO.BAR"));
        assert!(matcher.contains("Foo.Bar"));
        assert!(matcher.contains("www.foo.bar"));
        assert!(matcher.contains("WWW.FOO.BAR"));
        assert!(matcher.contains("Www.Foo.Bar"));
    }

    #[test]
    fn test_no_proxy_case_insensitive() {
        let p = p! {
            all = "http://proxy.local",
            no = ".example.com",
        };

        // should bypass proxy (case insensitive match)
        assert!(
            p.intercept(&"http://example.com".parse().unwrap())
                .is_none()
        );
        assert!(
            p.intercept(&"http://EXAMPLE.COM".parse().unwrap())
                .is_none()
        );
        assert!(
            p.intercept(&"http://Example.com".parse().unwrap())
                .is_none()
        );

        // subdomain should bypass proxy (case insensitive match)
        assert!(
            p.intercept(&"http://www.example.com".parse().unwrap())
                .is_none()
        );
        assert!(
            p.intercept(&"http://WWW.EXAMPLE.COM".parse().unwrap())
                .is_none()
        );
        assert!(
            p.intercept(&"http://Www.Example.Com".parse().unwrap())
                .is_none()
        );
    }
}


================================================
FILE: src/proxy/uds.rs
================================================
use std::{
    path::{Path, PathBuf},
    sync::Arc,
};

/// Trait for converting various types into a shared Unix Domain Socket path (`Arc<Path>`).
///
/// This trait is sealed to allow future extension while controlling which types can implement it.
/// It enables ergonomic conversion from common path types such as `String`, `&str`, `PathBuf`,
/// `&Path`, and `Arc<Path>` into a unified `Arc<Path>` representation for Unix socket usage.
///
/// # Supported types
/// - `String`
/// - `&str`
/// - `PathBuf`
/// - `&Path`
/// - `Arc<Path>`
pub trait IntoUnixSocket: sealed::Sealed {
    /// Returns the Unix Domain Socket path as an [`Arc<Path>`].
    fn unix_socket(self) -> Arc<Path>;
}

impl IntoUnixSocket for String {
    fn unix_socket(self) -> Arc<Path> {
        Arc::from(PathBuf::from(self))
    }
}

impl IntoUnixSocket for &'_ str {
    fn unix_socket(self) -> Arc<Path> {
        Arc::from(PathBuf::from(self))
    }
}

impl IntoUnixSocket for &'_ Path {
    fn unix_socket(self) -> Arc<Path> {
        Arc::from(self)
    }
}

impl IntoUnixSocket for PathBuf {
    fn unix_socket(self) -> Arc<Path> {
        Arc::from(self)
    }
}

impl IntoUnixSocket for Arc<Path> {
    fn unix_socket(self) -> Arc<Path> {
        self
    }
}

mod sealed {
    use std::{
        path::{Path, PathBuf},
        sync::Arc,
    };

    /// Sealed trait to prevent external implementations of `IntoUnixSocket`.
pub trait Sealed {}

    impl Sealed for String {}
    impl Sealed for &'_ str {}
    impl Sealed for &'_ Path {}
    impl Sealed for PathBuf {}
    impl Sealed for Arc<Path> {}
}


================================================
FILE: src/proxy/win.rs
================================================
/// Fill in proxy values from the Windows registry (Internet Settings) for any
/// builder fields the environment did not already provide.
pub(super) fn with_system(builder: &mut super::matcher::Builder) {
    let Ok(settings) = windows_registry::CURRENT_USER
        .open("Software\\Microsoft\\Windows\\CurrentVersion\\Internet Settings")
    else {
        return;
    };

    // A zero (or missing) ProxyEnable flag means the system proxy is off.
    if settings.get_u32("ProxyEnable").unwrap_or(0) == 0 {
        return;
    }

    // Environment variables take precedence: only fill empty fields.
    if let Ok(val) = settings.get_string("ProxyServer") {
        if builder.http.is_empty() {
            builder.http = val.clone();
        }
        if builder.https.is_empty() {
            builder.https = val;
        }
    }

    if builder.no.is_empty() {
        if let Ok(val) = settings.get_string("ProxyOverride") {
            // Convert the `;`-separated override list to the comma-separated,
            // no-leading-`*.` form the NO_PROXY matcher expects.
            builder.no = val
                .split(';')
                .map(|s| s.trim())
                .collect::<Vec<_>>()
                .join(",")
                .replace("*.", "");
        }
    }
}


================================================
FILE: src/proxy.rs
================================================
#[cfg(all(target_os = "macos", feature = "system-proxy"))]
mod mac;
#[cfg(unix)]
mod uds;
#[cfg(all(windows, feature = "system-proxy"))]
mod win;

pub(crate) mod matcher;

use std::hash::{Hash, Hasher};
#[cfg(unix)]
use std::{path::Path, sync::Arc};

use http::{HeaderMap, Uri, header::HeaderValue};

use crate::{IntoUri, ext::UriExt};

// # Internals
//
// This module is a couple pieces:
//
// - The public builder API
// - The internal built types that our Connector knows how to use.
//
// The user creates a builder (`wreq::Proxy`), and configures any extras.
// Once that type is passed to the `ClientBuilder`, we convert it into the
// built matcher types, making use of `core`'s matchers.

/// Configuration of a proxy that a `Client` should pass requests to.
/// /// A `Proxy` has a couple pieces to it: /// /// - a URI of how to talk to the proxy /// - rules on what `Client` requests should be directed to the proxy /// /// For instance, let's look at `Proxy::http`: /// /// ```rust /// # fn run() -> Result<(), Box> { /// let proxy = wreq::Proxy::http("https://secure.example")?; /// # Ok(()) /// # } /// ``` /// /// This proxy will intercept all HTTP requests, and make use of the proxy /// at `https://secure.example`. A request to `http://hyper.rs` will talk /// to your proxy. A request to `https://hyper.rs` will not. /// /// Multiple `Proxy` rules can be configured for a `Client`. The `Client` will /// check each `Proxy` in the order it was added. This could mean that a /// `Proxy` added first with eager intercept rules, such as `Proxy::all`, /// would prevent a `Proxy` later in the list from ever working, so take care. /// /// By enabling the `"socks"` feature it is possible to use a socks proxy: /// ```rust /// # fn run() -> Result<(), Box> { /// let proxy = wreq::Proxy::http("socks5://192.168.1.1:9000")?; /// # Ok(()) /// # } /// ``` #[derive(Clone, Debug)] pub struct Proxy { extra: Extra, scheme: ProxyScheme, no_proxy: Option, } /// A configuration for filtering out requests that shouldn't be proxied #[derive(Clone, Debug, Default)] pub struct NoProxy { inner: String, } // ===== Internal ===== #[allow(clippy::large_enum_variant)] #[derive(Clone, PartialEq, Eq)] pub(crate) enum Intercepted { Proxy(matcher::Intercept), #[cfg(unix)] Unix(Arc), } #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub(crate) struct Matcher { inner: Box, } #[derive(Clone, Debug)] enum ProxyScheme { All(Uri), Http(Uri), Https(Uri), #[cfg(unix)] Unix(Arc), } #[derive(Debug, Clone, Default, PartialEq, Eq)] struct Extra { auth: Option, misc: Option, } // ===== impl Proxy ===== impl Proxy { /// Proxy all HTTP traffic to the passed URI. 
/// /// # Example /// /// ``` /// # extern crate wreq; /// # fn run() -> Result<(), Box> { /// let client = wreq::Client::builder() /// .proxy(wreq::Proxy::http("https://my.prox")?) /// .build()?; /// # Ok(()) /// # } /// # fn main() {} /// ``` pub fn http(uri: U) -> crate::Result { uri.into_uri().map(ProxyScheme::Http).map(Proxy::new) } /// Proxy all HTTPS traffic to the passed URI. /// /// # Example /// /// ``` /// # extern crate wreq; /// # fn run() -> Result<(), Box> { /// let client = wreq::Client::builder() /// .proxy(wreq::Proxy::https("https://example.prox:4545")?) /// .build()?; /// # Ok(()) /// # } /// # fn main() {} /// ``` pub fn https(uri: U) -> crate::Result { uri.into_uri().map(ProxyScheme::Https).map(Proxy::new) } /// Proxy **all** traffic to the passed URI. /// /// "All" refers to `https` and `http` URIs. Other schemes are not /// recognized by wreq. /// /// # Example /// /// ``` /// # extern crate wreq; /// # fn run() -> Result<(), Box> { /// let client = wreq::Client::builder() /// .proxy(wreq::Proxy::all("http://pro.xy")?) /// .build()?; /// # Ok(()) /// # } /// # fn main() {} /// ``` pub fn all(uri: U) -> crate::Result { uri.into_uri().map(ProxyScheme::All).map(Proxy::new) } /// Proxy all traffic to the passed Unix Domain Socket path. /// /// # Example /// /// ``` /// # extern crate wreq; /// # fn run() -> Result<(), Box> { /// let client = wreq::Client::builder() /// .proxy(wreq::Proxy::unix("/var/run/docker.sock")?) /// .build()?; /// # Ok(()) /// # } /// # fn main() {} /// ``` #[cfg(unix)] pub fn unix(unix: P) -> crate::Result { Ok(Proxy::new(ProxyScheme::Unix(unix.unix_socket()))) } fn new(scheme: ProxyScheme) -> Proxy { Proxy { extra: Extra { auth: None, misc: None, }, scheme, no_proxy: None, } } /// Set the `Proxy-Authorization` header using Basic auth. 
/// /// # Example /// /// ``` /// # extern crate wreq; /// # fn run() -> Result<(), Box> { /// let proxy = wreq::Proxy::https("http://localhost:1234")?.basic_auth("Aladdin", "open sesame"); /// # Ok(()) /// # } /// # fn main() {} /// ``` pub fn basic_auth(mut self, username: &str, password: &str) -> Proxy { match self.scheme { ProxyScheme::All(ref mut uri) | ProxyScheme::Http(ref mut uri) | ProxyScheme::Https(ref mut uri) => { let header = crate::util::basic_auth(username, Some(password)); uri.set_userinfo(username, Some(password)); self.extra.auth = Some(header); } #[cfg(unix)] ProxyScheme::Unix(_) => { // For Unix sockets, we don't set the auth header. // This is a no-op, but keeps the API consistent. } } self } /// Set the `Proxy-Authorization` header to a specified value. /// /// # Example /// /// ``` /// # extern crate wreq; /// # use wreq::header::*; /// # fn run() -> Result<(), Box> { /// let proxy = wreq::Proxy::https("http://localhost:1234")? /// .custom_http_auth(HeaderValue::from_static("justletmeinalreadyplease")); /// # Ok(()) /// # } /// # fn main() {} /// ``` pub fn custom_http_auth(mut self, header_value: HeaderValue) -> Proxy { self.extra.auth = Some(header_value); self } /// Adds a Custom Headers to Proxy /// Adds custom headers to this Proxy /// /// # Example /// ``` /// # extern crate wreq; /// # use wreq::header::*; /// # fn run() -> Result<(), Box> { /// let mut headers = HeaderMap::new(); /// headers.insert(USER_AGENT, "wreq".parse().unwrap()); /// let proxy = wreq::Proxy::https("http://localhost:1234")?.custom_http_headers(headers); /// # Ok(()) /// # } /// # fn main() {} /// ``` pub fn custom_http_headers(mut self, headers: HeaderMap) -> Proxy { match self.scheme { ProxyScheme::All(_) | ProxyScheme::Http(_) | ProxyScheme::Https(_) => { self.extra.misc = Some(headers); } #[cfg(unix)] ProxyScheme::Unix(_) => { // For Unix sockets, we don't set custom headers. // This is a no-op, but keeps the API consistent. 
} } self } /// Adds a `No Proxy` exclusion list to this Proxy /// /// # Example /// /// ``` /// # extern crate wreq; /// # fn run() -> Result<(), Box> { /// let proxy = wreq::Proxy::https("http://localhost:1234")? /// .no_proxy(wreq::NoProxy::from_string("direct.tld, sub.direct2.tld")); /// # Ok(()) /// # } /// # fn main() {} /// ``` pub fn no_proxy(mut self, no_proxy: Option) -> Proxy { self.no_proxy = no_proxy; self } pub(crate) fn into_matcher(self) -> Matcher { let Proxy { scheme, extra, no_proxy, } = self; let no_proxy = no_proxy.as_ref().map_or("", |n| n.inner.as_ref()); let inner = match scheme { ProxyScheme::All(uri) => matcher::Matcher::builder() .all(uri.to_string()) .no(no_proxy) .build(extra), ProxyScheme::Http(uri) => matcher::Matcher::builder() .http(uri.to_string()) .no(no_proxy) .build(extra), ProxyScheme::Https(uri) => matcher::Matcher::builder() .https(uri.to_string()) .no(no_proxy) .build(extra), #[cfg(unix)] ProxyScheme::Unix(unix) => matcher::Matcher::builder() .unix(unix) .no(no_proxy) .build(extra), }; Matcher { inner: Box::new(inner), } } } // ===== impl NoProxy ===== impl NoProxy { /// Returns a new no-proxy configuration based on environment variables (or `None` if no /// variables are set) see [self::NoProxy::from_string()] for the string format pub fn from_env() -> Option { let raw = std::env::var("NO_PROXY") .or_else(|_| std::env::var("no_proxy")) .ok()?; // Per the docs, this returns `None` if no environment variable is set. We can only reach // here if an env var is set, so we return `Some(NoProxy::default)` if `from_string` // returns None, which occurs with an empty string. 
Some(Self::from_string(&raw).unwrap_or_default()) } /// Returns a new no-proxy configuration based on a `no_proxy` string (or `None` if no variables /// are set) /// The rules are as follows: /// * The environment variable `NO_PROXY` is checked, if it is not set, `no_proxy` is checked /// * If neither environment variable is set, `None` is returned /// * Entries are expected to be comma-separated (whitespace between entries is ignored) /// * IP addresses (both IPv4 and IPv6) are allowed, as are optional subnet masks (by adding /// /size, for example "`192.168.1.0/24`"). /// * An entry "`*`" matches all hostnames (this is the only wildcard allowed) /// * Any other entry is considered a domain name (and may contain a leading dot, for example /// `google.com` and `.google.com` are equivalent) and would match both that domain AND all /// subdomains. /// /// For example, if `"NO_PROXY=google.com, 192.168.1.0/24"` was set, all the following would /// match (and therefore would bypass the proxy): /// * `http://google.com/` /// * `http://www.google.com/` /// * `http://192.168.1.42/` /// /// The URI `http://notgoogle.com/` would not match. pub fn from_string(no_proxy_list: &str) -> Option { Some(NoProxy { inner: no_proxy_list.into(), }) } } // ===== impl Matcher ===== impl Matcher { pub(crate) fn system() -> Self { Self { inner: Box::new(matcher::Matcher::from_system()), } } /// Intercept the given destination URI, returning the intercepted /// proxy configuration if there is a match. 
#[inline] pub(crate) fn intercept(&self, dst: &Uri) -> Option { self.inner.intercept(dst) } } // ===== impl Extra ===== impl Hash for Extra { fn hash(&self, state: &mut H) { self.auth.hash(state); if let Some(ref misc) = self.misc { for (k, v) in misc.iter() { k.as_str().hash(state); v.as_bytes().hash(state); } } else { 1u8.hash(state); } } } #[cfg(test)] mod tests { use super::*; fn uri(s: &str) -> Uri { s.parse().unwrap() } fn intercept(p: &Matcher, s: &Uri) -> matcher::Intercept { match p.intercept(s).unwrap() { Intercepted::Proxy(proxy) => proxy, #[cfg(unix)] _ => { unreachable!("intercepted_port should only be called with a Proxy matcher") } } } fn intercepted_uri(p: &Matcher, s: &str) -> Uri { match p.intercept(&s.parse().unwrap()).unwrap() { Intercepted::Proxy(proxy) => proxy.uri().clone(), #[cfg(unix)] _ => { unreachable!("intercepted_uri should only be called with a Proxy matcher") } } } #[test] fn test_http() { let target = "http://example.domain/"; let p = Proxy::http(target).unwrap().into_matcher(); let http = "http://hyper.rs"; let other = "https://hyper.rs"; assert_eq!(intercepted_uri(&p, http), target); assert!(p.intercept(&uri(other)).is_none()); } #[test] fn test_https() { let target = "http://example.domain/"; let p = Proxy::https(target).unwrap().into_matcher(); let http = "http://hyper.rs"; let other = "https://hyper.rs"; assert!(p.intercept(&uri(http)).is_none()); assert_eq!(intercepted_uri(&p, other), target); } #[test] fn test_all() { let target = "http://example.domain/"; let p = Proxy::all(target).unwrap().into_matcher(); let http = "http://hyper.rs"; let https = "https://hyper.rs"; // no longer supported //let other = "x-youve-never-heard-of-me-mr-proxy://hyper.rs"; assert_eq!(intercepted_uri(&p, http), target); assert_eq!(intercepted_uri(&p, https), target); //assert_eq!(intercepted_uri(&p, other), target); } #[test] fn test_standard_with_custom_auth_header() { let target = "http://example.domain/"; let p = Proxy::all(target) .unwrap() 
.custom_http_auth(http::HeaderValue::from_static("testme")) .into_matcher(); let got = intercept(&p, &uri("http://anywhere.local")); let auth = got.basic_auth().unwrap(); assert_eq!(auth, "testme"); } #[test] fn test_maybe_has_http_auth() { let uri = uri("http://example.domain/"); let m = Proxy::all("https://letme:in@yo.local") .unwrap() .into_matcher(); let got = intercept(&m, &uri); assert!(got.basic_auth().is_some(), "https forwards"); let m = Proxy::all("http://letme:in@yo.local") .unwrap() .into_matcher(); let got = intercept(&m, &uri); assert!(got.basic_auth().is_some(), "http forwards"); } #[test] fn test_maybe_has_http_custom_headers() { let uri = uri("http://example.domain/"); let mut headers = HeaderMap::new(); headers.insert("x-custom-header", HeaderValue::from_static("custom-value")); let m = Proxy::all("https://yo.local") .unwrap() .custom_http_headers(headers.clone()) .into_matcher(); match m.intercept(&uri).unwrap() { Intercepted::Proxy(proxy) => { let got_headers = proxy.custom_headers().unwrap(); assert_eq!(got_headers, &headers, "https forwards"); } #[cfg(unix)] _ => { unreachable!("Expected a Proxy Intercepted"); } } let m = Proxy::all("http://yo.local") .unwrap() .custom_http_headers(headers.clone()) .into_matcher(); match m.intercept(&uri).unwrap() { Intercepted::Proxy(proxy) => { let got_headers = proxy.custom_headers().unwrap(); assert_eq!(got_headers, &headers, "http forwards"); } #[cfg(unix)] _ => { unreachable!("Expected a Proxy Intercepted"); } } } fn test_socks_proxy_default_port(uri: &str, url2: &str, port: u16) { let m = Proxy::all(uri).unwrap().into_matcher(); let http = "http://hyper.rs"; let https = "https://hyper.rs"; assert_eq!(intercepted_uri(&m, http).port_u16(), Some(1080)); assert_eq!(intercepted_uri(&m, https).port_u16(), Some(1080)); // custom port let m = Proxy::all(url2).unwrap().into_matcher(); assert_eq!(intercepted_uri(&m, http).port_u16(), Some(port)); assert_eq!(intercepted_uri(&m, https).port_u16(), Some(port)); } 
#[test] fn test_socks4_proxy_default_port() { test_socks_proxy_default_port("socks4://example.com", "socks4://example.com:1234", 1234); test_socks_proxy_default_port("socks4a://example.com", "socks4a://example.com:1234", 1234); } #[test] fn test_socks5_proxy_default_port() { test_socks_proxy_default_port("socks5://example.com", "socks5://example.com:1234", 1234); test_socks_proxy_default_port("socks5h://example.com", "socks5h://example.com:1234", 1234); } } ================================================ FILE: src/redirect.rs ================================================ //! Redirect Handling //! //! By default, a `Client` does not follow HTTP redirects. To enable automatic //! redirect handling with a maximum redirect chain of 10 hops, use a [`Policy`] //! with [`ClientBuilder::redirect()`](crate::ClientBuilder::redirect). use std::{borrow::Cow, error::Error as StdError, fmt, sync::Arc}; use bytes::Bytes; use futures_util::FutureExt; use http::{HeaderMap, HeaderName, HeaderValue, StatusCode, Uri}; use crate::{ client::{Body, layer::redirect}, config::RequestConfig, error::{BoxError, Error}, ext::UriExt, header::{AUTHORIZATION, COOKIE, PROXY_AUTHORIZATION, REFERER, WWW_AUTHENTICATE}, }; /// A type that controls the policy on how to handle the following of redirects. /// /// The default value will catch redirect loops, and has a maximum of 10 /// redirects it will follow in a chain before returning an error. /// /// - `limited` can be used have the same as the default behavior, but adjust the allowed maximum /// redirect hops in a chain. /// - `none` can be used to disable all redirect behavior. /// - `custom` can be used to create a customized policy. #[derive(Debug, Clone)] pub struct Policy { inner: PolicyKind, } /// A type that holds information on the next request and previous requests /// in redirect chain. #[derive(Debug)] #[non_exhaustive] pub struct Attempt<'a, const PENDING: bool = true> { /// The status code of the redirect response. 
pub status: StatusCode, /// The headers of the redirect response. pub headers: Cow<'a, HeaderMap>, /// The URI to redirect to. pub uri: Cow<'a, Uri>, /// The list of previous URIs that have already been requested in this chain. pub previous: Cow<'a, [Uri]>, } /// An action to perform when a redirect status code is found. #[derive(Debug)] pub struct Action { inner: redirect::Action, } /// Redirect history information for a response. #[derive(Debug, Clone)] pub struct History(Vec); /// An entry in the redirect history. #[derive(Debug, Clone)] #[non_exhaustive] pub struct HistoryEntry { /// The status code of the redirect response. pub status: StatusCode, /// The URI of the redirect response. pub uri: Uri, /// The previous URI before the redirect response. pub previous: Uri, /// The headers of the redirect response. pub headers: HeaderMap, } #[derive(Clone)] enum PolicyKind { Custom(Arc Action + Send + Sync + 'static>), Limit(usize), None, } #[derive(Debug)] struct TooManyRedirects; /// A redirect policy handler for HTTP clients. /// /// [`FollowRedirectPolicy`] manages how HTTP redirects are handled by the client, /// including the maximum number of redirects, whether to set the `Referer` header, /// HTTPS-only enforcement, and redirect history tracking. /// /// This type is used internally by the client to implement redirect logic according to /// the configured [`Policy`]. It ensures that only allowed redirects are followed, /// sensitive headers are removed when crossing hosts, and the `Referer` header is set /// when appropriate. #[derive(Clone)] pub(crate) struct FollowRedirectPolicy { policy: RequestConfig, referer: bool, uris: Vec, https_only: bool, history: Option>, } // ===== impl Policy ===== impl Policy { /// Create a [`Policy`] with a maximum number of redirects. /// /// An [`Error`] will be returned if the max is reached. 
#[inline] pub fn limited(max: usize) -> Self { Self { inner: PolicyKind::Limit(max), } } /// Create a [`Policy`] that does not follow any redirect. #[inline] pub fn none() -> Self { Self { inner: PolicyKind::None, } } /// Create a custom [`Policy`] using the passed function. /// /// # Note /// /// The default [`Policy`] handles a maximum loop /// chain, but the custom variant does not do that for you automatically. /// The custom policy should have some way of handling those. /// /// Information on the next request and previous requests can be found /// on the [`Attempt`] argument passed to the closure. /// /// Actions can be conveniently created from methods on the /// [`Attempt`]. /// /// # Example /// /// ```rust /// # use wreq::{Error, redirect}; /// # /// # fn run() -> Result<(), Error> { /// let custom = redirect::Policy::custom(|attempt| { /// if attempt.previous.len() > 5 { /// attempt.error("too many redirects") /// } else if attempt.uri() == "example.domain" { /// // prevent redirects to 'example.domain' /// attempt.stop() /// } else { /// attempt.follow() /// } /// }); /// let client = wreq::Client::builder().redirect(custom).build()?; /// # Ok(()) /// # } /// ``` #[inline] pub fn custom(policy: T) -> Self where T: Fn(Attempt) -> Action + Send + Sync + 'static, { Self { inner: PolicyKind::Custom(Arc::new(policy)), } } /// Apply this policy to a given [`Attempt`] to produce a [`Action`]. /// /// # Note /// /// This method can be used together with [`Policy::custom()`] /// to construct one [`Policy`] that wraps another. 
/// /// # Example /// /// ```rust /// # use wreq::{Error, redirect}; /// # /// # fn run() -> Result<(), Error> { /// let custom = redirect::Policy::custom(|attempt| { /// eprintln!("{}, Location: {:?}", attempt.status(), attempt.uri()); /// redirect::Policy::default().redirect(attempt) /// }); /// # Ok(()) /// # } /// ``` pub fn redirect(&self, attempt: Attempt) -> Action { match self.inner { PolicyKind::Custom(ref custom) => custom(attempt), PolicyKind::Limit(max) => { // The first URI in the previous is the initial URI and not a redirection. It needs // to be excluded. if attempt.previous.len() > max { attempt.error(TooManyRedirects) } else { attempt.follow() } } PolicyKind::None => attempt.stop(), } } #[inline] fn check( &self, status: StatusCode, headers: &HeaderMap, next: &Uri, previous: &[Uri], ) -> redirect::Action { self.redirect(Attempt { status, headers: Cow::Borrowed(headers), uri: Cow::Borrowed(next), previous: Cow::Borrowed(previous), }) .inner } } impl Default for Policy { #[inline] fn default() -> Policy { // Keep `is_default` in sync Policy::limited(10) } } impl_request_config_value!(Policy); // ===== impl Attempt ===== impl Attempt<'_, PENDING> { /// Returns an action meaning wreq should follow the next URI. #[inline] pub fn follow(self) -> Action { Action { inner: redirect::Action::Follow, } } /// Returns an action meaning wreq should not follow the next URI. /// /// The 30x response will be returned as the `Ok` result. #[inline] pub fn stop(self) -> Action { Action { inner: redirect::Action::Stop, } } /// Returns an [`Action`] failing the redirect with an error. /// /// The [`Error`] will be returned for the result of the sent request. #[inline] pub fn error>(self, error: E) -> Action { Action { inner: redirect::Action::Error(error.into()), } } } impl Attempt<'_, true> { /// Returns an action meaning wreq should perform the redirect asynchronously. 
/// /// The provided async closure receives an owned [`Attempt<'static>`] and should /// return an [`Action`] to determine the final redirect behavior. /// /// # Example /// /// ```rust /// # use wreq::redirect; /// # /// let policy = redirect::Policy::custom(|attempt| { /// attempt.pending(|attempt| async move { /// // Perform some async operation /// if attempt.uri().host() == Some("trusted.domain") { /// attempt.follow() /// } else { /// attempt.stop() /// } /// }) /// }); /// ``` pub fn pending(self, func: F) -> Action where F: FnOnce(Attempt<'static, false>) -> Fut + Send + 'static, Fut: Future + Send + 'static, { let attempt = Attempt { status: self.status, headers: Cow::Owned(self.headers.into_owned()), uri: Cow::Owned(self.uri.into_owned()), previous: Cow::Owned(self.previous.into_owned()), }; let pending = Box::pin(func(attempt).map(|action| action.inner)); Action { inner: redirect::Action::Pending(pending), } } } // ===== impl History ===== impl IntoIterator for History { type Item = HistoryEntry; type IntoIter = std::vec::IntoIter; #[inline] fn into_iter(self) -> Self::IntoIter { self.0.into_iter() } } impl<'a> IntoIterator for &'a History { type Item = &'a HistoryEntry; type IntoIter = std::slice::Iter<'a, HistoryEntry>; #[inline] fn into_iter(self) -> Self::IntoIter { self.0.iter() } } // ===== impl PolicyKind ===== impl fmt::Debug for PolicyKind { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { PolicyKind::Custom(..) => f.pad("Custom"), PolicyKind::Limit(max) => f.debug_tuple("Limit").field(&max).finish(), PolicyKind::None => f.pad("None"), } } } // ===== impl TooManyRedirects ===== impl fmt::Display for TooManyRedirects { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str("too many redirects") } } impl StdError for TooManyRedirects {} // ===== impl FollowRedirectPolicy ===== impl FollowRedirectPolicy { /// Creates a new redirect policy handler with the given [`Policy`]. 
pub fn new(policy: Policy) -> Self { Self { policy: RequestConfig::new(Some(policy)), referer: false, uris: Vec::new(), https_only: false, history: None, } } /// Enables or disables automatic Referer header management. #[inline] pub fn with_referer(mut self, referer: bool) -> Self { self.referer = referer; self } /// Enables or disables HTTPS-only redirect enforcement. #[inline] pub fn with_https_only(mut self, https_only: bool) -> Self { self.https_only = https_only; self } } impl redirect::Policy for FollowRedirectPolicy { fn redirect(&mut self, attempt: redirect::Attempt<'_>) -> Result { // Parse the next URI from the attempt. let previous_uri = attempt.previous; let next_uri = attempt.location; // Push the previous URI to the list of URLs. self.uris.push(previous_uri.clone()); // Get policy from config let policy = self .policy .as_ref() .expect("[BUG] FollowRedirectPolicy should always have a policy set"); // Check if the next URI is already in the list of URLs. match policy.check(attempt.status, attempt.headers, next_uri, &self.uris) { redirect::Action::Follow => { // Validate the redirect URI scheme if !(next_uri.is_http() || next_uri.is_https()) { return Err(Error::uri_bad_scheme(next_uri.clone()).into()); } // Check HTTPS-only policy if self.https_only && !next_uri.is_https() { return Err(Error::redirect( Error::uri_bad_scheme(next_uri.clone()), next_uri.clone(), ) .into()); } // Record redirect history if !matches!(policy.inner, PolicyKind::None) { self.history.get_or_insert_default().push(HistoryEntry { status: attempt.status, uri: attempt.location.clone(), previous: attempt.previous.clone(), headers: attempt.headers.clone(), }); } Ok(redirect::Action::Follow) } redirect::Action::Stop => Ok(redirect::Action::Stop), redirect::Action::Pending(task) => Ok(redirect::Action::Pending(task)), redirect::Action::Error(err) => Err(Error::redirect(err, previous_uri.clone()).into()), } } fn follow_redirects(&mut self, request: &mut http::Request) -> bool { 
self.policy .load(request.extensions_mut()) .is_some_and(|policy| !matches!(policy.inner, PolicyKind::None)) } fn on_request(&mut self, req: &mut http::Request) { let next_url = req.uri().clone(); remove_sensitive_headers(req.headers_mut(), &next_url, &self.uris); if self.referer { if let Some(previous_url) = self.uris.last() { if let Some(v) = make_referer(next_url, previous_url) { req.headers_mut().insert(REFERER, v); } } } } fn on_response(&mut self, response: &mut http::Response) { if let Some(history) = self.history.take() { response.extensions_mut().insert(History(history)); } } #[inline] fn clone_body(&self, body: &Body) -> Option { body.try_clone() } } fn make_referer(next: Uri, previous: &Uri) -> Option { if next.is_http() && previous.is_https() { return None; } let mut referer = previous.clone(); referer.set_userinfo("", None); HeaderValue::from_maybe_shared(Bytes::from(referer.to_string())).ok() } fn remove_sensitive_headers(headers: &mut HeaderMap, next: &Uri, previous: &[Uri]) { if let Some(previous) = previous.last() { let cross_host = next.host() != previous.host() || next.port() != previous.port() || next.scheme() != previous.scheme(); if cross_host { /// Avoid dynamic allocation of `HeaderName` by using `from_static`. 
/// https://github.com/hyperium/http/blob/e9de46c9269f0a476b34a02a401212e20f639df2/src/header/map.rs#L3794 const COOKIE2: HeaderName = HeaderName::from_static("cookie2"); headers.remove(AUTHORIZATION); headers.remove(COOKIE); headers.remove(COOKIE2); headers.remove(PROXY_AUTHORIZATION); headers.remove(WWW_AUTHENTICATE); } } } #[cfg(test)] mod tests { use super::*; #[test] fn test_redirect_policy_limit() { let policy = Policy::default(); let next = Uri::try_from("http://x.y/z").unwrap(); let mut previous = (0..=9) .map(|i| Uri::try_from(&format!("http://a.b/c/{i}")).unwrap()) .collect::>(); match policy.check(StatusCode::FOUND, &HeaderMap::new(), &next, &previous) { redirect::Action::Follow => (), other => panic!("unexpected {other:?}"), } previous.push(Uri::try_from("http://a.b.d/e/33").unwrap()); match policy.check(StatusCode::FOUND, &HeaderMap::new(), &next, &previous) { redirect::Action::Error(err) if err.is::() => (), other => panic!("unexpected {other:?}"), } } #[test] fn test_redirect_policy_limit_to_0() { let policy = Policy::limited(0); let next = Uri::try_from("http://x.y/z").unwrap(); let previous = vec![Uri::try_from("http://a.b/c").unwrap()]; match policy.check(StatusCode::FOUND, &HeaderMap::new(), &next, &previous) { redirect::Action::Error(err) if err.is::() => (), other => panic!("unexpected {other:?}"), } } #[test] fn test_redirect_policy_custom() { let policy = Policy::custom(|attempt| { if attempt.uri.host() == Some("foo") { attempt.stop() } else { attempt.follow() } }); let next = Uri::try_from("http://bar/baz").unwrap(); match policy.check(StatusCode::FOUND, &HeaderMap::new(), &next, &[]) { redirect::Action::Follow => (), other => panic!("unexpected {other:?}"), } let next = Uri::try_from("http://foo/baz").unwrap(); match policy.check(StatusCode::FOUND, &HeaderMap::new(), &next, &[]) { redirect::Action::Stop => (), other => panic!("unexpected {other:?}"), } } #[test] fn test_remove_sensitive_headers() { use http::header::{ACCEPT, AUTHORIZATION, 
COOKIE, HeaderValue}; let mut headers = HeaderMap::new(); headers.insert(ACCEPT, HeaderValue::from_static("*/*")); headers.insert(AUTHORIZATION, HeaderValue::from_static("let me in")); headers.insert(COOKIE, HeaderValue::from_static("foo=bar")); let next = Uri::try_from("http://initial-domain.com/path").unwrap(); let mut prev = vec![Uri::try_from("http://initial-domain.com/new_path").unwrap()]; let mut filtered_headers = headers.clone(); remove_sensitive_headers(&mut headers, &next, &prev); assert_eq!(headers, filtered_headers); prev.push(Uri::try_from("http://new-domain.com/path").unwrap()); filtered_headers.remove(AUTHORIZATION); filtered_headers.remove(COOKIE); remove_sensitive_headers(&mut headers, &next, &prev); assert_eq!(headers, filtered_headers); } } ================================================ FILE: src/retry.rs ================================================ //! Retry requests //! //! A `Client` has the ability to retry requests, by sending additional copies //! to the server if a response is considered retryable. //! //! The [`Policy`] makes it easier to configure what requests to retry, along //! with including best practices by default, such as a retry budget. //! //! # Defaults //! //! The default retry behavior of a `Client` is to only retry requests where an //! error or low-level protocol NACK is encountered that is known to be safe to //! retry. Note however that providing a specific retry policy will override //! the default, and you will need to explicitly include that behavior. //! //! All policies default to including a retry budget that permits 20% extra //! requests to be sent. //! //! # Scoped //! //! A client's retry policy is scoped. That means that the policy doesn't //! apply to all requests, but only those within a user-defined scope. //! //! Since all policies include a budget by default, it doesn't make sense to //! apply it on _all_ requests. Rather, the retry history applied by a budget //! 
should likely only be applied to the same host. //! //! # Classifiers //! //! A retry policy needs to be configured with a classifier that determines //! if a request should be retried. Knowledge of the destination server's //! behavior is required to make a safe classifier. **Requests should not be //! retried** if the server cannot safely handle the same request twice, or if //! it causes side effects. //! //! Some common properties to check include if the request method is //! idempotent, or if the response status code indicates a transient error. use std::sync::Arc; use http::Request; use crate::{ Body, client::layer::retry::{Action, Classifier, ClassifyFn, ReqRep, ScopeFn, Scoped}, }; /// A retry policy. pub struct Policy { pub(crate) budget: Option, pub(crate) classifier: Classifier, pub(crate) max_retries_per_request: u32, pub(crate) scope: Scoped, } impl Policy { /// Create a retry policy that will never retry any request. /// /// This is useful for disabling the `Client`s default behavior of retrying /// protocol nacks. #[inline] pub fn never() -> Policy { Self::scoped(|_| false).no_budget() } /// Create a retry policy scoped to requests for a specific host. /// /// This is a convenience method that creates a retry policy which only applies /// to requests targeting the specified host. Requests to other hosts will not /// be retried under this policy. /// /// # Arguments /// * `host` - The hostname to match against request URIs (e.g., "api.example.com") /// /// # Example /// ```rust /// use wreq::retry::Policy; /// /// // Only retry requests to rust-lang.org /// let policy = Policy::for_host("rust-lang.org"); /// ``` #[inline] pub fn for_host(host: S) -> Policy where S: for<'a> PartialEq<&'a str> + Send + Sync + 'static, { Self::scoped(move |req| { req.uri() .host() .is_some_and(|request_host| host == request_host) }) } /// Create a scoped retry policy. /// /// For a more convenient constructor, see [`Policy::for_host()`]. 
#[inline] fn scoped(func: F) -> Policy where F: Fn(&Request) -> bool + Send + Sync + 'static, { Self { budget: Some(0.2), classifier: Classifier::Never, max_retries_per_request: 2, scope: Scoped::Dyn(Arc::new(ScopeFn(func))), } } /// Set no retry budget. /// /// Sets that no budget will be enforced. This could also be considered /// to be an infinite budget. /// /// This is NOT recommended. Disabling the budget can make your system more /// susceptible to retry storms. #[inline] pub fn no_budget(mut self) -> Self { self.budget = None; self } /// Sets the max extra load the budget will allow. /// /// Think of the amount of requests your client generates, and how much /// load that puts on the server. This option configures as a percentage /// how much extra load is allowed via retries. /// /// For example, if you send 1,000 requests per second, setting a maximum /// extra load value of `0.3` would allow 300 more requests per second /// in retries. A value of `2.5` would allow 2,500 more requests. /// /// # Panics /// /// The `extra_percent` value must be within reasonable values for a /// percentage. This method will panic if it is less than `0.0`, or greater /// than `1000.0`. #[inline] pub fn max_extra_load(mut self, extra_percent: f32) -> Self { assert!(extra_percent >= 0.0); assert!(extra_percent <= 1000.0); self.budget = Some(extra_percent); self } /// Set the max retries allowed per request. /// /// For each logical (initial) request, only retry up to `max` times. /// /// This value is used in combination with a token budget that is applied /// to all requests. Even if the budget would allow more requests, this /// limit will prevent. Likewise, the budget may prevent retrying up to /// `max` times. This setting prevents a single request from consuming /// the entire budget. /// /// Default is currently 2 retries. 
#[inline] pub fn max_retries_per_request(mut self, max: u32) -> Self { self.max_retries_per_request = max; self } /// Provide a classifier to determine if a request should be retried. /// /// # Example /// /// ```rust /// # fn with_policy(policy: wreq::retry::Policy) -> wreq::retry::Policy { /// policy.classify_fn(|req_rep| { /// match (req_rep.method(), req_rep.status()) { /// (&http::Method::GET, Some(http::StatusCode::SERVICE_UNAVAILABLE)) => { /// req_rep.retryable() /// }, /// _ => req_rep.success() /// } /// }) /// # } /// ``` #[inline] pub fn classify_fn(mut self, func: F) -> Self where F: Fn(ReqRep<'_>) -> Action + Send + Sync + 'static, { self.classifier = Classifier::Dyn(Arc::new(ClassifyFn(func))); self } } impl Default for Policy { fn default() -> Self { Self { budget: None, classifier: Classifier::ProtocolNacks, max_retries_per_request: 2, scope: Scoped::Unscoped, } } } ================================================ FILE: src/sync.rs ================================================ //! Synchronization primitives: [`Mutex`] and [`RwLock`] that never poison. //! //! These types expose APIs identical to [`std::sync::Mutex`] and [`std::sync::RwLock`], //! but **do not return** [`std::sync::PoisonError`] even if a thread panics while holding the lock. //! //! This is useful in high-availability systems where panic recovery is done externally, //! or poisoning is not meaningful in context. //! //! ## Implementation //! - When the `parking_lot` feature is enabled, it uses [`parking_lot::Mutex`] and //! [`parking_lot::RwLock`]. //! - Otherwise, it wraps [`std::sync::Mutex`] and [`std::sync::RwLock`], using `.unwrap_or_else(|e| //! e.into_inner())` to silently recover from poisoning. 
#[cfg(feature = "parking_lot")]
pub use parking_lot::*;
#[cfg(not(feature = "parking_lot"))]
pub use self::std::*;

#[cfg(not(feature = "parking_lot"))]
mod std {
    use std::{
        ops::{Deref, DerefMut},
        sync,
    };

    /// A [`Mutex`] that never poisons and has the same interface as [`std::sync::Mutex`].
    ///
    /// See [`crate::sync`] for more details.
    #[derive(Debug)]
    pub struct Mutex<T: ?Sized>(sync::Mutex<T>);

    impl<T> Mutex<T> {
        /// Like [`std::sync::Mutex::new`].
        #[inline]
        pub fn new(t: T) -> Mutex<T> {
            Mutex(sync::Mutex::new(t))
        }
    }

    impl<T: ?Sized> Mutex<T> {
        /// Like [`std::sync::Mutex::lock`].
        #[inline]
        pub fn lock<'a>(&'a self) -> MutexGuard<'a, T> {
            // Recover the guard from a poisoned lock instead of propagating the error.
            MutexGuard(self.0.lock().unwrap_or_else(|e| e.into_inner()))
        }
    }

    /// Like [`std::sync::MutexGuard`].
    #[must_use]
    pub struct MutexGuard<'a, T: ?Sized + 'a>(sync::MutexGuard<'a, T>);

    impl<'a, T: ?Sized> Deref for MutexGuard<'a, T> {
        type Target = T;
        #[inline]
        fn deref(&self) -> &T {
            self.0.deref()
        }
    }

    impl<'a, T: ?Sized> DerefMut for MutexGuard<'a, T> {
        #[inline]
        fn deref_mut(&mut self) -> &mut T {
            self.0.deref_mut()
        }
    }

    impl<T: Default> Default for Mutex<T> {
        fn default() -> Self {
            Mutex(Default::default())
        }
    }

    /// A [`RwLock`] that never poisons and has the same interface as [`std::sync::RwLock`].
    ///
    /// See [`crate::sync`] for more details.
    #[derive(Debug, Default)]
    pub struct RwLock<T: ?Sized>(sync::RwLock<T>);

    impl<T: ?Sized> RwLock<T> {
        /// Like [`std::sync::RwLock::read`].
        #[inline]
        pub fn read<'a>(&'a self) -> RwLockReadGuard<'a, T> {
            RwLockReadGuard(self.0.read().unwrap_or_else(|e| e.into_inner()))
        }

        /// Like [`std::sync::RwLock::write`].
        #[inline]
        pub fn write<'a>(&'a self) -> RwLockWriteGuard<'a, T> {
            RwLockWriteGuard(self.0.write().unwrap_or_else(|e| e.into_inner()))
        }
    }

    /// Like [`std::sync::RwLockReadGuard`].
    #[must_use]
    pub struct RwLockReadGuard<'a, T: ?Sized + 'a>(sync::RwLockReadGuard<'a, T>);

    impl<'a, T: ?Sized> Deref for RwLockReadGuard<'a, T> {
        type Target = T;
        #[inline]
        fn deref(&self) -> &T {
            self.0.deref()
        }
    }

    /// Like [`std::sync::RwLockWriteGuard`].
    #[must_use]
    pub struct RwLockWriteGuard<'a, T: ?Sized + 'a>(sync::RwLockWriteGuard<'a, T>);

    impl<'a, T: ?Sized> Deref for RwLockWriteGuard<'a, T> {
        type Target = T;
        #[inline]
        fn deref(&self) -> &T {
            self.0.deref()
        }
    }

    impl<'a, T: ?Sized> DerefMut for RwLockWriteGuard<'a, T> {
        #[inline]
        fn deref_mut(&mut self) -> &mut T {
            self.0.deref_mut()
        }
    }
}



================================================
FILE: src/tls/compress.rs
================================================
//! TLS certificate compression [RFC 8879](https://datatracker.ietf.org/doc/html/rfc8879).
//!
//! Reduces handshake latency by compressing certificate chains.
//! Supports Zlib, Brotli, and Zstd algorithms to minimize bytes-on-wire
//! and fit within the initial congestion window.

use std::{fmt::Debug, io};

use btls::{
    error::ErrorStack,
    ssl::{self, SslConnectorBuilder},
};
use btls_sys as ffi;

// Re-export the `CertificateCompressionAlgorithm` enum for users of this module.
pub use ssl::CertificateCompressionAlgorithm;

/// Certificate compression or decompression.
///
/// Wraps a function pointer or closure that processes certificate data.
#[allow(clippy::type_complexity)]
pub enum Codec {
    /// Function pointer.
    Pointer(fn(&[u8], &mut dyn io::Write) -> io::Result<()>),
    /// Closure or function object.
    Dynamic(Box<dyn Fn(&[u8], &mut dyn io::Write) -> io::Result<()> + Send + Sync>),
}

/// Trait for TLS certificate compression implementations.
///
/// Provides methods for compressing and decompressing certificate data,
/// as well as identifying the algorithm in use.
///
/// See [RFC 8879, §3](https://www.rfc-editor.org/rfc/rfc8879.html#name-compression-algorithms)
/// for the list of IANA-assigned compression algorithm identifiers.
pub trait CertificateCompressor: Debug + Sync + Send + 'static {
    /// Returns the [`Codec`] used to compress certificate chains for this algorithm.
    fn compress(&self) -> Codec;

    /// Returns the [`Codec`] used to decompress certificate chains for this algorithm.
    fn decompress(&self) -> Codec;

    /// Returns the IANA-assigned identifier of the compression algorithm.
    fn algorithm(&self) -> CertificateCompressionAlgorithm;
}

/// Adapter mapping a pair of [`Codec`]s onto btls's compile-time compressor
/// interface; `ALGORITHM` carries the IANA identifier as a const generic.
// NOTE(review): the const generic's integer type is reconstructed from
// `ffi::TLSEXT_cert_compression_*` usage — confirm against `btls_sys`.
struct Compressor<const ALGORITHM: u16> {
    compress: Codec,
    decompress: Codec,
}

// ===== impl Codec =====

impl Codec {
    #[inline]
    fn call(&self, input: &[u8], output: &mut dyn io::Write) -> io::Result<()> {
        match self {
            Codec::Pointer(func) => func(input, output),
            Codec::Dynamic(closure) => closure(input, output),
        }
    }
}

// ===== impl Compressor =====

impl<const ALGORITHM: u16> ssl::CertificateCompressor for Compressor<ALGORITHM> {
    // Translate the raw ffi identifier into the typed algorithm constant at
    // compile time; any other value is a programming error.
    const ALGORITHM: CertificateCompressionAlgorithm = match ALGORITHM {
        ffi::TLSEXT_cert_compression_zlib => CertificateCompressionAlgorithm::ZLIB,
        ffi::TLSEXT_cert_compression_brotli => CertificateCompressionAlgorithm::BROTLI,
        ffi::TLSEXT_cert_compression_zstd => CertificateCompressionAlgorithm::ZSTD,
        _ => unreachable!(),
    };

    const CAN_COMPRESS: bool = true;
    const CAN_DECOMPRESS: bool = true;

    #[inline]
    fn compress<W>(&self, input: &[u8], output: &mut W) -> io::Result<()>
    where
        W: io::Write,
    {
        self.compress.call(input, output)
    }

    #[inline]
    fn decompress<W>(&self, input: &[u8], output: &mut W) -> io::Result<()>
    where
        W: io::Write,
    {
        self.decompress.call(input, output)
    }
}

/// Register a certificate compressor with the given [`SslConnectorBuilder`].
pub(super) fn register( compressor: &dyn CertificateCompressor, builder: &mut SslConnectorBuilder, ) -> Result<(), ErrorStack> { match compressor.algorithm() { CertificateCompressionAlgorithm::ZLIB => { builder.add_certificate_compression_algorithm(Compressor::< { ffi::TLSEXT_cert_compression_zlib }, > { compress: compressor.compress(), decompress: compressor.decompress(), }) } CertificateCompressionAlgorithm::BROTLI => { builder.add_certificate_compression_algorithm(Compressor::< { ffi::TLSEXT_cert_compression_brotli }, > { compress: compressor.compress(), decompress: compressor.decompress(), }) } CertificateCompressionAlgorithm::ZSTD => { builder.add_certificate_compression_algorithm(Compressor::< { ffi::TLSEXT_cert_compression_zstd }, > { compress: compressor.compress(), decompress: compressor.decompress(), }) } _ => unreachable!(), } } ================================================ FILE: src/tls/conn/ext.rs ================================================ use std::borrow::Cow; use btls::ssl::{SslConnectorBuilder, SslVerifyMode}; use crate::{ Error, tls::{ compress::{self, CertificateCompressor}, trust::CertStore, }, }; /// SslConnectorBuilderExt trait for `SslConnectorBuilder`. pub trait SslConnectorBuilderExt { /// Configure the CertStore for the given `SslConnectorBuilder`. fn set_cert_store(self, store: Option<&CertStore>) -> crate::Result; /// Configure the certificate verification for the given `SslConnectorBuilder`. fn set_cert_verification(self, enable: bool) -> crate::Result; /// Configure the certificate compressors for the given `SslConnectorBuilder`. 
fn set_cert_compressors( self, compressors: Option<&Cow<'static, [&'static dyn CertificateCompressor]>>, ) -> crate::Result; } impl SslConnectorBuilderExt for SslConnectorBuilder { #[inline] fn set_cert_store(mut self, store: Option<&CertStore>) -> crate::Result { if let Some(store) = store { store.add_to_tls(&mut self); } else { self.set_default_verify_paths().map_err(Error::tls)?; } Ok(self) } #[inline] fn set_cert_verification(mut self, enable: bool) -> crate::Result { if enable { self.set_verify(SslVerifyMode::PEER); } else { self.set_verify(SslVerifyMode::NONE); } Ok(self) } #[inline] fn set_cert_compressors( mut self, compressors: Option<&Cow<'static, [&'static dyn CertificateCompressor]>>, ) -> crate::Result { if let Some(compressors) = compressors { for compressor in compressors.as_ref() { compress::register(*compressor, &mut self).map_err(Error::tls)?; } } Ok(self) } } ================================================ FILE: src/tls/conn/macros.rs ================================================ macro_rules! set_bool { ($cfg:expr, $field:ident, $conn:expr, $setter:ident) => { if $cfg.$field { $conn.$setter(); } }; ($cfg:expr, !$field:ident, $conn:expr, $setter:ident, $arg:expr) => { if !$cfg.$field { $conn.$setter($arg); } }; } macro_rules! set_option { ($cfg:expr, $field:ident, $conn:expr, $setter:ident) => { if let Some(val) = $cfg.$field { $conn.$setter(val); } }; } macro_rules! set_option_ref_try { ($cfg:expr, $field:ident, $conn:expr, $setter:ident) => { if let Some(val) = $cfg.$field.as_ref() { $conn.$setter(val).map_err(Error::tls)?; } }; } macro_rules! 
set_option_inner_try { ($field:ident, $conn:expr, $setter:ident) => { $conn.$setter($field.map(|v| v.0)).map_err(Error::tls)?; }; } ================================================ FILE: src/tls/conn/service.rs ================================================ use std::{ fmt::Debug, future::Future, pin::Pin, task::{Context, Poll}, }; use http::{Uri, uri::Scheme}; use tokio::io::{AsyncRead, AsyncWrite}; use tokio_btls::SslStream; use tower::{BoxError, Service}; use super::{EstablishedConn, HttpsConnector, MaybeHttpsStream}; use crate::{ client::{Connection, ConnectionDescriptor}, ext::UriExt, }; type BoxFuture = Pin> + Send>>; async fn perform_handshake(ssl: btls::ssl::Ssl, conn: T) -> Result, BoxError> where T: AsyncRead + AsyncWrite + Unpin, { let mut stream = SslStream::new(ssl, conn)?; Pin::new(&mut stream).connect().await?; Ok(MaybeHttpsStream::Https(stream)) } impl Service for HttpsConnector where S: Service + Send, S::Error: Into, S::Future: Unpin + Send + 'static, T: AsyncRead + AsyncWrite + Connection + Unpin + Debug + Sync + Send + 'static, { type Response = MaybeHttpsStream; type Error = BoxError; type Future = BoxFuture; #[inline] fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { self.http.poll_ready(cx).map_err(Into::into) } fn call(&mut self, uri: Uri) -> Self::Future { let connect = self.http.call(uri.clone()); let tls = self.tls.clone(); let f = async move { let conn = connect.await.map_err(Into::into)?; // Early return if it is not a tls scheme if uri.scheme() != Some(&Scheme::HTTPS) { return Ok(MaybeHttpsStream::Http(conn)); } let ssl = tls.setup_ssl(uri)?; perform_handshake(ssl, conn).await }; Box::pin(f) } } impl Service for HttpsConnector where S: Service + Send, S::Error: Into, S::Future: Unpin + Send + 'static, T: AsyncRead + AsyncWrite + Connection + Unpin + Debug + Sync + Send + 'static, { type Response = MaybeHttpsStream; type Error = BoxError; type Future = BoxFuture; #[inline] fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> 
{ self.http.poll_ready(cx).map_err(Into::into) } fn call(&mut self, descriptor: ConnectionDescriptor) -> Self::Future { let uri = descriptor.uri().clone(); let connect = self.http.call(uri.clone()); let tls = self.tls.clone(); let f = async move { let conn = connect.await.map_err(Into::into)?; // Early return if it is not a tls scheme if uri.is_http() { return Ok(MaybeHttpsStream::Http(conn)); } let ssl = tls.setup_ssl2(descriptor)?; perform_handshake(ssl, conn).await }; Box::pin(f) } } impl Service> for HttpsConnector where S: Service + Send + Clone + 'static, S::Error: Into, S::Future: Unpin + Send + 'static, T: AsyncRead + AsyncWrite + Connection + Unpin + Debug + Sync + Send + 'static, IO: AsyncRead + AsyncWrite + Unpin + Send + Sync + Debug + 'static, { type Response = MaybeHttpsStream; type Error = BoxError; type Future = BoxFuture; #[inline] fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { self.http.poll_ready(cx).map_err(Into::into) } fn call(&mut self, conn: EstablishedConn) -> Self::Future { let tls = self.tls.clone(); let fut = async move { // Early return if it is not a tls scheme if conn.descriptor.uri().is_http() { return Ok(MaybeHttpsStream::Http(conn.io)); } let ssl = tls.setup_ssl2(conn.descriptor)?; perform_handshake(ssl, conn.io).await }; Box::pin(fut) } } ================================================ FILE: src/tls/conn.rs ================================================ //! SSL support via BoringSSL. 
#[macro_use] mod macros; mod ext; mod service; use std::{ borrow::Cow, fmt::{self, Debug}, io, pin::Pin, sync::{Arc, LazyLock}, task::{Context, Poll}, }; use btls::{ error::ErrorStack, ex_data::Index, ssl::{Ssl, SslConnector, SslMethod, SslOptions, SslSessionCacheMode}, }; use http::{Uri, Version}; use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; use tokio_btls::SslStream; use tower::{BoxError, Service}; use crate::{ Error, client::{Connected, Connection, ConnectionDescriptor}, tls::{ AlpnProtocol, AlpsProtocol, KeyShare, TlsOptions, TlsVersion, conn::ext::SslConnectorBuilderExt, keylog::KeyLog, session::{Key, LruTlsSessionCache, TlsSession, TlsSessionCache}, trust::{CertStore, Identity}, }, }; fn key_index() -> Result, ErrorStack> { static IDX: LazyLock, ErrorStack>> = LazyLock::new(Ssl::new_ex_index); IDX.clone() } /// Settings for [`TlsConnector`] #[derive(Clone)] pub struct HandshakeSettings { no_ticket: bool, enable_ech_grease: bool, verify_hostname: bool, tls_sni: bool, alpn_protocols: Option>, alps_protocols: Option>, alps_use_new_codepoint: bool, key_shares: Option>, random_aes_hw_override: bool, } /// A Connector using BoringSSL to support `http` and `https` schemes. #[derive(Clone)] pub struct HttpsConnector { http: T, tls: TlsConnector, } /// A builder for creating a `TlsConnector`. pub struct TlsConnectorBuilder { alpn_protocol: Option, max_version: Option, min_version: Option, tls_sni: bool, verify_hostname: bool, identity: Option, cert_store: Option, cert_verification: bool, keylog: Option, session_cache: Arc, } /// A layer which wraps services in an `SslConnector`. #[derive(Clone)] pub struct TlsConnector { ssl: SslConnector, cache: Option>, settings: HandshakeSettings, } // ===== impl HttpsConnector ===== impl HttpsConnector where S: Service + Send, S::Error: Into, S::Future: Unpin + Send + 'static, T: AsyncRead + AsyncWrite + Connection + Unpin + Debug + Sync + Send + 'static, { /// Creates a new [`HttpsConnector`] with a given [`TlsConnector`]. 
#[inline] pub fn new(http: S, tls: TlsConnector) -> HttpsConnector { HttpsConnector { http, tls } } /// Disables ALPN negotiation. #[inline] pub fn no_alpn(&mut self) -> &mut Self { self.tls.settings.alpn_protocols = None; self } } // ===== impl TlsConnector ===== impl TlsConnector { /// Creates a new [`TlsConnectorBuilder`] with the given configuration. pub fn builder() -> TlsConnectorBuilder { TlsConnectorBuilder { alpn_protocol: None, min_version: None, max_version: None, identity: None, tls_sni: true, verify_hostname: true, cert_store: None, cert_verification: true, keylog: None, session_cache: Arc::new(LruTlsSessionCache::new(8)), } } fn setup_ssl(&self, uri: Uri) -> Result { let cfg = self.ssl.configure()?; let host = uri.host().ok_or("URI missing host")?; let host = Self::normalize_host(host); let ssl = cfg.into_ssl(host)?; Ok(ssl) } fn setup_ssl2(&self, descriptor: ConnectionDescriptor) -> Result { let mut cfg = self.ssl.configure()?; // Use server name indication cfg.set_use_server_name_indication(self.settings.tls_sni); // Verify hostname cfg.set_verify_hostname(self.settings.verify_hostname); // Set ECH grease cfg.set_enable_ech_grease(self.settings.enable_ech_grease); // Set random AES hardware override if self.settings.random_aes_hw_override { let random = (crate::util::fast_random() & 1) == 0; cfg.set_aes_hw_override(random); } // Set ALPN protocols if let Some(version) = descriptor.version() { match version { Version::HTTP_11 | Version::HTTP_10 | Version::HTTP_09 => { cfg.set_alpn_protos(&AlpnProtocol::HTTP1.encode())?; } Version::HTTP_2 => { cfg.set_alpn_protos(&AlpnProtocol::HTTP2.encode())?; } // No ALPN protocol for other versions _ => {} } } else { // Default use the connector configuration. 
if let Some(ref alpn_values) = self.settings.alpn_protocols { let encoded = AlpnProtocol::encode_sequence(alpn_values.as_ref()); cfg.set_alpn_protos(&encoded)?; } } // Set ALPS protos if let Some(ref alps_values) = self.settings.alps_protocols { for alps in alps_values.iter() { cfg.add_application_settings(alps.0)?; } // By default, the new endpoint is used. if !alps_values.is_empty() { cfg.set_alps_use_new_codepoint(self.settings.alps_use_new_codepoint); } } // Set TLS key shares if let Some(ref key_shares) = self.settings.key_shares { cfg.set_client_key_shares(key_shares.as_ref())?; } let uri = descriptor.uri().clone(); let host = uri.host().ok_or("URI missing host")?; let host = Self::normalize_host(host); if let Some(ref cache) = self.cache { let key = Key(descriptor.id()); // If the session cache is enabled, we try to retrieve the session // associated with the key. If it exists, we set it in the SSL configuration. if let Some(session) = cache.pop(&key) { #[allow(unsafe_code)] unsafe { cfg.set_session(&session.0) }?; if self.settings.no_ticket { cfg.set_options(SslOptions::NO_TICKET); } } let idx = key_index()?; cfg.set_ex_data(idx, key); } Ok(cfg.into_ssl(host)?) } /// If `host` is an IPv6 address, we must strip away the square brackets that surround /// it (otherwise, boring will fail to parse the host as an IP address, eventually /// causing the handshake to fail due a hostname verification error). fn normalize_host(host: &str) -> &str { if host.is_empty() { return host; } let last = host.len() - 1; let mut chars = host.chars(); if let (Some('['), Some(']')) = (chars.next(), chars.last()) { if host[1..last].parse::().is_ok() { return &host[1..last]; } } host } } // ====== impl TlsConnectorBuilder ===== impl TlsConnectorBuilder { /// Sets the alpn protocol to be used. #[inline] pub fn alpn_protocol(mut self, protocol: Option) -> Self { self.alpn_protocol = protocol; self } /// Sets the TLS keylog policy. 
#[inline] pub fn keylog(mut self, keylog: Option) -> Self { self.keylog = keylog; self } /// Sets the identity to be used for client certificate authentication. #[inline] pub fn identity(mut self, identity: Option) -> Self { self.identity = identity; self } /// Sets the certificate store used for TLS verification. #[inline] pub fn cert_store(mut self, cert_store: T) -> Self where T: Into>, { self.cert_store = cert_store.into(); self } /// Sets the certificate verification flag. #[inline] pub fn cert_verification(mut self, enabled: bool) -> Self { self.cert_verification = enabled; self } /// Sets the minimum TLS version to use. #[inline] pub fn min_version(mut self, version: T) -> Self where T: Into>, { self.min_version = version.into(); self } /// Sets the maximum TLS version to use. #[inline] pub fn max_version(mut self, version: T) -> Self where T: Into>, { self.max_version = version.into(); self } /// Sets the Server Name Indication (SNI) flag. #[inline] pub fn tls_sni(mut self, enabled: bool) -> Self { self.tls_sni = enabled; self } /// Sets the hostname verification flag. #[inline] pub fn verify_hostname(mut self, enabled: bool) -> Self { self.verify_hostname = enabled; self } /// Sets a custom TLS session store. /// /// By default, a [`LruSessionStore`] is used. Use this method to provide /// a custom [`TlsSessionCache`] implementation (e.g., file-based or distributed). #[inline] pub fn session_store(mut self, store: Option>) -> Self { if let Some(store) = store { self.session_cache = store; } self } /// Build the `TlsConnector` with the provided configuration. 
pub fn build<'a, T>(&self, opts: T) -> crate::Result where T: Into>, { let opts = opts.into(); // Replace the default configuration with the provided one let max_tls_version = opts.max_tls_version.or(self.max_version); let min_tls_version = opts.min_tls_version.or(self.min_version); let alpn_protocols = self .alpn_protocol .map(|proto| Cow::Owned(vec![proto])) .or_else(|| opts.alpn_protocols.clone()); // Create the SslConnector with the provided options let mut connector = SslConnector::bare_builder(SslMethod::tls()) .map_err(Error::tls)? .set_cert_store(self.cert_store.as_ref())? .set_cert_verification(self.cert_verification)? .set_cert_compressors(opts.certificate_compressors.as_ref())?; // Set Identity if let Some(ref identity) = self.identity { identity.add_to_tls(&mut connector)?; } // Set minimum TLS version set_option_inner_try!(min_tls_version, connector, set_min_proto_version); // Set maximum TLS version set_option_inner_try!(max_tls_version, connector, set_max_proto_version); // Set OCSP stapling set_bool!(opts, enable_ocsp_stapling, connector, enable_ocsp_stapling); // Set Signed Certificate Timestamps (SCT) set_bool!( opts, enable_signed_cert_timestamps, connector, enable_signed_cert_timestamps ); // Set TLS Session ticket options set_bool!( opts, !session_ticket, connector, set_options, SslOptions::NO_TICKET ); // Set TLS PSK DHE key exchange options set_bool!( opts, !psk_dhe_ke, connector, set_options, SslOptions::NO_PSK_DHE_KE ); // Set TLS No Renegotiation options set_bool!( opts, !renegotiation, connector, set_options, SslOptions::NO_RENEGOTIATION ); // Set TLS grease options set_option!(opts, grease_enabled, connector, set_grease_enabled); // Set TLS permute extensions options set_option!(opts, permute_extensions, connector, set_permute_extensions); // Set TLS curves list set_option_ref_try!(opts, curves_list, connector, set_curves_list); // Set TLS signature algorithms list set_option_ref_try!(opts, sigalgs_list, connector, set_sigalgs_list); // 
Set TLS prreserve TLS 1.3 cipher list order set_option!( opts, preserve_tls13_cipher_list, connector, set_preserve_tls13_cipher_list ); // Set TLS cipher list set_option_ref_try!(opts, cipher_list, connector, set_cipher_list); // Set TLS delegated credentials set_option_ref_try!( opts, delegated_credentials, connector, set_delegated_credentials ); // Set TLS record size limit set_option!(opts, record_size_limit, connector, set_record_size_limit); // Set TLS aes hardware override set_option!(opts, aes_hw_override, connector, set_aes_hw_override); // Set TLS extension permutation if let Some(ref extension_permutation) = opts.extension_permutation { connector .set_extension_permutation(extension_permutation) .map_err(Error::tls)?; } // Set TLS keylog handler. if let Some(ref policy) = self.keylog { let handle = policy.clone().handle().map_err(Error::tls)?; connector.set_keylog_callback(move |_, line| { handle.write(line); }); } // Create the handshake settings with the default session cache capacity. let settings = HandshakeSettings { tls_sni: self.tls_sni, verify_hostname: self.verify_hostname, no_ticket: opts.psk_skip_session_ticket, alpn_protocols, alps_protocols: opts.alps_protocols.clone(), alps_use_new_codepoint: opts.alps_use_new_codepoint, enable_ech_grease: opts.enable_ech_grease, key_shares: opts.key_shares.clone(), random_aes_hw_override: opts.random_aes_hw_override, }; // If the session cache is disabled, we don't need to set up any callbacks. let cache = opts.pre_shared_key.then(|| { let session_cache = self.session_cache.clone(); connector.set_session_cache_mode(SslSessionCacheMode::CLIENT); connector.set_new_session_callback({ let cache = session_cache.clone(); move |ssl, session| { if let Ok(Some(key)) = key_index().map(|idx| ssl.ex_data(idx)) { cache.put(key.clone(), TlsSession(session)); } } }); session_cache }); Ok(TlsConnector { ssl: connector.build(), cache, settings, }) } } /// A stream which may be wrapped with TLS. 
pub enum MaybeHttpsStream { /// A raw HTTP stream. Http(T), /// An SSL-wrapped HTTP stream. Https(SslStream), } /// A connection that has been established with a TLS handshake. pub struct EstablishedConn { io: IO, descriptor: ConnectionDescriptor, } // ===== impl MaybeHttpsStream ===== impl AsRef for MaybeHttpsStream { #[inline] fn as_ref(&self) -> &T { match self { MaybeHttpsStream::Http(s) => s, MaybeHttpsStream::Https(s) => s.get_ref(), } } } impl fmt::Debug for MaybeHttpsStream { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { MaybeHttpsStream::Http(..) => f.pad("Http(..)"), MaybeHttpsStream::Https(..) => f.pad("Https(..)"), } } } impl Connection for MaybeHttpsStream where T: Connection, { fn connected(&self) -> Connected { match self { MaybeHttpsStream::Http(s) => s.connected(), MaybeHttpsStream::Https(s) => { let mut connected = s.get_ref().connected(); if s.ssl().selected_alpn_protocol() == Some(b"h2") { connected = connected.negotiated_h2(); } connected } } } } impl AsyncRead for MaybeHttpsStream where T: AsyncRead + AsyncWrite + Unpin, { #[inline] fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll> { match self.as_mut().get_mut() { MaybeHttpsStream::Http(inner) => Pin::new(inner).poll_read(cx, buf), MaybeHttpsStream::Https(inner) => Pin::new(inner).poll_read(cx, buf), } } } impl AsyncWrite for MaybeHttpsStream where T: AsyncRead + AsyncWrite + Unpin, { #[inline] fn poll_write( mut self: Pin<&mut Self>, ctx: &mut Context<'_>, buf: &[u8], ) -> Poll> { match self.as_mut().get_mut() { MaybeHttpsStream::Http(inner) => Pin::new(inner).poll_write(ctx, buf), MaybeHttpsStream::Https(inner) => Pin::new(inner).poll_write(ctx, buf), } } #[inline] fn poll_flush(mut self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll> { match self.as_mut().get_mut() { MaybeHttpsStream::Http(inner) => Pin::new(inner).poll_flush(ctx), MaybeHttpsStream::Https(inner) => Pin::new(inner).poll_flush(ctx), } } #[inline] fn 
poll_shutdown(mut self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll> { match self.as_mut().get_mut() { MaybeHttpsStream::Http(inner) => Pin::new(inner).poll_shutdown(ctx), MaybeHttpsStream::Https(inner) => Pin::new(inner).poll_shutdown(ctx), } } #[inline] fn is_write_vectored(&self) -> bool { match self { MaybeHttpsStream::Http(inner) => inner.is_write_vectored(), MaybeHttpsStream::Https(inner) => inner.is_write_vectored(), } } #[inline] fn poll_write_vectored( self: Pin<&mut Self>, cx: &mut Context<'_>, bufs: &[io::IoSlice<'_>], ) -> Poll> { match self.get_mut() { MaybeHttpsStream::Http(inner) => Pin::new(inner).poll_write_vectored(cx, bufs), MaybeHttpsStream::Https(inner) => Pin::new(inner).poll_write_vectored(cx, bufs), } } } // ===== impl EstablishedConn ===== impl EstablishedConn { /// Creates a new [`EstablishedConn`]. #[inline] pub fn new(io: IO, descriptor: ConnectionDescriptor) -> EstablishedConn { EstablishedConn { io, descriptor } } } ================================================ FILE: src/tls/keylog/handle.rs ================================================ use std::{ fs::OpenOptions, io::{Result, Write}, path::Path, sync::{ Arc, mpsc::{self, Sender}, }, }; /// Handle for writing to a key log file. #[derive(Debug, Clone)] pub struct Handle { #[allow(unused)] filepath: Arc, sender: Sender, } impl Handle { /// Create a new [`Handle`] with the specified path and sender. 
pub fn new(filepath: Arc) -> Result { if let Some(parent) = filepath.parent() { std::fs::create_dir_all(parent)?; } let mut file = OpenOptions::new() .create(true) .append(true) .open(&filepath)?; let (sender, receiver) = mpsc::channel::(); let _path_name = filepath.clone(); std::thread::spawn(move || { trace!( file = ?_path_name, "Handle: receiver task up and running", ); while let Ok(line) = receiver.recv() { if let Err(_err) = file.write_all(line.as_bytes()) { error!( file = ?_path_name, error = %_err, "Handle: failed to write file", ); } } }); Ok(Handle { filepath, sender }) } /// Write a line to the keylogger. pub fn write(&self, line: &str) { let line = format!("{line}\n"); if let Err(_err) = self.sender.send(line) { error!( file = ?self.filepath, error = %_err, "Handle: failed to send log line for writing", ); } } } ================================================ FILE: src/tls/keylog.rs ================================================ //! TLS Key Log Management //! //! This module provides utilities for managing TLS key logging, allowing session keys to be //! written to a file for debugging or analysis (e.g., with Wireshark). //! //! The [`KeyLog`] enum lets you control key log behavior, either by respecting the //! `SSLKEYLOGFILE` environment variable or by specifying a custom file path. Handles are cached //! globally to avoid duplicate file access. mod handle; use std::{ borrow::Cow, collections::{HashMap, hash_map::Entry}, io::{Error, ErrorKind, Result}, path::{Component, Path, PathBuf}, sync::{Arc, OnceLock}, }; use handle::Handle; use crate::sync::RwLock; /// Specifies the intent for a (TLS) keylogger. #[derive(Debug, Clone)] pub struct KeyLog(Option>); impl KeyLog { /// Creates a [`KeyLog`] based on the `SSLKEYLOGFILE` environment variable. 
pub fn from_env() -> KeyLog { match std::env::var("SSLKEYLOGFILE") { Ok(ref s) if !s.trim().is_empty() => { KeyLog(Some(Arc::from(normalize_path(Path::new(s))))) } _ => KeyLog(None), } } /// Creates a [`KeyLog`] that writes to the specified file path. pub fn from_file>(path: P) -> KeyLog { KeyLog(Some(Arc::from(normalize_path(path.as_ref())))) } /// Creates a new key log file [`Handle`] based on the policy. pub(crate) fn handle(self) -> Result { static GLOBAL_KEYLOG_CACHE: OnceLock, Handle>>> = OnceLock::new(); let path = self .0 .ok_or_else(|| Error::new(ErrorKind::NotFound, "KeyLog: file path is not specified"))?; let cache = GLOBAL_KEYLOG_CACHE.get_or_init(RwLock::default); if let Some(handle) = cache.read().get(path.as_ref()).cloned() { return Ok(handle); } match cache.write().entry(path.clone()) { Entry::Occupied(entry) => Ok(entry.get().clone()), Entry::Vacant(entry) => { let handle = Handle::new(path)?; entry.insert(handle.clone()); Ok(handle) } } } } fn normalize_path<'a, P>(path: P) -> PathBuf where P: Into>, { let path = path.into(); let mut components = path.components().peekable(); let mut ret = if let Some(c @ Component::Prefix(..)) = components.peek().cloned() { components.next(); PathBuf::from(c.as_os_str()) } else { PathBuf::new() }; for component in components { match component { Component::Prefix(..) => unreachable!(), Component::RootDir => { ret.push(component.as_os_str()); } Component::CurDir => {} Component::ParentDir => { ret.pop(); } Component::Normal(c) => { ret.push(c); } } } ret } ================================================ FILE: src/tls/session.rs ================================================ //! TLS session caching and resumption. //! //! Handshakes are expensive. This module lets you reuse sessions to save //! CPU cycles and reduce latency. //! //! By default, we use an in-memory LRU cache, but you can plug in your own //! implementation if you're running at scale or need to share sessions //! across multiple instances. 
use std::{ borrow::Borrow, collections::{HashMap, hash_map::Entry}, hash::{Hash, Hasher}, num::NonZeroUsize, sync::Arc, }; use btls::ssl::{SslSession, SslVersion}; use lru::LruCache; use crate::{client::ConnectionId, sync::Mutex, tls::TlsVersion}; /// An opaque key identifying a TLS session cache entry. #[derive(Clone, PartialEq, Eq, Hash)] pub struct Key(pub(super) ConnectionId); /// A TLS session that can be stored and retrieved from a session cache. #[derive(Clone)] pub struct TlsSession(pub(super) SslSession); /// A trait for cache storing and retrieving TLS sessions. /// /// # TLS 1.3 Session Handling /// /// For TLS 1.3 sessions, implementations **should** remove the session after /// retrieval to comply with [RFC 8446 Appendix C.4](https://tools.ietf.org/html/rfc8446#appendix-C.4), /// which requires that session tickets are used at most once to prevent /// concurrent handshakes from reusing the same session. pub trait TlsSessionCache: Send + Sync { /// Store a TLS session associated with the given key. fn put(&self, key: Key, session: TlsSession); /// Retrieve a TLS session for the given key. /// /// For TLS 1.3, the session should be removed from the cache upon retrieval /// to ensure single-use semantics (see [RFC 8446 Appendix C.4]). fn pop(&self, key: &Key) -> Option; } impl_into_shared!( /// Trait for converting types into a shared [`TlsSessionCache`]. /// /// This allows accepting bare types, `Arc`, or `Arc`. pub trait IntoTlsSessionCache => TlsSessionCache ); /// The default two-level LRU session cache. /// /// Maintains both forward (key → sessions) and reverse (session → key) lookups /// for efficient session storage, retrieval, and cleanup operations. /// /// This is the built-in implementation of [`TlsSessionCache`] used when no /// custom session store is configured. 
pub struct LruTlsSessionCache { inner: Mutex, per_host_session_capacity: usize, } struct Inner { reverse: HashMap, per_host_sessions: HashMap>, } // ===== impl TlsSession ===== impl TlsSession { /// Returns the TLS session ID. #[inline] pub fn id(&self) -> &[u8] { self.0.id() } /// Returns the time at which the session was established, in seconds since the Unix epoch. #[inline] pub fn time(&self) -> u64 { self.0.time() } /// Returns the sessions timeout, in seconds. /// /// A session older than this time should not be used for session resumption. #[inline] pub fn timeout(&self) -> u32 { self.0.timeout() } /// Returns the TLS protocol version negotiated for this session. #[inline] pub fn protocol_version(&self) -> TlsVersion { let version = self.0.protocol_version(); if version == SslVersion::SSL3 { // SSLv3 (SSL 3.0) is obsolete and insecure, and is not supported by btls. // This branch should never be reached in normal operation. If it is, // it indicates a bug or an unsupported/legacy OpenSSL configuration. unreachable!( "Encountered unsupported protocol: SSLv3 (SSL 3.0) is obsolete and not accepted by btls" ); } TlsVersion(version) } } impl Eq for TlsSession {} impl PartialEq for TlsSession { #[inline] fn eq(&self, other: &TlsSession) -> bool { self.0.id() == other.0.id() } } impl Hash for TlsSession { #[inline] fn hash(&self, state: &mut H) { self.0.id().hash(state); } } impl Borrow<[u8]> for TlsSession { #[inline] fn borrow(&self) -> &[u8] { self.0.id() } } // ===== impl LruTlsSessionCache ===== impl LruTlsSessionCache { /// Creates a new [`LruTlsSessionCache`] with the given per-host capacity. 
pub fn new(per_host_session_capacity: usize) -> Self { LruTlsSessionCache { inner: Mutex::new(Inner { reverse: HashMap::new(), per_host_sessions: HashMap::new(), }), per_host_session_capacity, } } } impl TlsSessionCache for LruTlsSessionCache { fn put(&self, key: Key, session: TlsSession) { let mut inner = self.inner.lock(); let evicted = { let per_host_sessions = inner .per_host_sessions .entry(key.clone()) .or_insert_with(|| { NonZeroUsize::new(self.per_host_session_capacity) .map_or_else(LruCache::unbounded, LruCache::new) }); // Enforce per-key capacity limit by evicting the least recently used session let evicted = if per_host_sessions.len() >= self.per_host_session_capacity { per_host_sessions.pop_lru().map(|(s, _)| s) } else { None }; per_host_sessions.put(session.clone(), ()); evicted }; if let Some(evicted_session) = evicted { inner.reverse.remove(&evicted_session); } inner.reverse.insert(session, key); } fn pop(&self, key: &Key) -> Option { let mut inner = self.inner.lock(); let session = { let per_host_sessions = inner.per_host_sessions.get_mut(key)?; per_host_sessions.peek_lru()?.0.clone() }; // https://tools.ietf.org/html/rfc8446#appendix-C.4 // OpenSSL will remove the session from its cache after the handshake completes anyway, but // this ensures that concurrent handshakes don't end up with the same session. if session.protocol_version() == TlsVersion::TLS_1_3 { if let Some(key) = inner.reverse.remove(&session) { if let Entry::Occupied(mut entry) = inner.per_host_sessions.entry(key) { entry.get_mut().pop(&session); if entry.get().is_empty() { entry.remove(); } } } } Some(session) } } ================================================ FILE: src/tls/trust/identity.rs ================================================ use btls::{ pkcs12::Pkcs12, pkey::{PKey, Private}, x509::X509, }; use crate::Error; /// Represents a private key and X509 cert as a client certificate. 
#[derive(Debug, Clone)] pub struct Identity { pkey: PKey, cert: X509, chain: Vec, } impl Identity { /// Parses a DER-formatted PKCS #12 archive, using the specified password to decrypt the key. /// /// The archive should contain a leaf certificate and its private key, as well any intermediate /// certificates that allow clients to build a chain to a trusted root. /// The chain certificates should be in order from the leaf certificate towards the root. /// /// PKCS #12 archives typically have the file extension `.p12` or `.pfx`, and can be created /// with the OpenSSL `pkcs12` tool: /// /// ```bash /// openssl pkcs12 -export -out identity.pfx -inkey key.pem -in cert.pem -certfile chain_certs.pem /// ``` /// /// # Examples /// /// ``` /// # use std::fs::File; /// # use std::io::Read; /// # fn pkcs12() -> Result<(), Box> { /// let mut buf = Vec::new(); /// File::open("my-ident.pfx")?.read_to_end(&mut buf)?; /// let pkcs12 = wreq::Identity::from_pkcs12_der(&buf, "my-privkey-password")?; /// # drop(pkcs12); /// # Ok(()) /// # } /// ``` pub fn from_pkcs12_der(buf: &[u8], pass: &str) -> crate::Result { let pkcs12 = Pkcs12::from_der(buf).map_err(Error::tls)?; let parsed = pkcs12.parse(pass).map_err(Error::tls)?; Ok(Identity { pkey: parsed.pkey, cert: parsed.cert, // > The stack is the reverse of what you might expect due to the way // > PKCS12_parse is implemented, so we need to load it backwards. // > https://github.com/sfackler/rust-native-tls/commit/05fb5e583be589ab63d9f83d986d095639f8ec44 chain: parsed.chain.into_iter().flatten().rev().collect(), }) } /// Parses a chain of PEM encoded X509 certificates, with the leaf certificate first. /// `key` is a PEM encoded PKCS #8 formatted private key for the leaf certificate. /// /// The certificate chain should contain any intermediate certificates that should be sent to /// clients to allow them to build a chain to a trusted root. 
/// /// A certificate chain here means a series of PEM encoded certificates concatenated together. /// /// # Examples /// /// ``` /// # use std::fs; /// # fn pkcs8() -> Result<(), Box> { /// let cert = fs::read("client.pem")?; /// let key = fs::read("key.pem")?; /// let pkcs8 = wreq::Identity::from_pkcs8_pem(&cert, &key)?; /// # drop(pkcs8); /// # Ok(()) /// # } /// ``` pub fn from_pkcs8_pem(buf: &[u8], key: &[u8]) -> crate::Result { if !key.starts_with(b"-----BEGIN PRIVATE KEY-----") { return Err(Error::builder("expected PKCS#8 PEM")); } let pkey = PKey::private_key_from_pem(key).map_err(Error::tls)?; let mut cert_chain = X509::stack_from_pem(buf).map_err(Error::tls)?.into_iter(); let cert = cert_chain.next().ok_or_else(|| { Error::builder("at least one certificate must be provided to create an identity") })?; let chain = cert_chain.collect(); Ok(Identity { pkey, cert, chain }) } pub(crate) fn add_to_tls( &self, connector: &mut btls::ssl::SslConnectorBuilder, ) -> crate::Result<()> { connector.set_certificate(&self.cert).map_err(Error::tls)?; connector.set_private_key(&self.pkey).map_err(Error::tls)?; for cert in self.chain.iter() { // https://www.openssl.org/docs/manmaster/man3/SSL_CTX_add_extra_chain_cert.html // specifies that "When sending a certificate chain, extra chain certificates are // sent in order following the end entity certificate." 
connector .add_extra_chain_cert(cert.clone()) .map_err(Error::tls)?; } Ok(()) } } #[cfg(test)] mod test { use super::Identity; #[test] fn identity_from_pkcs12_der_invalid() { Identity::from_pkcs12_der(b"not der", "nope").unwrap_err(); } #[test] fn identity_from_pkcs8_pem_invalid() { Identity::from_pkcs8_pem(b"not pem", b"not key").unwrap_err(); } } ================================================ FILE: src/tls/trust/parse.rs ================================================ use btls::x509::store::{X509Store, X509StoreBuilder}; use super::{Certificate, CertificateInput}; use crate::{Error, Result}; pub fn parse_certs<'c, I>( certs: I, parser: fn(&'c [u8]) -> crate::Result, ) -> Result where I: IntoIterator, I::Item: Into>, { let mut store = X509StoreBuilder::new().map_err(Error::tls)?; let certs = filter_map_certs(certs, parser); process_certs(certs.into_iter(), &mut store)?; Ok(store.build()) } pub fn parse_certs_with_stack(certs: C, parse: F) -> Result where C: AsRef<[u8]>, F: Fn(C) -> Result>, { let mut store = X509StoreBuilder::new().map_err(Error::tls)?; let certs = parse(certs)?; process_certs(certs.into_iter(), &mut store)?; Ok(store.build()) } pub fn process_certs(iter: I, store: &mut X509StoreBuilder) -> Result<()> where I: Iterator, { let mut valid_count = 0; let mut invalid_count = 0; for cert in iter { if let Err(_err) = store.add_cert(cert.0) { invalid_count += 1; warn!("tls failed to parse certificate: {:?}", _err); } else { valid_count += 1; } } if valid_count == 0 && invalid_count > 0 { return Err(Error::builder("invalid certificate")); } Ok(()) } pub fn filter_map_certs<'c, I>( certs: I, parser: fn(&'c [u8]) -> Result, ) -> impl Iterator where I: IntoIterator, I::Item: Into>, { certs .into_iter() .map(Into::into) .filter_map(move |data| match data.with_parser(parser) { Ok(cert) => Some(cert), Err(_err) => { warn!("tls failed to parse certificate: {:?}", _err); None } }) } ================================================ FILE: src/tls/trust/store.rs 
================================================
use std::sync::Arc;

use btls::{
    ssl::SslConnectorBuilder,
    x509::store::{X509Store, X509StoreBuilder},
};

use super::{
    Certificate, CertificateInput,
    parse::{filter_map_certs, parse_certs, parse_certs_with_stack, process_certs},
};
use crate::{Error, Result};

/// A builder for constructing a `CertStore`.
///
/// The first error encountered is latched into `builder`; subsequent `add_*`
/// calls become no-ops and the error surfaces from [`CertStoreBuilder::build`].
pub struct CertStoreBuilder {
    builder: Result<X509StoreBuilder>,
}

// ====== impl CertStoreBuilder ======

impl CertStoreBuilder {
    /// Adds a DER-encoded certificate to the certificate store.
    #[inline]
    pub fn add_der_cert<'c, C>(self, cert: C) -> Self
    where
        C: Into<CertificateInput<'c>>,
    {
        self.parse_cert(cert, Certificate::from_der)
    }

    /// Adds a PEM-encoded certificate to the certificate store.
    #[inline]
    pub fn add_pem_cert<'c, C>(self, cert: C) -> Self
    where
        C: Into<CertificateInput<'c>>,
    {
        self.parse_cert(cert, Certificate::from_pem)
    }

    /// Adds multiple DER-encoded certificates to the certificate store.
    #[inline]
    pub fn add_der_certs<'c, I>(self, certs: I) -> Self
    where
        I: IntoIterator,
        I::Item: Into<CertificateInput<'c>>,
    {
        self.parse_certs(certs, Certificate::from_der)
    }

    /// Adds multiple PEM-encoded certificates to the certificate store.
    #[inline]
    pub fn add_pem_certs<'c, I>(self, certs: I) -> Self
    where
        I: IntoIterator,
        I::Item: Into<CertificateInput<'c>>,
    {
        self.parse_certs(certs, Certificate::from_pem)
    }

    /// Adds a PEM-encoded certificate stack to the certificate store.
    pub fn add_stack_pem_certs<C>(mut self, certs: C) -> Self
    where
        C: AsRef<[u8]>,
    {
        if let Ok(ref mut builder) = self.builder {
            let result = Certificate::stack_from_pem(certs.as_ref())
                .and_then(|certs| process_certs(certs.into_iter(), builder));
            if let Err(err) = result {
                self.builder = Err(err);
            }
        }
        self
    }

    /// Load certificates from their default locations.
    ///
    /// These locations are read from the `SSL_CERT_FILE` and `SSL_CERT_DIR`
    /// environment variables if present, or defaults specified at OpenSSL
    /// build time otherwise.
    pub fn set_default_paths(mut self) -> Self {
        if let Ok(ref mut builder) = self.builder {
            if let Err(err) = builder.set_default_paths() {
                self.builder = Err(Error::tls(err));
            }
        }
        self
    }

    /// Constructs the `CertStore`.
    ///
    /// This method finalizes the builder and constructs the `CertStore`
    /// containing all the added certificates.
    #[inline]
    pub fn build(self) -> Result<CertStore> {
        self.builder
            .map(X509StoreBuilder::build)
            .map(Arc::new)
            .map(CertStore)
    }
}

impl CertStoreBuilder {
    /// Parses a single input with `parser` and adds it, latching any error.
    fn parse_cert<'c, C, P>(mut self, cert: C, parser: P) -> Self
    where
        C: Into<CertificateInput<'c>>,
        P: Fn(&'c [u8]) -> Result<Certificate>,
    {
        if let Ok(ref mut builder) = self.builder {
            let input = cert.into();
            let result = input
                .with_parser(parser)
                .and_then(|cert| builder.add_cert(cert.0).map_err(Error::tls));
            if let Err(err) = result {
                self.builder = Err(err);
            }
        }
        self
    }

    /// Parses many inputs with `parser` and adds them, latching any error.
    fn parse_certs<'c, I>(mut self, certs: I, parser: fn(&'c [u8]) -> Result<Certificate>) -> Self
    where
        I: IntoIterator,
        I::Item: Into<CertificateInput<'c>>,
    {
        if let Ok(ref mut builder) = self.builder {
            let certs = filter_map_certs(certs, parser);
            if let Err(err) = process_certs(certs, builder) {
                self.builder = Err(err);
            }
        }
        self
    }
}

/// A thread-safe certificate store for TLS connections.
///
/// [`CertStore`] manages a collection of trusted certificates used for verifying peer identities.
/// It is designed to be shared and reused across requests and connections, similar to `Client`.
///
/// Internally, [`CertStore`] uses an [`Arc`] for reference counting, so you do **not** need to wrap
/// it in an additional [`Rc`] or [`Arc`] for sharing between threads or tasks.
///
/// To configure a [`CertStore`], use [`CertStore::builder()`]. You can also construct it from DER
/// or PEM certificates, or load system defaults.
///
/// [`Rc`]: std::rc::Rc
/// [`Arc`]: std::sync::Arc
#[derive(Clone)]
pub struct CertStore(Arc<X509Store>);

// ====== impl CertStore ======

impl CertStore {
    /// Creates a new `CertStoreBuilder`.
    #[inline]
    pub fn builder() -> CertStoreBuilder {
        CertStoreBuilder {
            builder: X509StoreBuilder::new().map_err(Error::builder),
        }
    }

    /// Creates a new `CertStore` from a collection of DER-encoded certificates.
    #[inline]
    pub fn from_der_certs<'c, C>(certs: C) -> Result<CertStore>
    where
        C: IntoIterator,
        C::Item: Into<CertificateInput<'c>>,
    {
        parse_certs(certs, Certificate::from_der)
            .map(Arc::new)
            .map(CertStore)
    }

    /// Creates a new `CertStore` from a collection of PEM-encoded certificates.
    #[inline]
    pub fn from_pem_certs<'c, C>(certs: C) -> Result<CertStore>
    where
        C: IntoIterator,
        C::Item: Into<CertificateInput<'c>>,
    {
        parse_certs(certs, Certificate::from_pem)
            .map(Arc::new)
            .map(CertStore)
    }

    /// Creates a new `CertStore` from a PEM-encoded certificate stack.
    #[inline]
    pub fn from_pem_stack<C>(certs: C) -> Result<CertStore>
    where
        C: AsRef<[u8]>,
    {
        parse_certs_with_stack(certs, Certificate::stack_from_pem)
            .map(Arc::new)
            .map(CertStore)
    }
}

impl CertStore {
    /// Installs this store as the connector's trust store (by reference;
    /// the `Arc` keeps it alive for the connector's lifetime).
    #[inline]
    pub(crate) fn add_to_tls(&self, tls: &mut SslConnectorBuilder) {
        tls.set_cert_store_ref(&self.0);
    }
}

impl Default for CertStore {
    fn default() -> Self {
        // With `webpki-roots`, the bundled Mozilla roots are loaded once and
        // cached for the process; otherwise fall back to system paths.
        #[cfg(feature = "webpki-roots")]
        static LOAD_CERTS: std::sync::LazyLock<CertStore> = std::sync::LazyLock::new(|| {
            CertStore::builder()
                .add_der_certs(webpki_root_certs::TLS_SERVER_ROOT_CERTS)
                .build()
                .expect("failed to load default cert store")
        });

        #[cfg(not(feature = "webpki-roots"))]
        {
            CertStore::builder()
                .set_default_paths()
                .build()
                .expect("failed to load default cert store")
        }

        #[cfg(feature = "webpki-roots")]
        LOAD_CERTS.clone()
    }
}

================================================
FILE: src/tls/trust.rs
================================================
//! TLS Trust and Identity management.
//!
//! Handles server certificate verification, mTLS identity, and CA
//! bundle management. Provides DER/PEM parsing for BoringSSL and
//! supports both system and custom trust stores.
mod identity; mod parse; mod store; use btls::x509::X509; pub use self::{ identity::Identity, store::{CertStore, CertStoreBuilder}, }; use crate::Error; /// A certificate input. pub enum CertificateInput<'c> { /// Raw DER or PEM data. Raw(&'c [u8]), /// An already parsed certificate. Parsed(Certificate), } impl<'a> CertificateInput<'a> { pub(crate) fn with_parser(self, parser: F) -> crate::Result where F: Fn(&'a [u8]) -> crate::Result, { match self { CertificateInput::Raw(data) => parser(data), CertificateInput::Parsed(cert) => Ok(cert), } } } impl From for CertificateInput<'_> { fn from(cert: Certificate) -> Self { CertificateInput::Parsed(cert) } } impl<'c, T: AsRef<[u8]> + ?Sized + 'c> From<&'c T> for CertificateInput<'c> { fn from(value: &'c T) -> CertificateInput<'c> { CertificateInput::Raw(value.as_ref()) } } /// A certificate. #[derive(Clone)] pub struct Certificate(X509); impl Certificate { /// Parse a certificate from DER data. #[inline] pub fn from_der>(cert: C) -> crate::Result { X509::from_der(cert.as_ref()).map(Self).map_err(Error::tls) } /// Parse a certificate from PEM data. #[inline] pub fn from_pem>(cert: C) -> crate::Result { X509::from_pem(cert.as_ref()).map(Self).map_err(Error::tls) } /// Parse a stack of certificates from DER data. #[inline] pub fn stack_from_pem>(cert: C) -> crate::Result> { let certs = X509::stack_from_pem(cert.as_ref()).map_err(Error::tls)?; Ok(certs.into_iter().map(Self).collect()) } } ================================================ FILE: src/tls.rs ================================================ //! TLS options configuration //! //! - Various parts of TLS can also be configured or even disabled on the `ClientBuilder`. pub(crate) mod conn; pub mod compress; pub mod keylog; pub mod session; pub mod trust; use std::borrow::Cow; use ::bytes::{BufMut, Bytes, BytesMut}; /// Re-exports of TLS-related types from `btls` for public use. 
pub use btls::ssl::{ExtensionType, KeyShare}; use self::compress::CertificateCompressor; /// Http extension carrying extra TLS layer information. /// Made available to clients on responses when `tls_info` is set. #[derive(Debug, Clone)] pub struct TlsInfo { pub(crate) peer_certificate: Option, pub(crate) peer_certificate_chain: Option>, } impl TlsInfo { /// Get the DER encoded leaf certificate of the peer. pub fn peer_certificate(&self) -> Option<&[u8]> { self.peer_certificate.as_deref() } /// Get the DER encoded certificate chain of the peer. /// /// This includes the leaf certificate on the client side. pub fn peer_certificate_chain(&self) -> Option> { self.peer_certificate_chain .as_ref() .map(|v| v.iter().map(|b| b.as_ref())) } } /// A TLS protocol version. #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)] pub struct TlsVersion(btls::ssl::SslVersion); impl TlsVersion { /// Version 1.0 of the TLS protocol. pub const TLS_1_0: TlsVersion = TlsVersion(btls::ssl::SslVersion::TLS1); /// Version 1.1 of the TLS protocol. pub const TLS_1_1: TlsVersion = TlsVersion(btls::ssl::SslVersion::TLS1_1); /// Version 1.2 of the TLS protocol. pub const TLS_1_2: TlsVersion = TlsVersion(btls::ssl::SslVersion::TLS1_2); /// Version 1.3 of the TLS protocol. pub const TLS_1_3: TlsVersion = TlsVersion(btls::ssl::SslVersion::TLS1_3); } /// A TLS ALPN protocol. #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)] pub struct AlpnProtocol(&'static [u8]); impl AlpnProtocol { /// Prefer HTTP/1.1 pub const HTTP1: AlpnProtocol = AlpnProtocol(b"http/1.1"); /// Prefer HTTP/2 pub const HTTP2: AlpnProtocol = AlpnProtocol(b"h2"); /// Prefer HTTP/3 pub const HTTP3: AlpnProtocol = AlpnProtocol(b"h3"); /// Create a new [`AlpnProtocol`] from a static byte slice. 
#[inline] pub const fn new(value: &'static [u8]) -> Self { AlpnProtocol(value) } #[inline] fn encode(self) -> Bytes { Self::encode_sequence(std::iter::once(&self)) } fn encode_sequence<'a, I>(items: I) -> Bytes where I: IntoIterator, { let mut buf = BytesMut::new(); for item in items { buf.put_u8(item.0.len() as u8); buf.extend_from_slice(item.0); } buf.freeze() } } /// A TLS ALPS protocol. #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)] pub struct AlpsProtocol(&'static [u8]); impl AlpsProtocol { /// Prefer HTTP/1.1 pub const HTTP1: AlpsProtocol = AlpsProtocol(b"http/1.1"); /// Prefer HTTP/2 pub const HTTP2: AlpsProtocol = AlpsProtocol(b"h2"); /// Prefer HTTP/3 pub const HTTP3: AlpsProtocol = AlpsProtocol(b"h3"); } /// Builder for `[`TlsOptions`]`. #[must_use] #[derive(Debug, Clone)] pub struct TlsOptionsBuilder { config: TlsOptions, } /// TLS connection configuration options. /// /// This struct provides fine-grained control over the behavior of TLS /// connections, including: /// - **Protocol negotiation** (ALPN, ALPS, TLS versions) /// - **Session management** (tickets, PSK, key shares) /// - **Security & privacy** (OCSP, GREASE, ECH, delegated credentials) /// - **Performance tuning** (record size, cipher preferences, hardware overrides) /// /// All fields are optional or have defaults. See each field for details. #[non_exhaustive] #[derive(Debug, Clone)] pub struct TlsOptions { /// Application-Layer Protocol Negotiation ([RFC 7301](https://datatracker.ietf.org/doc/html/rfc7301)). /// /// Specifies which application protocols (e.g., HTTP/2, HTTP/1.1) may be negotiated /// over a single TLS connection. /// /// **Default:** `Some([HTTP/2, HTTP/1.1])` pub alpn_protocols: Option>, /// Application-Layer Protocol Settings (ALPS). /// /// Enables exchanging application-layer settings during the handshake /// for protocols negotiated via ALPN. /// /// **Default:** `None` pub alps_protocols: Option>, /// Whether to use an alternative ALPS codepoint for compatibility. 
/// /// Useful when larger ALPS payloads are required. /// /// **Default:** `false` pub alps_use_new_codepoint: bool, /// Enables TLS Session Tickets ([RFC 5077](https://tools.ietf.org/html/rfc5077)). /// /// Allows session resumption without requiring server-side state. /// /// **Default:** `true` pub session_ticket: bool, /// Minimum TLS version allowed for the connection. /// /// **Default:** `None` (library default applied) pub min_tls_version: Option, /// Maximum TLS version allowed for the connection. /// /// **Default:** `None` (library default applied) pub max_tls_version: Option, /// Enables Pre-Shared Key (PSK) cipher suites ([RFC 4279](https://datatracker.ietf.org/doc/html/rfc4279)). /// /// Authentication relies on out-of-band pre-shared keys instead of certificates. /// /// **Default:** `false` pub pre_shared_key: bool, /// Controls whether to send a GREASE Encrypted ClientHello (ECH) extension /// when no supported ECH configuration is available. /// /// GREASE prevents protocol ossification by sending unknown extensions. /// /// **Default:** `false` pub enable_ech_grease: bool, /// Controls whether ClientHello extensions should be permuted. /// /// **Default:** `None` (implementation default) pub permute_extensions: Option, /// Controls whether GREASE extensions ([RFC 8701](https://datatracker.ietf.org/doc/html/rfc8701)) /// are enabled in general. /// /// **Default:** `None` (implementation default) pub grease_enabled: Option, /// Enables OCSP stapling for the connection. /// /// **Default:** `false` pub enable_ocsp_stapling: bool, /// Enables Signed Certificate Timestamps (SCT). /// /// **Default:** `false` pub enable_signed_cert_timestamps: bool, /// Sets the maximum TLS record size. /// /// **Default:** `None` pub record_size_limit: Option, /// Whether to skip session tickets when using PSK. /// /// **Default:** `false` pub psk_skip_session_ticket: bool, /// Whether to set specific key shares for TLS 1.3 handshakes. 
/// /// **Default:** `None` pub key_shares: Option>, /// Enables PSK with (EC)DHE key establishment (`psk_dhe_ke`). /// /// **Default:** `true` pub psk_dhe_ke: bool, /// Enables TLS renegotiation by sending the `renegotiation_info` extension. /// /// **Default:** `true` pub renegotiation: bool, /// Delegated Credentials ([RFC 9345](https://datatracker.ietf.org/doc/html/rfc9345)). /// /// Allows TLS 1.3 endpoints to use temporary delegated credentials /// for authentication with reduced long-term key exposure. /// /// **Default:** `None` pub delegated_credentials: Option>, /// List of supported elliptic curves. /// /// **Default:** `None` pub curves_list: Option>, /// List of supported signature algorithms. /// /// **Default:** `None` pub sigalgs_list: Option>, /// Cipher suite configuration string. /// /// Uses BoringSSL's mini-language to select, enable, and prioritize ciphers. /// /// **Default:** `None` pub cipher_list: Option>, /// Sets whether to preserve the TLS 1.3 cipher list as configured by [`Self::cipher_list`]. /// /// **Default:** `None` pub preserve_tls13_cipher_list: Option, /// Supported certificate compression algorithms ([RFC 8879](https://datatracker.ietf.org/doc/html/rfc8879)). /// /// **Default:** `None` pub certificate_compressors: Option>, /// Supported TLS extensions, used for extension ordering/permutation. /// /// **Default:** `None` pub extension_permutation: Option>, /// Overrides AES hardware acceleration. /// /// **Default:** `None` pub aes_hw_override: Option, /// Overrides the random AES hardware acceleration. /// /// **Default:** `false` pub random_aes_hw_override: bool, } impl TlsOptionsBuilder { /// Sets the ALPN protocols to use. #[inline] pub fn alpn_protocols(mut self, alpn: I) -> Self where I: IntoIterator, { self.config.alpn_protocols = Some(Cow::Owned(alpn.into_iter().collect())); self } /// Sets the ALPS protocols to use. 
#[inline] pub fn alps_protocols(mut self, alps: I) -> Self where I: IntoIterator, { self.config.alps_protocols = Some(Cow::Owned(alps.into_iter().collect())); self } /// Sets whether to use a new codepoint for ALPS. #[inline] pub fn alps_use_new_codepoint(mut self, enabled: bool) -> Self { self.config.alps_use_new_codepoint = enabled; self } /// Sets the session ticket flag. #[inline] pub fn session_ticket(mut self, enabled: bool) -> Self { self.config.session_ticket = enabled; self } /// Sets the minimum TLS version to use. #[inline] pub fn min_tls_version(mut self, version: T) -> Self where T: Into>, { self.config.min_tls_version = version.into(); self } /// Sets the maximum TLS version to use. #[inline] pub fn max_tls_version(mut self, version: T) -> Self where T: Into>, { self.config.max_tls_version = version.into(); self } /// Sets the pre-shared key flag. #[inline] pub fn pre_shared_key(mut self, enabled: bool) -> Self { self.config.pre_shared_key = enabled; self } /// Sets the GREASE ECH extension flag. #[inline] pub fn enable_ech_grease(mut self, enabled: bool) -> Self { self.config.enable_ech_grease = enabled; self } /// Sets whether to permute ClientHello extensions. #[inline] pub fn permute_extensions(mut self, permute: T) -> Self where T: Into>, { self.config.permute_extensions = permute.into(); self } /// Sets the GREASE enabled flag. #[inline] pub fn grease_enabled(mut self, enabled: T) -> Self where T: Into>, { self.config.grease_enabled = enabled.into(); self } /// Sets the OCSP stapling flag. #[inline] pub fn enable_ocsp_stapling(mut self, enabled: bool) -> Self { self.config.enable_ocsp_stapling = enabled; self } /// Sets the signed certificate timestamps flag. #[inline] pub fn enable_signed_cert_timestamps(mut self, enabled: bool) -> Self { self.config.enable_signed_cert_timestamps = enabled; self } /// Sets the record size limit. 
#[inline] pub fn record_size_limit>>(mut self, limit: U) -> Self { self.config.record_size_limit = limit.into(); self } /// Sets the PSK skip session ticket flag. #[inline] pub fn psk_skip_session_ticket(mut self, skip: bool) -> Self { self.config.psk_skip_session_ticket = skip; self } /// Sets the PSK DHE key establishment flag. #[inline] pub fn psk_dhe_ke(mut self, enabled: bool) -> Self { self.config.psk_dhe_ke = enabled; self } /// Sets the renegotiation flag. #[inline] pub fn renegotiation(mut self, enabled: bool) -> Self { self.config.renegotiation = enabled; self } /// Sets the delegated credentials. #[inline] pub fn delegated_credentials(mut self, creds: T) -> Self where T: Into>, { self.config.delegated_credentials = Some(creds.into()); self } /// Sets the client key shares to be used in the TLS 1.3 handshake. #[inline] pub fn key_shares(mut self, key_shares: T) -> Self where T: Into>, { self.config.key_shares = Some(key_shares.into()); self } /// Sets the supported curves list. #[inline] pub fn curves_list(mut self, curves: T) -> Self where T: Into>, { self.config.curves_list = Some(curves.into()); self } /// Sets the cipher list. #[inline] pub fn cipher_list(mut self, ciphers: T) -> Self where T: Into>, { self.config.cipher_list = Some(ciphers.into()); self } /// Sets the supported signature algorithms. #[inline] pub fn sigalgs_list(mut self, sigalgs: T) -> Self where T: Into>, { self.config.sigalgs_list = Some(sigalgs.into()); self } /// Sets the certificate compression algorithms. #[inline] pub fn certificate_compressors(mut self, algs: T) -> Self where T: Into>, { self.config.certificate_compressors = Some(algs.into()); self } /// Sets the extension permutation. #[inline] pub fn extension_permutation(mut self, permutation: T) -> Self where T: Into>, { self.config.extension_permutation = Some(permutation.into()); self } /// Sets the AES hardware override flag. 
#[inline] pub fn aes_hw_override(mut self, enabled: T) -> Self where T: Into>, { self.config.aes_hw_override = enabled.into(); self } /// Sets the random AES hardware override flag. #[inline] pub fn random_aes_hw_override(mut self, enabled: bool) -> Self { self.config.random_aes_hw_override = enabled; self } /// Sets whether to preserve the TLS 1.3 cipher list as configured by [`Self::cipher_list`]. /// /// By default, BoringSSL does not preserve the TLS 1.3 cipher list. When this option is /// disabled (the default), BoringSSL uses its internal default TLS 1.3 cipher suites in its /// default order, regardless of what is set via [`Self::cipher_list`]. /// /// When enabled, this option ensures that the TLS 1.3 cipher suites explicitly set via /// [`Self::cipher_list`] are retained in their original order, without being reordered or /// modified by BoringSSL's internal logic. This is useful for maintaining specific cipher suite /// priorities for TLS 1.3. Note that if [`Self::cipher_list`] does not include any TLS 1.3 /// cipher suites, BoringSSL will still fall back to its default TLS 1.3 cipher suites and /// order. #[inline] pub fn preserve_tls13_cipher_list(mut self, enabled: T) -> Self where T: Into>, { self.config.preserve_tls13_cipher_list = enabled.into(); self } /// Builds the `TlsOptions` from the builder. #[inline] pub fn build(self) -> TlsOptions { self.config } } impl TlsOptions { /// Creates a new `TlsOptionsBuilder` instance. 
pub fn builder() -> TlsOptionsBuilder { TlsOptionsBuilder { config: TlsOptions::default(), } } } impl Default for TlsOptions { fn default() -> Self { TlsOptions { alpn_protocols: Some(Cow::Borrowed(&[AlpnProtocol::HTTP2, AlpnProtocol::HTTP1])), alps_protocols: None, alps_use_new_codepoint: false, session_ticket: true, min_tls_version: None, max_tls_version: None, pre_shared_key: false, enable_ech_grease: false, permute_extensions: None, grease_enabled: None, enable_ocsp_stapling: false, enable_signed_cert_timestamps: false, record_size_limit: None, psk_skip_session_ticket: false, key_shares: None, psk_dhe_ke: true, renegotiation: true, delegated_credentials: None, curves_list: None, cipher_list: None, sigalgs_list: None, certificate_compressors: None, extension_permutation: None, aes_hw_override: None, preserve_tls13_cipher_list: None, random_aes_hw_override: false, } } } #[cfg(test)] mod tests { use super::*; #[test] fn alpn_protocol_encode() { let alpn = AlpnProtocol::encode_sequence(&[AlpnProtocol::HTTP1, AlpnProtocol::HTTP2]); assert_eq!(alpn, Bytes::from_static(b"\x08http/1.1\x02h2")); let alpn = AlpnProtocol::encode_sequence(&[AlpnProtocol::HTTP3]); assert_eq!(alpn, Bytes::from_static(b"\x02h3")); let alpn = AlpnProtocol::encode_sequence(&[AlpnProtocol::HTTP1, AlpnProtocol::HTTP3]); assert_eq!(alpn, Bytes::from_static(b"\x08http/1.1\x02h3")); let alpn = AlpnProtocol::encode_sequence(&[AlpnProtocol::HTTP2, AlpnProtocol::HTTP3]); assert_eq!(alpn, Bytes::from_static(b"\x02h2\x02h3")); let alpn = AlpnProtocol::encode_sequence(&[ AlpnProtocol::HTTP1, AlpnProtocol::HTTP2, AlpnProtocol::HTTP3, ]); assert_eq!(alpn, Bytes::from_static(b"\x08http/1.1\x02h2\x02h3")); } #[test] fn alpn_protocol_encode_single() { let alpn = AlpnProtocol::HTTP1.encode(); assert_eq!(alpn, b"\x08http/1.1".as_ref()); let alpn = AlpnProtocol::HTTP2.encode(); assert_eq!(alpn, b"\x02h2".as_ref()); let alpn = AlpnProtocol::HTTP3.encode(); assert_eq!(alpn, b"\x02h3".as_ref()); } } 
================================================ FILE: src/trace.rs ================================================ macro_rules! debug { ($($arg:tt)+) => { { #[cfg(feature = "tracing")] { ::tracing::debug!($($arg)+); } } } } macro_rules! trace { ($($arg:tt)*) => { { #[cfg(feature = "tracing")] { ::tracing::trace!($($arg)+); } } } } macro_rules! trace_span { ($($arg:tt)*) => { { #[cfg(feature = "tracing")] { let _span = ::tracing::trace_span!($($arg)+); let _ = _span.entered(); } } } } macro_rules! warn { ($($arg:tt)*) => { { #[cfg(feature = "tracing")] { ::tracing::warn!($($arg)+); } } } } macro_rules! error { ($($arg:tt)*) => { { #[cfg(feature = "tracing")] { ::tracing::error!($($arg)+); } } } } ================================================ FILE: src/util.rs ================================================ use std::{fmt, fmt::Write}; use bytes::Bytes; use crate::header::{Entry, HeaderMap, HeaderValue, OccupiedEntry}; pub(crate) fn basic_auth(username: U, password: Option

) -> HeaderValue where U: fmt::Display, P: fmt::Display, { let encoded = { let mut buf = b"Basic ".to_vec(); let mut buf_str = String::with_capacity(32); let _ = write!(buf_str, "{username}:"); if let Some(password) = password { let _ = write!(buf_str, "{password}"); } let encoded = btls::base64::encode_block(buf_str.as_bytes()); buf.extend(encoded.into_bytes()); buf }; let mut header = HeaderValue::from_maybe_shared(Bytes::from(encoded)) .expect("base64 is always valid HeaderValue"); header.set_sensitive(true); header } pub(crate) fn fast_random() -> u64 { use std::{ cell::Cell, collections::hash_map::RandomState, hash::{BuildHasher, Hasher}, }; thread_local! { static KEY: RandomState = RandomState::new(); static COUNTER: Cell = const { Cell::new(0) }; } KEY.with(|key| { COUNTER.with(|ctr| { let n = ctr.get().wrapping_add(1); ctr.set(n); let mut h = key.build_hasher(); h.write_u64(n); h.finish() }) }) } pub(crate) fn replace_headers(dst: &mut HeaderMap, src: HeaderMap) { // IntoIter of HeaderMap yields (Option, HeaderValue). // The first time a name is yielded, it will be Some(name), and if // there are more values with the same name, the next yield will be // None. 
let mut prev_entry: Option> = None; for (key, value) in src { match key { Some(key) => match dst.entry(key) { Entry::Occupied(mut e) => { e.insert(value); prev_entry = Some(e); } Entry::Vacant(e) => { let e = e.insert_entry(value); prev_entry = Some(e); } }, None => match prev_entry { Some(ref mut entry) => { entry.append(value); } None => unreachable!("HeaderMap::into_iter yielded None first"), }, } } } pub(crate) struct Escape<'a>(&'a [u8]); impl<'a> Escape<'a> { pub(crate) fn new(bytes: &'a [u8]) -> Self { Escape(bytes) } } impl fmt::Debug for Escape<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "b\"{self}\"")?; Ok(()) } } impl fmt::Display for Escape<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { for &c in self.0 { // https://doc.rust-lang.org/reference.html#byte-escapes if c == b'\n' { write!(f, "\\n")?; } else if c == b'\r' { write!(f, "\\r")?; } else if c == b'\t' { write!(f, "\\t")?; } else if c == b'\\' || c == b'"' { write!(f, "\\{}", c as char)?; } else if c == b'\0' { write!(f, "\\0")?; // ASCII printable } else if (0x20..0x7f).contains(&c) { write!(f, "{}", c as char)?; } else { write!(f, "\\x{c:02x}")?; } } Ok(()) } } ================================================ FILE: tests/badssl.rs ================================================ use std::time::Duration; use wreq::{ Client, tls::{AlpsProtocol, TlsInfo, TlsOptions, TlsVersion, trust::CertStore}, }; macro_rules! 
join { ($sep:expr, $first:expr $(, $rest:expr)*) => { concat!($first $(, $sep, $rest)*) }; } #[tokio::test] async fn test_badssl_modern() { let text = Client::builder() .no_proxy() .connect_timeout(Duration::from_secs(360)) .build() .unwrap() .get("https://mozilla-modern.badssl.com/") .send() .await .unwrap() .text() .await .unwrap(); assert!(!text.is_empty()); } #[tokio::test] async fn test_badssl_self_signed() { let text = Client::builder() .tls_cert_verification(false) .connect_timeout(Duration::from_secs(360)) .no_proxy() .build() .unwrap() .get("https://self-signed.badssl.com/") .send() .await .unwrap() .text() .await .unwrap(); assert!(!text.is_empty()); } const CURVES_LIST: &str = join!( ":", "X25519", "P-256", "P-384", "P-521", "ffdhe2048", "ffdhe3072" ); #[tokio::test] async fn test_3des_support() -> wreq::Result<()> { let tls_options = TlsOptions::builder() .cipher_list(join!( ":", "TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA", "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA" )) .curves_list(CURVES_LIST) .build(); // Create a client with the TLS options let client = Client::builder() .tls_options(tls_options) .tls_cert_verification(false) .connect_timeout(Duration::from_secs(360)) .build()?; // Check if the client can connect to the 3des.badssl.com let content = client .get("https://3des.badssl.com/") .send() .await? 
.text() .await?; println!("3des.badssl.com is supported:\n{content}"); Ok(()) } #[tokio::test] async fn test_firefox_7x_100_cipher() -> wreq::Result<()> { let tls_options = TlsOptions::builder() .cipher_list(join!( ":", "TLS_DHE_RSA_WITH_AES_128_CBC_SHA", "TLS_DHE_RSA_WITH_AES_256_CBC_SHA", "TLS_DHE_RSA_WITH_AES_128_CBC_SHA256", "TLS_DHE_RSA_WITH_AES_256_CBC_SHA256" )) .curves_list(CURVES_LIST) .build(); // Create a client with the TLS options let client = Client::builder() .tls_options(tls_options) .tls_cert_verification(false) .connect_timeout(Duration::from_secs(360)) .build()?; // Check if the client can connect to the dh2048.badssl.com let content = client .get("https://dh2048.badssl.com/") .send() .await? .text() .await?; println!("dh2048.badssl.com is supported:\n{content}"); Ok(()) } #[tokio::test] async fn test_alps_new_endpoint() -> wreq::Result<()> { let tls_options = TlsOptions::builder() .min_tls_version(TlsVersion::TLS_1_2) .max_tls_version(TlsVersion::TLS_1_3) .alps_protocols([AlpsProtocol::HTTP2]) .alps_use_new_codepoint(true) .build(); let client = Client::builder() .tls_options(tls_options) .connect_timeout(Duration::from_secs(360)) .build()?; let resp = client.get("https://www.google.com").send().await?; assert!(resp.status().is_success()); Ok(()) } #[tokio::test] async fn test_aes_hw_override() -> wreq::Result<()> { const CIPHER_LIST: &str = join!( ":", "TLS_AES_128_GCM_SHA256", "TLS_CHACHA20_POLY1305_SHA256", "TLS_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", "TLS_RSA_WITH_AES_128_GCM_SHA256", "TLS_RSA_WITH_AES_256_GCM_SHA384", 
"TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA" ); let tls_options = TlsOptions::builder() .cipher_list(CIPHER_LIST) .min_tls_version(TlsVersion::TLS_1_2) .max_tls_version(TlsVersion::TLS_1_3) .enable_ech_grease(true) .aes_hw_override(false) .preserve_tls13_cipher_list(true) .build(); // Create a client with the TLS options let client = Client::builder() .tls_options(tls_options) .connect_timeout(Duration::from_secs(360)) .build()?; let resp = client.get("https://tls.browserleaks.com").send().await?; assert!(resp.status().is_success()); let text = resp.text().await?; assert!(text.contains("ChaCha20Poly1305")); Ok(()) } #[tokio::test] async fn test_tls_self_signed_cert() { let client = Client::builder() .tls_cert_verification(false) .connect_timeout(Duration::from_secs(360)) .tls_info(true) .build() .unwrap(); let resp = client .get("https://self-signed.badssl.com/") .send() .await .unwrap(); let peer_cert_der = resp .extensions() .get::() .and_then(|info| info.peer_certificate()) .unwrap(); let self_signed_cert_store = CertStore::builder() .add_der_cert(peer_cert_der) .build() .unwrap(); let client = Client::builder() .tls_cert_store(self_signed_cert_store) .build() .unwrap(); let resp = client .get("https://self-signed.badssl.com/") .send() .await .unwrap(); assert!(resp.status().is_success()); let res = client.get("https://www.google.com").send().await; assert!(res.is_err()); } ================================================ FILE: tests/brotli.rs ================================================ mod support; use std::io::Read; use support::server; use tokio::io::AsyncWriteExt; use wreq::Client; #[tokio::test] async fn brotli_response() { brotli_case(10_000, 4096).await; } #[tokio::test] async fn brotli_single_byte_chunks() { brotli_case(10, 1).await; } #[tokio::test] async fn test_brotli_empty_body() { let server = server::http(move |req| async move { assert_eq!(req.method(), "HEAD"); http::Response::builder() .header("content-encoding", "br") 
.body(Default::default()) .unwrap() }); let res = wreq::head(format!("http://{}/brotli", server.addr())) .send() .await .unwrap(); let body = res.text().await.unwrap(); assert_eq!(body, ""); } #[tokio::test] async fn test_accept_header_is_not_changed_if_set() { let server = server::http(move |req| async move { assert_eq!(req.headers()["accept"], "application/json"); assert!( req.headers()["accept-encoding"] .to_str() .unwrap() .contains("br") ); http::Response::default() }); let res = wreq::get(format!("http://{}/accept", server.addr())) .header( wreq::header::ACCEPT, wreq::header::HeaderValue::from_static("application/json"), ) .send() .await .unwrap(); assert_eq!(res.status(), wreq::StatusCode::OK); } #[tokio::test] async fn test_accept_encoding_header_is_not_changed_if_set() { let server = server::http(move |req| async move { assert_eq!(req.headers()["accept"], "*/*"); assert_eq!(req.headers()["accept-encoding"], "identity"); http::Response::default() }); let res = wreq::get(format!("http://{}/accept-encoding", server.addr())) .header(wreq::header::ACCEPT, "*/*") .header( wreq::header::ACCEPT_ENCODING, wreq::header::HeaderValue::from_static("identity"), ) .send() .await .unwrap(); assert_eq!(res.status(), wreq::StatusCode::OK); } async fn brotli_case(response_size: usize, chunk_size: usize) { use futures_util::stream::StreamExt; let content: String = (0..response_size).fold(String::new(), |mut acc, i| { acc.push_str(&format!("test {i}")); acc }); let mut encoder = brotli::CompressorReader::new(content.as_bytes(), 4096, 5, 20); let mut brotlied_content = Vec::new(); encoder.read_to_end(&mut brotlied_content).unwrap(); let mut response = format!( "\ HTTP/1.1 200 OK\r\n\ Server: test-accept\r\n\ Content-Encoding: br\r\n\ Content-Length: {}\r\n\ \r\n", &brotlied_content.len() ) .into_bytes(); response.extend(&brotlied_content); let server = server::http(move |req| { assert!( req.headers()["accept-encoding"] .to_str() .unwrap() .contains("br") ); let brotlied = 
brotlied_content.clone(); async move { let len = brotlied.len(); let stream = futures_util::stream::unfold((brotlied, 0), move |(brotlied, pos)| async move { let chunk = brotlied.chunks(chunk_size).nth(pos)?.to_vec(); Some((chunk, (brotlied, pos + 1))) }); let body = wreq::Body::wrap_stream(stream.map(Ok::<_, std::convert::Infallible>)); http::Response::builder() .header("content-encoding", "br") .header("content-length", len) .body(body) .unwrap() } }); let res = wreq::get(format!("http://{}/brotli", server.addr())) .send() .await .expect("response"); let body = res.text().await.expect("text"); assert_eq!(body, content); } const COMPRESSED_RESPONSE_HEADERS: &[u8] = b"HTTP/1.1 200 OK\x0d\x0a\ Content-Type: text/plain\x0d\x0a\ Connection: keep-alive\x0d\x0a\ Content-Encoding: br\x0d\x0a"; const RESPONSE_CONTENT: &str = "some message here"; fn brotli_compress(input: &[u8]) -> Vec { let mut encoder = brotli::CompressorReader::new(input, 4096, 5, 20); let mut brotlied_content = Vec::new(); encoder.read_to_end(&mut brotlied_content).unwrap(); brotlied_content } #[tokio::test] async fn test_non_chunked_non_fragmented_response() { let server = server::low_level_with_response(|_raw_request, client_socket| { Box::new(async move { let brotlied_content = brotli_compress(RESPONSE_CONTENT.as_bytes()); let content_length_header = format!("Content-Length: {}\r\n\r\n", brotlied_content.len()).into_bytes(); let response = [ COMPRESSED_RESPONSE_HEADERS, &content_length_header, &brotlied_content, ] .concat(); client_socket .write_all(response.as_slice()) .await .expect("response write_all failed"); client_socket.flush().await.expect("response flush failed"); }) }); let res = wreq::get(format!("http://{}/", server.addr())) .send() .await .expect("response"); assert_eq!(res.text().await.expect("text"), RESPONSE_CONTENT); } #[tokio::test] async fn test_chunked_fragmented_response_1() { const DELAY_BETWEEN_RESPONSE_PARTS: tokio::time::Duration = tokio::time::Duration::from_millis(1000); 
const DELAY_MARGIN: tokio::time::Duration = tokio::time::Duration::from_millis(50); let server = server::low_level_with_response(|_raw_request, client_socket| { Box::new(async move { let brotlied_content = brotli_compress(RESPONSE_CONTENT.as_bytes()); let response_first_part = [ COMPRESSED_RESPONSE_HEADERS, format!( "Transfer-Encoding: chunked\r\n\r\n{:x}\r\n", brotlied_content.len() ) .as_bytes(), &brotlied_content, ] .concat(); let response_second_part = b"\r\n0\r\n\r\n"; client_socket .write_all(response_first_part.as_slice()) .await .expect("response_first_part write_all failed"); client_socket .flush() .await .expect("response_first_part flush failed"); tokio::time::sleep(DELAY_BETWEEN_RESPONSE_PARTS).await; client_socket .write_all(response_second_part) .await .expect("response_second_part write_all failed"); client_socket .flush() .await .expect("response_second_part flush failed"); }) }); let start = tokio::time::Instant::now(); let res = wreq::get(format!("http://{}/", server.addr())) .send() .await .expect("response"); assert_eq!(res.text().await.expect("text"), RESPONSE_CONTENT); assert!(start.elapsed() >= DELAY_BETWEEN_RESPONSE_PARTS - DELAY_MARGIN); } #[tokio::test] async fn test_chunked_fragmented_response_2() { const DELAY_BETWEEN_RESPONSE_PARTS: tokio::time::Duration = tokio::time::Duration::from_millis(1000); const DELAY_MARGIN: tokio::time::Duration = tokio::time::Duration::from_millis(50); let server = server::low_level_with_response(|_raw_request, client_socket| { Box::new(async move { let brotlied_content = brotli_compress(RESPONSE_CONTENT.as_bytes()); let response_first_part = [ COMPRESSED_RESPONSE_HEADERS, format!( "Transfer-Encoding: chunked\r\n\r\n{:x}\r\n", brotlied_content.len() ) .as_bytes(), &brotlied_content, b"\r\n", ] .concat(); let response_second_part = b"0\r\n\r\n"; client_socket .write_all(response_first_part.as_slice()) .await .expect("response_first_part write_all failed"); client_socket .flush() .await 
.expect("response_first_part flush failed"); tokio::time::sleep(DELAY_BETWEEN_RESPONSE_PARTS).await; client_socket .write_all(response_second_part) .await .expect("response_second_part write_all failed"); client_socket .flush() .await .expect("response_second_part flush failed"); }) }); let start = tokio::time::Instant::now(); let res = wreq::get(format!("http://{}/", server.addr())) .send() .await .expect("response"); assert_eq!(res.text().await.expect("text"), RESPONSE_CONTENT); assert!(start.elapsed() >= DELAY_BETWEEN_RESPONSE_PARTS - DELAY_MARGIN); } #[tokio::test] async fn test_chunked_fragmented_response_with_extra_bytes() { const DELAY_BETWEEN_RESPONSE_PARTS: tokio::time::Duration = tokio::time::Duration::from_millis(1000); const DELAY_MARGIN: tokio::time::Duration = tokio::time::Duration::from_millis(50); let server = server::low_level_with_response(|_raw_request, client_socket| { Box::new(async move { let brotlied_content = brotli_compress(RESPONSE_CONTENT.as_bytes()); let response_first_part = [ COMPRESSED_RESPONSE_HEADERS, format!( "Transfer-Encoding: chunked\r\n\r\n{:x}\r\n", brotlied_content.len() ) .as_bytes(), &brotlied_content, ] .concat(); let response_second_part = b"\r\n2ab\r\n0\r\n\r\n"; client_socket .write_all(response_first_part.as_slice()) .await .expect("response_first_part write_all failed"); client_socket .flush() .await .expect("response_first_part flush failed"); tokio::time::sleep(DELAY_BETWEEN_RESPONSE_PARTS).await; client_socket .write_all(response_second_part) .await .expect("response_second_part write_all failed"); client_socket .flush() .await .expect("response_second_part flush failed"); }) }); let start = tokio::time::Instant::now(); let res = Client::new() .get(format!("http://{}/", server.addr())) .send() .await .expect("response"); let err = res.text().await.expect_err("there must be an error"); assert!(err.is_decode()); assert!(start.elapsed() >= DELAY_BETWEEN_RESPONSE_PARTS - DELAY_MARGIN); } 
================================================ FILE: tests/client.rs ================================================ mod support; #[cfg(feature = "json")] use std::collections::HashMap; use bytes::Bytes; use http::{ HeaderMap, HeaderValue, StatusCode, Version, header::{ self, ACCEPT, AUTHORIZATION, CACHE_CONTROL, CONTENT_LENGTH, CONTENT_TYPE, COOKIE, REFERER, TRANSFER_ENCODING, USER_AGENT, }, }; use http_body_util::{BodyExt, Full}; use pretty_env_logger::env_logger; use support::server; use tokio::io::AsyncWriteExt; use wreq::{Client, header::OrigHeaderMap, tls::TlsInfo}; #[tokio::test] async fn auto_headers() { let server = server::http(move |req| async move { assert_eq!(req.method(), "GET"); assert_eq!(req.headers()["accept"], "*/*"); assert_eq!(req.headers().get("user-agent"), None); if cfg!(feature = "gzip") { assert!( req.headers()["accept-encoding"] .to_str() .unwrap() .contains("gzip") ); } if cfg!(feature = "brotli") { assert!( req.headers()["accept-encoding"] .to_str() .unwrap() .contains("br") ); } if cfg!(feature = "zstd") { assert!( req.headers()["accept-encoding"] .to_str() .unwrap() .contains("zstd") ); } if cfg!(feature = "deflate") { assert!( req.headers()["accept-encoding"] .to_str() .unwrap() .contains("deflate") ); } http::Response::default() }); let url = format!("http://{}/1", server.addr()); let res = Client::builder() .no_proxy() .build() .unwrap() .get(&url) .header(wreq::header::ACCEPT, "*/*") .send() .await .unwrap(); assert_eq!(res.uri(), url.as_str()); assert_eq!(res.status(), wreq::StatusCode::OK); assert_eq!(res.remote_addr(), Some(server.addr())); } #[tokio::test] async fn test_headers_order_with_client() { use http::HeaderValue; use wreq::{ Client, header::{ACCEPT, CONTENT_TYPE, USER_AGENT}, }; let server = server::http(move |req| async move { assert_eq!(req.method(), "POST"); let expected_headers = [ ("cookie", "cookie1=cookie1-value"), ("cookie", "cookie2=cookie2-value"), ("user-agent", "my-test-client"), ("accept", "*/*"), 
("content-type", "application/json"), ("authorization", "Bearer test-token"), ("referer", "https://example.com"), ("cache-control", "no-cache"), ]; for (i, (expected_key, expected_value)) in expected_headers.iter().enumerate() { let (key, value) = req.headers().iter().nth(i).unwrap(); assert_eq!(key.as_str(), *expected_key); assert_eq!(value.as_bytes(), expected_value.as_bytes()); } let full: Vec = req .into_body() .collect() .await .expect("must succeed") .to_bytes() .to_vec(); assert_eq!(full, br#"{"message":"hello"}"#); http::Response::default() }); let url = format!("http://{}/test", server.addr()); let client = Client::builder() .no_proxy() .default_headers({ let mut headers = HeaderMap::new(); headers.insert(ACCEPT, HeaderValue::from_static("*/*")); headers.insert(CONTENT_TYPE, HeaderValue::from_static("application/json")); headers.insert(USER_AGENT, HeaderValue::from_static("my-test-client")); headers.insert(AUTHORIZATION, HeaderValue::from_static("Bearer test-token")); headers.insert(REFERER, HeaderValue::from_static("https://example.com")); headers.append("cookie", HeaderValue::from_static("cookie1=cookie1-value")); headers.append("cookie", HeaderValue::from_static("cookie2=cookie2-value")); headers.insert(CACHE_CONTROL, HeaderValue::from_static("no-cache")); headers }) .orig_headers({ let mut orig_headers = OrigHeaderMap::new(); orig_headers.insert("cookie"); orig_headers.insert("user-agent"); orig_headers.insert("accept"); orig_headers.insert("content-type"); orig_headers.insert("authorization"); orig_headers.insert("referer"); orig_headers.insert("cache-control"); orig_headers }) .build() .unwrap(); let res = client .post(&url) .body(r#"{"message":"hello"}"#) .send() .await .unwrap(); assert_eq!(res.status(), wreq::StatusCode::OK); } #[tokio::test] async fn test_headers_order_with_request() { use http::HeaderValue; use wreq::{ Client, header::{ACCEPT, CONTENT_TYPE, USER_AGENT}, }; let server = server::http(move |req| async move { 
assert_eq!(req.method(), "POST"); let expected_headers = [ ("user-agent", "my-test-client"), ("accept", "*/*"), ("content-type", "application/json"), ("authorization", "Bearer test-token"), ("referer", "https://example.com"), ("cookie", "cookie1=cookie1"), ("cookie", "cookie2=cookie2"), ("cache-control", "no-cache"), ]; for (i, (expected_key, expected_value)) in expected_headers.iter().enumerate() { let (key, value) = req.headers().iter().nth(i).unwrap(); assert_eq!(key.as_str(), *expected_key); assert_eq!(value.as_bytes(), expected_value.as_bytes()); } let full: Vec = req .into_body() .collect() .await .expect("must succeed") .to_bytes() .to_vec(); assert_eq!(full, br#"{"message":"hello"}"#); http::Response::default() }); let url = format!("http://{}/test", server.addr()); let client = Client::builder().no_proxy().build().unwrap(); let res = client .post(&url) .headers({ let mut headers = HeaderMap::new(); headers.insert(ACCEPT, HeaderValue::from_static("*/*")); headers.insert(CONTENT_TYPE, HeaderValue::from_static("application/json")); headers.insert(USER_AGENT, HeaderValue::from_static("my-test-client")); headers.insert(AUTHORIZATION, HeaderValue::from_static("Bearer test-token")); headers.insert(REFERER, HeaderValue::from_static("https://example.com")); headers.append("cookie", HeaderValue::from_static("cookie1=cookie1")); headers.append("cookie", HeaderValue::from_static("cookie2=cookie2")); headers.insert(CACHE_CONTROL, HeaderValue::from_static("no-cache")); headers }) .orig_headers({ let mut orig_headers = OrigHeaderMap::new(); orig_headers.insert("user-agent"); orig_headers.insert("accept"); orig_headers.insert("content-type"); orig_headers.insert("authorization"); orig_headers.insert("referer"); orig_headers.insert("cookie"); orig_headers.insert("cache-control"); orig_headers }) .body(r#"{"message":"hello"}"#) .send() .await .unwrap(); assert_eq!(res.status(), wreq::StatusCode::OK); } #[tokio::test] async fn test_overwrite_headers() { let server = 
server::http(move |req| async move { let path = req.uri().path(); match path { "/1" => { assert_eq!(req.method(), "GET"); assert_eq!(req.headers()[USER_AGENT], "my-custom-agent"); let mut cookies = req.headers().get_all(COOKIE).iter(); assert_eq!(cookies.next().unwrap(), "a=b"); assert_eq!(cookies.next().unwrap(), "c=d"); assert_eq!(cookies.next(), None); } "/2" => { assert_eq!(req.method(), "GET"); assert_eq!(req.headers()[USER_AGENT], "my-custom-agent"); let mut cookies = req.headers().get_all(COOKIE).iter(); assert_eq!(cookies.next().unwrap(), "e=f"); assert_eq!(cookies.next().unwrap(), "g=h"); assert_eq!(cookies.next(), None); } "/3" => { assert_eq!(req.method(), "GET"); assert_eq!(req.headers()[USER_AGENT], "default-agent"); let mut cookies = req.headers().get_all(COOKIE).iter(); assert_eq!(cookies.next().unwrap(), "a=b"); assert_eq!(cookies.next().unwrap(), "c=d"); assert_eq!(cookies.next(), None); } "/4" => { assert_eq!(req.method(), "GET"); assert_eq!(req.headers()[USER_AGENT], "default-agent"); let mut cookies = req.headers().get_all(COOKIE).iter(); assert_eq!(cookies.next().unwrap(), "e=f"); assert_eq!(cookies.next().unwrap(), "g=h"); assert_eq!(cookies.next(), None); } _ => { unreachable!("Unexpected request path: {}", path); } } http::Response::default() }); let mut default_headers = header::HeaderMap::new(); default_headers.insert( USER_AGENT, header::HeaderValue::from_static("default-agent"), ); default_headers.insert(COOKIE, header::HeaderValue::from_static("a=b")); default_headers.append(COOKIE, header::HeaderValue::from_static("c=d")); let client = Client::builder() .no_proxy() .default_headers(default_headers) .build() .unwrap(); let url = format!("http://{}/1", server.addr()); let res = client .get(&url) .header(USER_AGENT, "my-custom-agent") .send() .await .unwrap(); assert_eq!(res.uri(), url.as_str()); assert_eq!(res.status(), wreq::StatusCode::OK); let url = format!("http://{}/2", server.addr()); let res = client .get(&url) .header(USER_AGENT, 
"my-custom-agent") .header(COOKIE, "e=f") .header(COOKIE, "g=h") .send() .await .unwrap(); assert_eq!(res.uri(), url.as_str()); assert_eq!(res.status(), wreq::StatusCode::OK); let url = format!("http://{}/3", server.addr()); let res = client.get(&url).send().await.unwrap(); assert_eq!(res.uri(), url.as_str()); assert_eq!(res.status(), wreq::StatusCode::OK); let url = format!("http://{}/4", server.addr()); let res = client .get(&url) .header(COOKIE, "e=f") .header(COOKIE, "g=h") .send() .await .unwrap(); assert_eq!(res.uri(), url.as_str()); assert_eq!(res.status(), wreq::StatusCode::OK); } #[tokio::test] async fn donot_set_content_length_0_if_have_no_body() { let server = server::http(move |req| async move { let headers = req.headers(); assert_eq!(headers.get(CONTENT_LENGTH), None); assert!(headers.get(CONTENT_TYPE).is_none()); assert!(headers.get(TRANSFER_ENCODING).is_none()); http::Response::default() }); let url = format!("http://{}/content-length", server.addr()); let res = Client::builder() .no_proxy() .build() .expect("client builder") .get(&url) .send() .await .expect("request"); assert_eq!(res.status(), wreq::StatusCode::OK); } #[tokio::test] async fn user_agent() { let server = server::http(move |req| async move { assert_eq!(req.headers()["user-agent"], "wreq-test-agent"); http::Response::default() }); let url = format!("http://{}/ua", server.addr()); let res = Client::builder() .user_agent("wreq-test-agent") .build() .expect("client builder") .get(&url) .send() .await .expect("request"); assert_eq!(res.status(), wreq::StatusCode::OK); } #[tokio::test] async fn response_text() { let _ = env_logger::try_init(); let server = server::http(move |_req| async { http::Response::new("Hello".into()) }); let client = Client::new(); let res = client .get(format!("http://{}/text", server.addr())) .send() .await .expect("Failed to get"); assert_eq!(res.content_length(), Some(5)); let text = res.text().await.expect("Failed to get text"); assert_eq!("Hello", text); } 
#[tokio::test] async fn response_bytes() { let _ = env_logger::try_init(); let server = server::http(move |_req| async { http::Response::new("Hello".into()) }); let client = Client::new(); let res = client .get(format!("http://{}/bytes", server.addr())) .send() .await .expect("Failed to get"); assert_eq!(res.content_length(), Some(5)); let bytes = res.bytes().await.expect("res.bytes()"); assert_eq!("Hello", bytes); } #[tokio::test] #[cfg(feature = "json")] async fn response_json() { let _ = env_logger::try_init(); let server = server::http(move |_req| async { http::Response::new("\"Hello\"".into()) }); let client = Client::new(); let res = client .get(format!("http://{}/json", server.addr())) .send() .await .expect("Failed to get"); let text = res.json::().await.expect("Failed to get json"); assert_eq!("Hello", text); } #[tokio::test] async fn body_pipe_response() { let _ = env_logger::try_init(); let server = server::http(move |req| async move { if req.uri() == "/get" { http::Response::new("pipe me".into()) } else { assert_eq!(req.uri(), "/pipe"); assert_eq!(req.headers()["content-length"], "7"); let full: Vec = req .into_body() .collect() .await .expect("must succeed") .to_bytes() .to_vec(); assert_eq!(full, b"pipe me"); http::Response::default() } }); let client = Client::new(); let res1 = client .get(format!("http://{}/get", server.addr())) .send() .await .expect("get1"); assert_eq!(res1.status(), wreq::StatusCode::OK); assert_eq!(res1.content_length(), Some(7)); // and now ensure we can "pipe" the response to another request let res2 = client .post(format!("http://{}/pipe", server.addr())) .body(res1) .send() .await .expect("res2"); assert_eq!(res2.status(), wreq::StatusCode::OK); } #[tokio::test] async fn overridden_dns_resolution_with_gai() { let _ = env_logger::builder().is_test(true).try_init(); let server = server::http(move |_req| async { http::Response::new("Hello".into()) }); let overridden_domain = "rust-lang.org"; let url = format!( 
"http://{overridden_domain}:{}/domain_override", server.addr().port() ); let client = Client::builder() .no_proxy() .resolve(overridden_domain, server.addr()) .build() .expect("client builder"); let req = client.get(&url); let res = req.send().await.expect("request"); assert_eq!(res.status(), wreq::StatusCode::OK); let text = res.text().await.expect("Failed to get text"); assert_eq!("Hello", text); } #[tokio::test] async fn overridden_dns_resolution_with_gai_multiple() { let _ = env_logger::builder().is_test(true).try_init(); let server = server::http(move |_req| async { http::Response::new("Hello".into()) }); let overridden_domain = "rust-lang.org"; let url = format!( "http://{overridden_domain}:{}/domain_override", server.addr().port() ); // the server runs on IPv4 localhost, so provide both IPv4 and IPv6 and let the happy eyeballs // algorithm decide which address to use. let client = Client::builder() .no_proxy() .resolve_to_addrs( overridden_domain, [ std::net::SocketAddr::new( std::net::IpAddr::V6(std::net::Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), server.addr().port(), ), server.addr(), ], ) .build() .expect("client builder"); let req = client.get(&url); let res = req.send().await.expect("request"); assert_eq!(res.status(), wreq::StatusCode::OK); let text = res.text().await.expect("Failed to get text"); assert_eq!("Hello", text); } #[cfg(feature = "hickory-dns")] #[tokio::test] async fn overridden_dns_resolution_with_hickory_dns() { let _ = env_logger::builder().is_test(true).try_init(); let server = server::http(move |_req| async { http::Response::new("Hello".into()) }); let overridden_domain = "rust-lang.org"; let url = format!( "http://{overridden_domain}:{}/domain_override", server.addr().port() ); let client = Client::builder() .no_proxy() .resolve(overridden_domain, server.addr()) .build() .expect("client builder"); let req = client.get(&url); let res = req.send().await.expect("request"); assert_eq!(res.status(), wreq::StatusCode::OK); let text = 
res.text().await.expect("Failed to get text"); assert_eq!("Hello", text); } #[cfg(feature = "hickory-dns")] #[tokio::test] async fn overridden_dns_resolution_with_hickory_dns_multiple() { let _ = env_logger::builder().is_test(true).try_init(); let server = server::http(move |_req| async { http::Response::new("Hello".into()) }); let overridden_domain = "rust-lang.org"; let url = format!( "http://{overridden_domain}:{}/domain_override", server.addr().port() ); // the server runs on IPv4 localhost, so provide both IPv4 and IPv6 and let the happy eyeballs // algorithm decide which address to use. let client = Client::builder() .no_proxy() .resolve_to_addrs( overridden_domain, [ std::net::SocketAddr::new( std::net::IpAddr::V6(std::net::Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), server.addr().port(), ), server.addr(), ], ) .build() .expect("client builder"); let req = client.get(&url); let res = req.send().await.expect("request"); assert_eq!(res.status(), wreq::StatusCode::OK); let text = res.text().await.expect("Failed to get text"); assert_eq!("Hello", text); } #[test] #[cfg(feature = "json")] fn add_json_default_content_type_if_not_set_manually() { let mut map = HashMap::new(); map.insert("body", "json"); let content_type = http::HeaderValue::from_static("application/vnd.api+json"); let req = Client::new() .post("https://google.com/") .header(CONTENT_TYPE, &content_type) .json(&map) .build() .expect("request is not valid"); assert_eq!(content_type, req.headers().get(CONTENT_TYPE).unwrap()); } #[test] #[cfg(feature = "json")] fn update_json_content_type_if_set_manually() { let mut map = HashMap::new(); map.insert("body", "json"); let req = Client::new() .post("https://google.com/") .json(&map) .build() .expect("request is not valid"); assert_eq!("application/json", req.headers().get(CONTENT_TYPE).unwrap()); } #[tokio::test] async fn test_tls_info() { let resp = Client::builder() .tls_info(true) .build() .expect("client builder") .get("https://google.com") .send() .await 
.expect("response"); let tls_info = resp.extensions().get::().unwrap(); let peer_certificate = tls_info.peer_certificate(); assert!(peer_certificate.is_some()); let der = peer_certificate.unwrap(); assert_eq!(der[0], 0x30); // ASN.1 SEQUENCE let resp = Client::builder() .build() .expect("client builder") .get("https://google.com") .send() .await .expect("response"); let tls_info = resp.extensions().get::(); assert!(tls_info.is_none()); } #[tokio::test] async fn close_connection_after_idle_timeout() { let mut server = server::http(move |_| async move { http::Response::default() }); let client = Client::builder() .pool_idle_timeout(std::time::Duration::from_secs(1)) .build() .unwrap(); let url = format!("http://{}", server.addr()); client.get(&url).send().await.unwrap(); tokio::time::sleep(std::time::Duration::from_secs(2)).await; assert!( server .events() .iter() .any(|e| matches!(e, server::Event::ConnectionClosed)) ); } #[tokio::test] async fn http1_reason_phrase() { let server = server::low_level_with_response(|_raw_request, client_socket| { Box::new(async move { client_socket .write_all(b"HTTP/1.1 418 I'm not a teapot\r\nContent-Length: 0\r\n\r\n") .await .expect("response write_all failed"); }) }); let client = Client::new(); let res = client .get(format!("http://{}", server.addr())) .send() .await .expect("Failed to get"); assert_eq!( res.error_for_status().unwrap_err().to_string(), format!( "HTTP status client error (418 I'm not a teapot) for uri (http://{}/)", server.addr() ) ); } #[tokio::test] async fn error_has_url() { let u = "http://does.not.exist.local/ever"; let err = wreq::get(u).send().await.unwrap_err(); assert_eq!( err.uri().map(ToString::to_string).as_deref(), Some(u), "{err:?}" ); } #[tokio::test] async fn http1_only() { let server = server::http(move |_| async move { http::Response::default() }); let resp = Client::builder() .http1_only() .build() .unwrap() .get(format!("http://{}", server.addr())) .send() .await .unwrap(); 
assert_eq!(resp.version(), wreq::Version::HTTP_11); let resp = wreq::get(format!("http://{}", server.addr())) .version(Version::HTTP_11) .send() .await .unwrap(); assert_eq!(resp.version(), wreq::Version::HTTP_11); } #[tokio::test] async fn http2_only() { let server = server::http(move |_| async move { http::Response::default() }); let resp = Client::builder() .http2_only() .build() .unwrap() .get(format!("http://{}", server.addr())) .send() .await .unwrap(); assert_eq!(resp.version(), wreq::Version::HTTP_2); let resp = wreq::get(format!("http://{}", server.addr())) .version(Version::HTTP_2) .send() .await .unwrap(); assert_eq!(resp.version(), wreq::Version::HTTP_2); } #[tokio::test] async fn connection_pool_cache() { let client = Client::default(); let url = "https://hyper.rs"; let resp = client .get(url) .version(http::Version::HTTP_2) .send() .await .unwrap(); assert_eq!(resp.status(), wreq::StatusCode::OK); assert_eq!(resp.version(), http::Version::HTTP_2); let resp = client .get(url) .version(http::Version::HTTP_11) .send() .await .unwrap(); assert_eq!(resp.status(), wreq::StatusCode::OK); assert_eq!(resp.version(), http::Version::HTTP_11); let resp = client .get(url) .version(http::Version::HTTP_2) .send() .await .unwrap(); assert_eq!(resp.status(), wreq::StatusCode::OK); assert_eq!(resp.version(), http::Version::HTTP_2); } #[tokio::test] async fn http1_send_case_sensitive_headers() { // Create a request with a case-sensitive header let mut orig_headers = OrigHeaderMap::new(); orig_headers.insert("X-custom-header"); orig_headers.insert("Host"); let resp = wreq::get("https://tls.browserleaks.com") .header("X-Custom-Header", "value") .orig_headers(orig_headers) .version(Version::HTTP_11) .send() .await .unwrap() .text() .await .unwrap(); assert!(resp.contains("X-custom-header")); assert!(resp.contains("Host")); } #[tokio::test] async fn tunnel_includes_proxy_auth_with_multiple_proxies() { let url = "http://hyper.rs.local/prox"; let server1 = server::http(move 
|req| { assert_eq!(req.method(), "GET"); assert_eq!(req.uri(), url); assert_eq!(req.headers()["host"], "hyper.rs.local"); assert_eq!( req.headers()["proxy-authorization"], "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==" ); assert_eq!(req.headers()["proxy-header"], "proxy2"); async { http::Response::default() } }); let proxy_url = format!("http://Aladdin:open%20sesame@{}", server1.addr()); let mut headers1 = wreq::header::HeaderMap::new(); headers1.insert("proxy-header", "proxy1".parse().unwrap()); let mut headers2 = wreq::header::HeaderMap::new(); headers2.insert("proxy-header", "proxy2".parse().unwrap()); let client = Client::builder() // When processing proxy headers, the first one is iterated, // and if the current URL does not match, the proxy is skipped .proxy( wreq::Proxy::https(&proxy_url) .unwrap() .custom_http_headers(headers1.clone()), ) // When processing proxy headers, the second one is iterated, // and for the current URL matching, the proxy will be used .proxy( wreq::Proxy::http(&proxy_url) .unwrap() .custom_http_headers(headers2.clone()), ) .build() .unwrap(); let res = client.get(url).send().await.unwrap(); assert_eq!(res.uri(), url); assert_eq!(res.status(), wreq::StatusCode::OK); let client = Client::builder() // When processing proxy headers, the first one is iterated, // and for the current URL matching, the proxy will be used .proxy( wreq::Proxy::http(&proxy_url) .unwrap() .custom_http_headers(headers2), ) // When processing proxy headers, the second one is iterated, // and if the current URL does not match, the proxy is skipped .proxy( wreq::Proxy::https(&proxy_url) .unwrap() .custom_http_headers(headers1), ) .build() .unwrap(); let res = client.get(url).send().await.unwrap(); assert_eq!(res.uri(), url); assert_eq!(res.status(), wreq::StatusCode::OK); } #[tokio::test] async fn skip_default_headers() { let server = server::http(move |req| async move { let path = req.uri().path(); match path { "/skip" => { assert_eq!(req.method(), "GET"); 
assert_eq!(req.headers().get(USER_AGENT), None); assert_eq!(req.headers().get(ACCEPT), None); } "/no_skip" => { assert_eq!(req.method(), "GET"); assert_eq!(req.headers()[USER_AGENT], "test-agent"); assert_eq!(req.headers()[ACCEPT], "*/*"); } _ => unreachable!("Unexpected request path: {path}"), } http::Response::default() }); let client = Client::builder() .default_headers({ let mut headers = wreq::header::HeaderMap::new(); headers.insert(USER_AGENT, "test-agent".parse().unwrap()); headers.insert(ACCEPT, "*/*".parse().unwrap()); headers }) .no_proxy() .build() .unwrap(); let url = format!("http://{}/skip", server.addr()); let res = client .get(&url) .default_headers(false) .send() .await .unwrap(); assert_eq!(res.uri(), url.as_str()); assert_eq!(res.status(), wreq::StatusCode::OK); let url = format!("http://{}/no_skip", server.addr()); let res = client.get(&url).send().await.unwrap(); assert_eq!(res.uri(), url.as_str()); assert_eq!(res.status(), wreq::StatusCode::OK); } #[tokio::test] async fn test_client_same_header_values_append() { let server = server::http(move |req| async move { let path = req.uri().path(); match path { "/duplicate-cookies" => { let cookie_values: Vec<_> = req.headers().get_all(header::COOKIE).iter().collect(); assert_eq!(cookie_values.len(), 1); assert_eq!(cookie_values[0], "duplicate=same_value"); } "/no-duplicate-cookies" => { let cookie_values: Vec<_> = req.headers().get_all(header::COOKIE).iter().collect(); assert_eq!(cookie_values.len(), 3); assert_eq!(cookie_values[0], "duplicate=same_value"); assert_eq!(cookie_values[1], "unique1=value1"); assert_eq!(cookie_values[2], "unique2=value2"); } _ => unreachable!("Unexpected request path: {}", path), } http::Response::default() }); let client = Client::builder() .no_proxy() .default_headers({ let mut headers = HeaderMap::new(); headers.insert( header::COOKIE, HeaderValue::from_static("duplicate=same_value"), ); headers.append(header::COOKIE, HeaderValue::from_static("unique1=value1")); 
headers.append(header::COOKIE, HeaderValue::from_static("unique2=value2")); headers }) .build() .unwrap(); let res = client .get(format!("http://{}/duplicate-cookies", server.addr())) .header(header::COOKIE, "duplicate=same_value") .send() .await .unwrap(); assert_eq!(res.status(), wreq::StatusCode::OK); let res = client .get(format!("http://{}/no-duplicate-cookies", server.addr())) .send() .await .unwrap(); assert_eq!(res.status(), wreq::StatusCode::OK); } #[cfg(all( feature = "gzip", feature = "brotli", feature = "deflate", feature = "zstd" ))] #[tokio::test] async fn test_client_default_accept_encoding() { let server = server::http(move |req| async move { let accept_encoding = req.headers().get(header::ACCEPT_ENCODING).unwrap(); if req.uri() == "/default" { assert_eq!(accept_encoding, "zstd"); } if req.uri() == "/custom" { assert_eq!(accept_encoding, "gzip"); } http::Response::default() }); let client = Client::builder() .default_headers({ let mut headers = HeaderMap::new(); headers.insert(header::ACCEPT_ENCODING, HeaderValue::from_static("zstd")); headers }) .no_proxy() .build() .unwrap(); let _ = client .get(format!("http://{}/default", server.addr())) .send() .await .unwrap(); let _ = client .get(format!("http://{}/custom", server.addr())) .header(header::ACCEPT_ENCODING, "gzip") .send() .await .unwrap(); } #[tokio::test] async fn response_trailers() { let server = server::http(move |req| async move { assert_eq!(req.uri().path(), "/trailers"); let body = Full::new(Bytes::from("HelloWorld!")).with_trailers(async move { let mut trailers = http::HeaderMap::new(); trailers.insert("chunky-trailer1", HeaderValue::from_static("value1")); trailers.insert("chunky-trailer2", HeaderValue::from_static("value2")); Some(Ok(trailers)) }); let mut resp = http::Response::new(wreq::Body::wrap(body)); resp.headers_mut().insert( header::TRAILER, header::HeaderValue::from_static("chunky-trailer1, chunky-trailer2"), ); resp.headers_mut().insert( header::TRANSFER_ENCODING, 
header::HeaderValue::from_static("chunked"), ); resp }); let mut res = wreq::get(format!("http://{}/trailers", server.addr())) .header(header::TE, "trailers") .send() .await .expect("Failed to get response"); assert_eq!(res.status(), StatusCode::OK); let mut body_content = Vec::new(); let mut trailers = HeaderMap::default(); while let Some(chunk) = res.frame().await { match chunk .unwrap() .into_data() .map_err(|frame| frame.into_trailers()) { Ok(res) => { body_content.extend_from_slice(&res); } Err(Ok(res)) => { trailers.extend(res); } _ => (), } } let body = String::from_utf8(body_content).expect("Invalid UTF-8"); assert_eq!(body, "HelloWorld!"); assert_eq!(trailers["chunky-trailer1"], "value1"); assert_eq!(trailers["chunky-trailer2"], "value2"); } ================================================ FILE: tests/connector_layers.rs ================================================ mod support; use std::time::Duration; use futures_util::future::join_all; use pretty_env_logger::env_logger; use support::{layer::DelayLayer, server}; use tower::{layer::util::Identity, limit::ConcurrencyLimitLayer, timeout::TimeoutLayer}; use wreq::Client; #[tokio::test] async fn non_op_layer() { let _ = env_logger::try_init(); let server = server::http(move |_req| async { http::Response::default() }); let url = format!("http://{}", server.addr()); let client = Client::builder() .connector_layer(Identity::new()) .no_proxy() .build() .unwrap(); let res = client.get(url).send().await; assert!(res.is_ok()); } #[tokio::test] async fn non_op_layer_with_timeout() { let _ = env_logger::try_init(); let client = Client::builder() .connector_layer(Identity::new()) .connect_timeout(Duration::from_millis(200)) .no_proxy() .build() .unwrap(); // never returns let url = "http://192.0.2.1:81/slow"; let res = client.get(url).send().await; let err = res.unwrap_err(); assert!(err.is_connect() && err.is_timeout()); } #[tokio::test] async fn with_connect_timeout_layer_never_returning() { let _ = 
env_logger::try_init(); let client = Client::builder() .connector_layer(TimeoutLayer::new(Duration::from_millis(100))) .no_proxy() .build() .unwrap(); // never returns let url = "http://192.0.2.1:81/slow"; let res = client.get(url).send().await; let err = res.unwrap_err(); assert!(err.is_connect() && err.is_timeout()); } #[tokio::test] async fn with_connect_timeout_layer_slow() { let _ = env_logger::try_init(); let server = server::http(move |_req| async { http::Response::default() }); let url = format!("http://{}", server.addr()); let client = Client::builder() .connector_layer(DelayLayer::new(Duration::from_millis(200))) .connector_layer(TimeoutLayer::new(Duration::from_millis(100))) .no_proxy() .build() .unwrap(); let res = client.get(url).send().await; let err = res.unwrap_err(); assert!(err.is_connect() && err.is_timeout()); } #[tokio::test] async fn multiple_timeout_layers_under_threshold() { let _ = env_logger::try_init(); let server = server::http(move |_req| async { http::Response::default() }); let url = format!("http://{}", server.addr()); let client = Client::builder() .connector_layer(DelayLayer::new(Duration::from_millis(100))) .connector_layer(TimeoutLayer::new(Duration::from_millis(200))) .connector_layer(TimeoutLayer::new(Duration::from_millis(300))) .connector_layer(TimeoutLayer::new(Duration::from_millis(500))) .connect_timeout(Duration::from_millis(200)) .no_proxy() .build() .unwrap(); let res = client.get(url).send().await; assert!(res.is_ok()); } #[tokio::test] async fn multiple_timeout_layers_over_threshold() { let _ = env_logger::try_init(); let server = server::http(move |_req| async { http::Response::default() }); let url = format!("http://{}", server.addr()); let client = Client::builder() .connector_layer(DelayLayer::new(Duration::from_millis(100))) .connector_layer(TimeoutLayer::new(Duration::from_millis(50))) .connector_layer(TimeoutLayer::new(Duration::from_millis(50))) .connector_layer(TimeoutLayer::new(Duration::from_millis(50))) 
.connect_timeout(Duration::from_millis(50)) .no_proxy() .build() .unwrap(); let res = client.get(url).send().await; let err = res.unwrap_err(); assert!(err.is_connect() && err.is_timeout()); } #[tokio::test] async fn with_concurrency_limit_layer_timeout() { let _ = env_logger::try_init(); let server = server::http(move |_req| async { http::Response::default() }); let url = format!("http://{}", server.addr()); let client = Client::builder() .connector_layer(DelayLayer::new(Duration::from_millis(100))) .connector_layer(ConcurrencyLimitLayer::new(1)) .timeout(Duration::from_millis(200)) .pool_max_idle_per_host(0) // disable connection reuse to force resource contention on the concurrency limit semaphore .no_proxy() .build() .unwrap(); // first call succeeds since no resource contention let res = client.get(url.clone()).send().await; assert!(res.is_ok()); // 3 calls where the second two wait on the first and time out let mut futures = Vec::new(); for _ in 0..3 { futures.push(client.clone().get(url.clone()).send()); } let all_res = join_all(futures).await; let timed_out = all_res .into_iter() .any(|res| res.is_err_and(|err| err.is_timeout())); assert!(timed_out, "at least one request should have timed out"); } #[tokio::test] async fn with_concurrency_limit_layer_success() { let _ = env_logger::try_init(); let server = server::http(move |_req| async { http::Response::default() }); let url = format!("http://{}", server.addr()); let client = Client::builder() .connector_layer(DelayLayer::new(Duration::from_millis(100))) .connector_layer(TimeoutLayer::new(Duration::from_millis(200))) .connector_layer(ConcurrencyLimitLayer::new(1)) .timeout(Duration::from_millis(1000)) .pool_max_idle_per_host(0) // disable connection reuse to force resource contention on the concurrency limit semaphore .no_proxy() .build() .unwrap(); // first call succeeds since no resource contention let res = client.get(url.clone()).send().await; assert!(res.is_ok()); // 3 calls of which all are 
individually below the inner timeout // and the sum is below outer timeout which affects the final call which waited the whole time let mut futures = Vec::new(); for _ in 0..3 { futures.push(client.clone().get(url.clone()).send()); } let all_res = join_all(futures).await; for res in all_res.into_iter() { assert!( res.is_ok(), "neither outer long timeout or inner short timeout should be exceeded" ); } } #[tokio::test] async fn no_generic_bounds_required_for_client_new() { let _ = env_logger::try_init(); let server = server::http(move |_req| async { http::Response::default() }); let url = format!("http://{}", server.addr()); let res = wreq::get(url).send().await; assert!(res.is_ok()); } ================================================ FILE: tests/cookie.rs ================================================ mod support; use http::{Version, header::COOKIE}; use support::server; use wreq::{Client, cookie::Jar}; #[tokio::test] async fn cookie_response_accessor() { let server = server::http(move |_req| async move { http::Response::builder() .header("Set-Cookie", "key=val") .header( "Set-Cookie", "expires=1; Expires=Wed, 21 Oct 2015 07:28:00 GMT", ) .header("Set-Cookie", "path=1; Path=/the-path") .header("Set-Cookie", "maxage=1; Max-Age=100") .header("Set-Cookie", "domain=1; Domain=mydomain") .header("Set-Cookie", "secure=1; Secure") .header("Set-Cookie", "httponly=1; HttpOnly") .header("Set-Cookie", "samesitelax=1; SameSite=Lax") .header("Set-Cookie", "samesitestrict=1; SameSite=Strict") .body(Default::default()) .unwrap() }); let url = format!("http://{}/", server.addr()); let res = wreq::get(&url).send().await.unwrap(); let cookies = res.cookies().collect::>(); // key=val assert_eq!(cookies[0].name(), "key"); assert_eq!(cookies[0].value(), "val"); // expires assert_eq!(cookies[1].name(), "expires"); assert_eq!( cookies[1].expires().unwrap(), std::time::SystemTime::UNIX_EPOCH + std::time::Duration::from_secs(1_445_412_480) ); // path assert_eq!(cookies[2].name(), "path"); 
assert_eq!(cookies[2].path().unwrap(), "/the-path"); // max-age assert_eq!(cookies[3].name(), "maxage"); assert_eq!( cookies[3].max_age().unwrap(), std::time::Duration::from_secs(100) ); // domain assert_eq!(cookies[4].name(), "domain"); assert_eq!(cookies[4].domain().unwrap(), "mydomain"); // secure assert_eq!(cookies[5].name(), "secure"); assert!(cookies[5].secure()); // httponly assert_eq!(cookies[6].name(), "httponly"); assert!(cookies[6].http_only()); // samesitelax assert_eq!(cookies[7].name(), "samesitelax"); assert!(cookies[7].same_site_lax()); // samesitestrict assert_eq!(cookies[8].name(), "samesitestrict"); assert!(cookies[8].same_site_strict()); } #[tokio::test] async fn cookie_store_simple() { let server = server::http(move |req| async move { if req.uri() == "/2" { assert_eq!(req.headers()["cookie"], "key=val"); } http::Response::builder() .header("Set-Cookie", "key=val; HttpOnly") .body(Default::default()) .unwrap() }); let client = Client::builder().cookie_store(true).build().unwrap(); let url = format!("http://{}/", server.addr()); client.get(&url).send().await.unwrap(); let url = format!("http://{}/2", server.addr()); client.get(&url).send().await.unwrap(); } #[tokio::test] async fn cookie_store_overwrite_existing() { let server = server::http(move |req| async move { if req.uri() == "/" { http::Response::builder() .header("Set-Cookie", "key=val") .body(Default::default()) .unwrap() } else if req.uri() == "/2" { assert_eq!(req.headers()["cookie"], "key=val"); http::Response::builder() .header("Set-Cookie", "key=val2") .body(Default::default()) .unwrap() } else { assert_eq!(req.uri(), "/3"); assert_eq!(req.headers()["cookie"], "key=val2"); http::Response::default() } }); let client = Client::builder().cookie_store(true).build().unwrap(); let url = format!("http://{}/", server.addr()); client.get(&url).send().await.unwrap(); let url = format!("http://{}/2", server.addr()); client.get(&url).send().await.unwrap(); let url = format!("http://{}/3", 
server.addr());
    client.get(&url).send().await.unwrap();
}

/// A cookie delivered with `Max-Age=0` expires immediately and must never be
/// replayed on a later request.
#[tokio::test]
async fn cookie_store_max_age() {
    let server = server::http(move |req| async move {
        // The cookie header must never reach the server.
        assert_eq!(req.headers().get("cookie"), None);
        http::Response::builder()
            .header("Set-Cookie", "key=val; Max-Age=0")
            .body(Default::default())
            .unwrap()
    });

    let client = Client::builder().cookie_store(true).build().unwrap();
    let url = format!("http://{}/", server.addr());
    client.get(&url).send().await.unwrap();
    client.get(&url).send().await.unwrap();
}

/// A cookie whose `Expires` lies in the past must be discarded by the store.
#[tokio::test]
async fn cookie_store_expires() {
    let server = server::http(move |req| async move {
        assert_eq!(req.headers().get("cookie"), None);
        http::Response::builder()
            .header(
                "Set-Cookie",
                "key=val; Expires=Wed, 21 Oct 2015 07:28:00 GMT",
            )
            .body(Default::default())
            .unwrap()
    });

    let client = Client::builder().cookie_store(true).build().unwrap();
    let url = format!("http://{}/", server.addr());
    client.get(&url).send().await.unwrap();
    client.get(&url).send().await.unwrap();
}

/// A `Path`-scoped cookie is sent only for requests under that path.
#[tokio::test]
async fn cookie_store_path() {
    let server = server::http(move |req| async move {
        if req.uri() == "/" {
            // Root requests must not carry the /subpath-scoped cookie.
            assert_eq!(req.headers().get("cookie"), None);
            http::Response::builder()
                .header("Set-Cookie", "key=val; Path=/subpath")
                .body(Default::default())
                .unwrap()
        } else {
            assert_eq!(req.uri(), "/subpath");
            assert_eq!(req.headers()["cookie"], "key=val");
            http::Response::default()
        }
    });

    let client = Client::builder().cookie_store(true).build().unwrap();

    let root_url = format!("http://{}/", server.addr());
    client.get(&root_url).send().await.unwrap();
    client.get(&root_url).send().await.unwrap();

    let scoped_url = format!("http://{}/subpath", server.addr());
    client.get(&scoped_url).send().await.unwrap();
}

/// A manually supplied `cookie` header must coexist with the cookie store:
/// the stored copy of the same cookie is not duplicated on later requests.
#[tokio::test]
async fn cookie_store_stores_response_cookie_with_manual_cookie() {
    let server = server::http(|req| async move {
        if req.uri() == "/1" {
            assert_eq!(req.headers()["cookie"], "key=val");
        }
        if req.uri() == "/2" {
            assert_eq!(req.headers()["cookie"], "key=val");
        }
        http::Response::builder()
.header("Set-Cookie", "key=val; HttpOnly") .body(Default::default()) .unwrap() }); let client = Client::builder().cookie_store(true).build().unwrap(); let set_url = format!("http://{}/1", server.addr()); let _ = client .get(&set_url) .header("cookie", "key=val") .send() .await .unwrap(); let check_url = format!("http://{}/2", server.addr()); let _ = client.get(&check_url).send().await.unwrap(); } #[tokio::test] async fn cookie_request_level_compression() { let server = server::http(|req| async move { match req.uri().path() { "/set" => http::Response::builder() .header("Set-Cookie", "cookie1=value1") .header("Set-Cookie", "cookie2=value2") .header("Set-Cookie", "cookie3=value3") .body(Default::default()) .unwrap(), "/default" | "/compressed" => { assert_eq!(req.version(), Version::HTTP_11); let cookies = req .headers() .get(COOKIE) .and_then(|v| v.to_str().ok()) .unwrap(); assert!( cookies.contains("cookie1=value1") && cookies.contains("cookie2=value2") && cookies.contains("cookie3=value3") ); assert!(cookies.contains("; ")); http::Response::default() } "/uncompressed" => { assert_eq!(req.version(), Version::HTTP_2); let cookies: Vec<_> = req .headers() .get_all(COOKIE) .iter() .map(|v| v.to_str().unwrap()) .collect(); assert_eq!(cookies.len(), 3); assert!(cookies.contains(&"cookie1=value1")); assert!(cookies.contains(&"cookie2=value2")); assert!(cookies.contains(&"cookie3=value3")); http::Response::default() } _ => unreachable!(), } }); let base_url = format!("http://{}", server.addr()); // Create a client with this jar let client = Client::builder() .cookie_provider(Jar::default()) .build() .unwrap(); // Set cookies client .get(format!("{}/set", base_url)) .send() .await .unwrap(); // Request with default behavior (compressed) client .get(format!("{}/default", base_url)) .send() .await .unwrap(); // Request with compressed cookies client .get(format!("{}/compressed", base_url)) .version(Version::HTTP_11) .send() .await .unwrap(); // Request with uncompressed 
// cookies
    client
        .get(format!("{}/uncompressed", base_url))
        .version(Version::HTTP_2)
        .send()
        .await
        .unwrap();
}


================================================
FILE: tests/deflate.rs
================================================
mod support;

use std::io::Write;

use flate2::{Compression, write::ZlibEncoder};
use support::server;
use tokio::io::AsyncWriteExt;

/// A large deflate body served in typical-sized chunks decodes correctly.
#[tokio::test]
async fn deflate_response() {
    deflate_case(10_000, 4096).await;
}

/// Decoding still works when the compressed body arrives one byte at a time.
#[tokio::test]
async fn deflate_single_byte_chunks() {
    deflate_case(10, 1).await;
}

/// A HEAD response advertising `content-encoding: deflate` with no body must
/// decode to the empty string rather than erroring.
#[tokio::test]
async fn test_deflate_empty_body() {
    let server = server::http(move |req| async move {
        assert_eq!(req.method(), "HEAD");
        http::Response::builder()
            .header("content-encoding", "deflate")
            .body(Default::default())
            .unwrap()
    });

    let res = wreq::head(format!("http://{}/deflate", server.addr()))
        .send()
        .await
        .unwrap();

    let body = res.text().await.unwrap();
    assert_eq!(body, "");
}

/// A caller-provided `Accept` header is forwarded untouched while the
/// automatic `Accept-Encoding` still advertises deflate.
#[tokio::test]
async fn test_accept_header_is_not_changed_if_set() {
    let server = server::http(move |req| async move {
        assert_eq!(req.headers()["accept"], "application/json");
        assert!(
            req.headers()["accept-encoding"]
                .to_str()
                .unwrap()
                .contains("deflate")
        );
        http::Response::default()
    });

    let res = wreq::get(format!("http://{}/accept", server.addr()))
        .header(
            wreq::header::ACCEPT,
            wreq::header::HeaderValue::from_static("application/json"),
        )
        .send()
        .await
        .unwrap();

    assert_eq!(res.status(), wreq::StatusCode::OK);
}

/// A caller-provided `Accept-Encoding` header suppresses the automatic one.
#[tokio::test]
async fn test_accept_encoding_header_is_not_changed_if_set() {
    let server = server::http(move |req| async move {
        assert_eq!(req.headers()["accept"], "*/*");
        assert_eq!(req.headers()["accept-encoding"], "identity");
        http::Response::default()
    });

    let res = wreq::get(format!("http://{}/accept-encoding", server.addr()))
        .header(wreq::header::ACCEPT, "*/*")
        .header(
            wreq::header::ACCEPT_ENCODING,
            wreq::header::HeaderValue::from_static("identity"),
        )
        .send()
        .await
        .unwrap();

    assert_eq!(res.status(), wreq::StatusCode::OK);
}

/// Shared driver: serve `response_size` bytes of deflate-compressed text in
/// `chunk_size`-byte chunks and assert the client decodes it transparently.
async fn
deflate_case(response_size: usize, chunk_size: usize) { use futures_util::stream::StreamExt; let content: String = (0..response_size).fold(String::new(), |mut acc, i| { acc.push_str(&format!("test {i}")); acc }); let mut encoder = ZlibEncoder::new(Vec::new(), Compression::default()); encoder.write_all(content.as_bytes()).unwrap(); let deflated_content = encoder.finish().unwrap(); let mut response = format!( "\ HTTP/1.1 200 OK\r\n\ Server: test-accept\r\n\ Content-Encoding: deflate\r\n\ Content-Length: {}\r\n\ \r\n", &deflated_content.len() ) .into_bytes(); response.extend(&deflated_content); let server = server::http(move |req| { assert!( req.headers()["accept-encoding"] .to_str() .unwrap() .contains("deflate") ); let deflated = deflated_content.clone(); async move { let len = deflated.len(); let stream = futures_util::stream::unfold((deflated, 0), move |(deflated, pos)| async move { let chunk = deflated.chunks(chunk_size).nth(pos)?.to_vec(); Some((chunk, (deflated, pos + 1))) }); let body = wreq::Body::wrap_stream(stream.map(Ok::<_, std::convert::Infallible>)); http::Response::builder() .header("content-encoding", "deflate") .header("content-length", len) .body(body) .unwrap() } }); let res = wreq::get(format!("http://{}/deflate", server.addr())) .send() .await .expect("response"); let body = res.text().await.expect("text"); assert_eq!(body, content); } const COMPRESSED_RESPONSE_HEADERS: &[u8] = b"HTTP/1.1 200 OK\x0d\x0a\ Content-Type: text/plain\x0d\x0a\ Connection: keep-alive\x0d\x0a\ Content-Encoding: deflate\x0d\x0a"; const RESPONSE_CONTENT: &str = "some message here"; fn deflate_compress(input: &[u8]) -> Vec { let mut encoder = ZlibEncoder::new(Vec::new(), Compression::default()); encoder.write_all(input).unwrap(); encoder.finish().unwrap() } #[tokio::test] async fn test_non_chunked_non_fragmented_response() { let server = server::low_level_with_response(|_raw_request, client_socket| { Box::new(async move { let deflated_content = 
deflate_compress(RESPONSE_CONTENT.as_bytes()); let content_length_header = format!("Content-Length: {}\r\n\r\n", deflated_content.len()).into_bytes(); let response = [ COMPRESSED_RESPONSE_HEADERS, &content_length_header, &deflated_content, ] .concat(); client_socket .write_all(response.as_slice()) .await .expect("response write_all failed"); client_socket.flush().await.expect("response flush failed"); }) }); let res = wreq::get(format!("http://{}/", server.addr())) .send() .await .expect("response"); assert_eq!(res.text().await.expect("text"), RESPONSE_CONTENT); } #[tokio::test] async fn test_chunked_fragmented_response_1() { const DELAY_BETWEEN_RESPONSE_PARTS: tokio::time::Duration = tokio::time::Duration::from_millis(1000); const DELAY_MARGIN: tokio::time::Duration = tokio::time::Duration::from_millis(50); let server = server::low_level_with_response(|_raw_request, client_socket| { Box::new(async move { let deflated_content = deflate_compress(RESPONSE_CONTENT.as_bytes()); let response_first_part = [ COMPRESSED_RESPONSE_HEADERS, format!( "Transfer-Encoding: chunked\r\n\r\n{:x}\r\n", deflated_content.len() ) .as_bytes(), &deflated_content, ] .concat(); let response_second_part = b"\r\n0\r\n\r\n"; client_socket .write_all(response_first_part.as_slice()) .await .expect("response_first_part write_all failed"); client_socket .flush() .await .expect("response_first_part flush failed"); tokio::time::sleep(DELAY_BETWEEN_RESPONSE_PARTS).await; client_socket .write_all(response_second_part) .await .expect("response_second_part write_all failed"); client_socket .flush() .await .expect("response_second_part flush failed"); }) }); let start = tokio::time::Instant::now(); let res = wreq::get(format!("http://{}/", server.addr())) .send() .await .expect("response"); assert_eq!(res.text().await.expect("text"), RESPONSE_CONTENT); assert!(start.elapsed() >= DELAY_BETWEEN_RESPONSE_PARTS - DELAY_MARGIN); } #[tokio::test] async fn test_chunked_fragmented_response_2() { const 
DELAY_BETWEEN_RESPONSE_PARTS: tokio::time::Duration = tokio::time::Duration::from_millis(1000); const DELAY_MARGIN: tokio::time::Duration = tokio::time::Duration::from_millis(50); let server = server::low_level_with_response(|_raw_request, client_socket| { Box::new(async move { let deflated_content = deflate_compress(RESPONSE_CONTENT.as_bytes()); let response_first_part = [ COMPRESSED_RESPONSE_HEADERS, format!( "Transfer-Encoding: chunked\r\n\r\n{:x}\r\n", deflated_content.len() ) .as_bytes(), &deflated_content, b"\r\n", ] .concat(); let response_second_part = b"0\r\n\r\n"; client_socket .write_all(response_first_part.as_slice()) .await .expect("response_first_part write_all failed"); client_socket .flush() .await .expect("response_first_part flush failed"); tokio::time::sleep(DELAY_BETWEEN_RESPONSE_PARTS).await; client_socket .write_all(response_second_part) .await .expect("response_second_part write_all failed"); client_socket .flush() .await .expect("response_second_part flush failed"); }) }); let start = tokio::time::Instant::now(); let res = wreq::get(format!("http://{}/", server.addr())) .send() .await .expect("response"); assert_eq!(res.text().await.expect("text"), RESPONSE_CONTENT); assert!(start.elapsed() >= DELAY_BETWEEN_RESPONSE_PARTS - DELAY_MARGIN); } #[tokio::test] async fn test_chunked_fragmented_response_with_extra_bytes() { const DELAY_BETWEEN_RESPONSE_PARTS: tokio::time::Duration = tokio::time::Duration::from_millis(1000); const DELAY_MARGIN: tokio::time::Duration = tokio::time::Duration::from_millis(50); let server = server::low_level_with_response(|_raw_request, client_socket| { Box::new(async move { let deflated_content = deflate_compress(RESPONSE_CONTENT.as_bytes()); let response_first_part = [ COMPRESSED_RESPONSE_HEADERS, format!( "Transfer-Encoding: chunked\r\n\r\n{:x}\r\n", deflated_content.len() ) .as_bytes(), &deflated_content, ] .concat(); let response_second_part = b"\r\n2ab\r\n0\r\n\r\n"; client_socket 
.write_all(response_first_part.as_slice()) .await .expect("response_first_part write_all failed"); client_socket .flush() .await .expect("response_first_part flush failed"); tokio::time::sleep(DELAY_BETWEEN_RESPONSE_PARTS).await; client_socket .write_all(response_second_part) .await .expect("response_second_part write_all failed"); client_socket .flush() .await .expect("response_second_part flush failed"); }) }); let start = tokio::time::Instant::now(); let res = wreq::get(format!("http://{}/", server.addr())) .send() .await .expect("response"); let err = res.text().await.expect_err("there must be an error"); assert!(err.is_decode()); assert!(start.elapsed() >= DELAY_BETWEEN_RESPONSE_PARTS - DELAY_MARGIN); } ================================================ FILE: tests/emulate.rs ================================================ use std::{ io::{self, Write}, time::Duration, }; use brotli::{CompressorWriter as BrotliEncoder, Decompressor as BrotliDecoder}; use flate2::{Compression, read::ZlibDecoder, write::ZlibEncoder}; use wreq::{ Client, Emulation, http1::Http1Options, http2::{ Http2Options, PseudoId, PseudoOrder, SettingId, SettingsOrder, StreamDependency, StreamId, }, tls::{ AlpnProtocol, ExtensionType, KeyShare, TlsOptions, TlsVersion, compress::{CertificateCompressionAlgorithm, CertificateCompressor, Codec}, }, }; use zstd::stream::{Decoder as ZstdDecoder, Encoder as ZstdEncoder}; #[derive(Debug)] struct BrotliCompressor { q: u32, lgwin: u32, } #[derive(Debug)] struct ZlibCompressor; #[derive(Debug)] struct ZstdCompressor; impl CertificateCompressor for BrotliCompressor { fn compress(&self) -> Codec { let q = self.q; let lgwin = self.lgwin; Codec::Dynamic(Box::new(move |input, output| { let mut writer = BrotliEncoder::new(output, input.len(), q, lgwin); writer.write_all(input)?; writer.flush()?; Ok(()) })) } fn decompress(&self) -> Codec { Codec::Pointer(|input, output| { let mut reader = BrotliDecoder::new(input, 4096); io::copy(&mut reader, output)?; Ok(()) 
}) } fn algorithm(&self) -> CertificateCompressionAlgorithm { CertificateCompressionAlgorithm::BROTLI } } impl CertificateCompressor for ZlibCompressor { fn compress(&self) -> Codec { Codec::Pointer(|input, output| { let mut encoder = ZlibEncoder::new(output, Compression::default()); encoder.write_all(input)?; encoder.finish()?; Ok(()) }) } fn decompress(&self) -> Codec { Codec::Pointer(|input, output| { let mut reader = ZlibDecoder::new(input); io::copy(&mut reader, output)?; Ok(()) }) } fn algorithm(&self) -> CertificateCompressionAlgorithm { CertificateCompressionAlgorithm::ZLIB } } impl CertificateCompressor for ZstdCompressor { fn compress(&self) -> Codec { Codec::Pointer(|input, output| { let mut encoder = ZstdEncoder::new(output, 0)?; encoder.write_all(input)?; encoder.finish()?; Ok(()) }) } fn decompress(&self) -> Codec { Codec::Pointer(|input, output| { let mut reader = ZstdDecoder::new(input)?; io::copy(&mut reader, output)?; Ok(()) }) } fn algorithm(&self) -> CertificateCompressionAlgorithm { CertificateCompressionAlgorithm::ZSTD } } macro_rules! 
join { ($sep:expr, $first:expr $(, $rest:expr)*) => { concat!($first $(, $sep, $rest)*) }; } fn tls_options_template() -> TlsOptions { // TLS options config TlsOptions::builder() .curves_list(join!( ":", "X25519MLKEM768", "X25519", "P-256", "P-384", "P-521", "ffdhe2048", "ffdhe3072" )) .cipher_list(join!( ":", "TLS_AES_128_GCM_SHA256", "TLS_CHACHA20_POLY1305_SHA256", "TLS_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", "TLS_RSA_WITH_AES_128_GCM_SHA256", "TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA" )) .sigalgs_list(join!( ":", "ecdsa_secp256r1_sha256", "ecdsa_secp384r1_sha384", "ecdsa_secp521r1_sha512", "rsa_pss_rsae_sha256", "rsa_pss_rsae_sha384", "rsa_pss_rsae_sha512", "rsa_pkcs1_sha256", "rsa_pkcs1_sha384", "rsa_pkcs1_sha512", "ecdsa_sha1", "rsa_pkcs1_sha1" )) .delegated_credentials(join!( ":", "ecdsa_secp256r1_sha256", "ecdsa_secp384r1_sha384", "ecdsa_secp521r1_sha512", "ecdsa_sha1" )) .certificate_compressors(vec![ &ZlibCompressor as _, &BrotliCompressor { q: 11, lgwin: 32 } as _, &ZstdCompressor as _, ]) .alpn_protocols([AlpnProtocol::HTTP2, AlpnProtocol::HTTP1]) .record_size_limit(0x4001) .pre_shared_key(true) .enable_ech_grease(true) .enable_ocsp_stapling(true) .enable_signed_cert_timestamps(true) .min_tls_version(TlsVersion::TLS_1_2) .max_tls_version(TlsVersion::TLS_1_3) .key_shares(vec![ KeyShare::X25519_MLKEM768, KeyShare::X25519, KeyShare::P256, ]) .preserve_tls13_cipher_list(true) .aes_hw_override(true) .random_aes_hw_override(true) .extension_permutation(&[ ExtensionType::SERVER_NAME, 
ExtensionType::EXTENDED_MASTER_SECRET,
            ExtensionType::RENEGOTIATE,
            ExtensionType::SUPPORTED_GROUPS,
            ExtensionType::EC_POINT_FORMATS,
            ExtensionType::SESSION_TICKET,
            ExtensionType::APPLICATION_LAYER_PROTOCOL_NEGOTIATION,
            ExtensionType::STATUS_REQUEST,
            ExtensionType::DELEGATED_CREDENTIAL,
            ExtensionType::CERTIFICATE_TIMESTAMP,
            ExtensionType::KEY_SHARE,
            ExtensionType::SUPPORTED_VERSIONS,
            ExtensionType::SIGNATURE_ALGORITHMS,
            ExtensionType::PSK_KEY_EXCHANGE_MODES,
            ExtensionType::RECORD_SIZE_LIMIT,
            ExtensionType::CERT_COMPRESSION,
            ExtensionType::ENCRYPTED_CLIENT_HELLO,
            ExtensionType::PADDING,
        ])
        .build()
}

/// Builds the HTTP/1 options used by the emulation template below.
fn http1_options_template() -> Http1Options {
    // HTTP/1 options config
    Http1Options::builder()
        .allow_obsolete_multiline_headers_in_responses(true)
        .max_headers(100)
        .build()
}

/// Builds the HTTP/2 options used by the emulation template below.
///
/// The pseudo-header order, settings order, and the exact numeric values
/// all feed the akamai-style HTTP/2 fingerprint asserted in the tests —
/// do not reorder or tweak them without updating the expected hashes.
fn http2_options_template() -> Http2Options {
    // HTTP/2 headers frame pseudo-header order
    let headers_pseudo_order = PseudoOrder::builder()
        .extend([
            PseudoId::Method,
            PseudoId::Path,
            PseudoId::Authority,
            PseudoId::Scheme,
        ])
        .build();

    // HTTP/2 settings frame order
    let settings_order = SettingsOrder::builder()
        .extend([
            SettingId::HeaderTableSize,
            SettingId::EnablePush,
            SettingId::MaxConcurrentStreams,
            SettingId::InitialWindowSize,
            SettingId::MaxFrameSize,
            SettingId::MaxHeaderListSize,
            SettingId::EnableConnectProtocol,
            SettingId::NoRfc7540Priorities,
        ])
        .build();

    Http2Options::builder()
        .header_table_size(65536)
        .enable_push(false)
        .initial_window_size(131072)
        .max_frame_size(16384)
        .initial_connection_window_size(12517377 + 65535)
        .headers_stream_dependency(StreamDependency::new(StreamId::ZERO, 41, false))
        .headers_pseudo_order(headers_pseudo_order)
        .settings_order(settings_order)
        .build()
}

/// Combines the TLS, HTTP/1 and HTTP/2 templates into a single
/// [`Emulation`] used by the fingerprinting tests.
fn emulation_template() -> Emulation {
    // This provider encapsulates TLS, HTTP/1, HTTP/2
    Emulation::builder()
        .tls_options(tls_options_template())
        .http1_options(http1_options_template())
        .http2_options(http2_options_template())
        .build(Default::default())
}

#[tokio::test]
async fn test_emulation() ->
wreq::Result<()> { let client = Client::builder() .emulation(emulation_template()) .connect_timeout(Duration::from_secs(10)) .tls_cert_verification(false) .build()?; let text = client .get("https://tls.browserleaks.com/") .send() .await? .text() .await?; assert!( text.contains("t13d1717h2_5b57614c22b0_3cbfd9057e0d"), "Response ja4_hash fingerprint not found: {text}" ); assert!( text.contains("6ea73faa8fc5aac76bded7bd238f6433"), "Response akamai_hash fingerprint not found: {text}" ); Ok(()) } #[tokio::test] async fn test_request_with_emulation() -> wreq::Result<()> { let client = Client::builder() .connect_timeout(Duration::from_secs(10)) .tls_cert_verification(false) .build()?; let text = client .get("https://tls.browserleaks.com/") .emulation(emulation_template()) .send() .await? .text() .await?; assert!( text.contains("t13d1717h2_5b57614c22b0_3cbfd9057e0d"), "Response ja4_hash fingerprint not found: {text}" ); assert!( text.contains("6ea73faa8fc5aac76bded7bd238f6433"), "Response akamai_hash fingerprint not found: {text}" ); Ok(()) } #[tokio::test] async fn test_request_with_emulation_tls() -> wreq::Result<()> { let client = Client::builder() .connect_timeout(Duration::from_secs(10)) .tls_cert_verification(false) .build()?; let emulation = Emulation::builder() .tls_options(tls_options_template()) .build(Default::default()); let text = client .get("https://tls.browserleaks.com/") .emulation(emulation) .send() .await? .text() .await?; assert!( text.contains("t13d1717h2_5b57614c22b0_3cbfd9057e0d"), "Response ja4_hash fingerprint not found: {text}" ); Ok(()) } #[tokio::test] async fn test_request_with_emulation_http2() -> wreq::Result<()> { let client = Client::builder() .connect_timeout(Duration::from_secs(10)) .tls_cert_verification(false) .build()?; let emulation = Emulation::builder() .http2_options(http2_options_template()) .build(Default::default()); let text = client .get("https://tls.browserleaks.com/") .emulation(emulation) .send() .await? 
.text() .await?; assert!( text.contains("6ea73faa8fc5aac76bded7bd238f6433"), "Response akamai_hash fingerprint not found: {text}" ); Ok(()) } ================================================ FILE: tests/gzip.rs ================================================ mod support; use std::io::Write; use flate2::{Compression, write::GzEncoder}; use support::server; use tokio::io::AsyncWriteExt; #[tokio::test] async fn gzip_response() { gzip_case(10_000, 4096).await; } #[tokio::test] async fn gzip_single_byte_chunks() { gzip_case(10, 1).await; } #[tokio::test] async fn test_gzip_empty_body() { let server = server::http(move |req| async move { assert_eq!(req.method(), "HEAD"); http::Response::builder() .header("content-encoding", "gzip") .body(Default::default()) .unwrap() }); let res = wreq::head(format!("http://{}/gzip", server.addr())) .send() .await .unwrap(); let body = res.text().await.unwrap(); assert_eq!(body, ""); } #[tokio::test] async fn test_accept_header_is_not_changed_if_set() { let server = server::http(move |req| async move { assert_eq!(req.headers()["accept"], "application/json"); assert!( req.headers()["accept-encoding"] .to_str() .unwrap() .contains("gzip") ); http::Response::default() }); let res = wreq::get(format!("http://{}/accept", server.addr())) .header( wreq::header::ACCEPT, wreq::header::HeaderValue::from_static("application/json"), ) .send() .await .unwrap(); assert_eq!(res.status(), wreq::StatusCode::OK); } #[tokio::test] async fn test_accept_encoding_header_is_not_changed_if_set() { let server = server::http(move |req| async move { assert_eq!(req.headers()["accept"], "*/*"); assert_eq!(req.headers()["accept-encoding"], "identity"); http::Response::default() }); let res = wreq::get(format!("http://{}/accept-encoding", server.addr())) .header(wreq::header::ACCEPT, "*/*") .header( wreq::header::ACCEPT_ENCODING, wreq::header::HeaderValue::from_static("identity"), ) .send() .await .unwrap(); assert_eq!(res.status(), wreq::StatusCode::OK); } async fn 
gzip_case(response_size: usize, chunk_size: usize) { use futures_util::stream::StreamExt; let content: String = (0..response_size).fold(String::new(), |mut acc, i| { acc.push_str(&format!("test {i}")); acc }); let mut encoder = GzEncoder::new(Vec::new(), Compression::default()); encoder.write_all(content.as_bytes()).unwrap(); let gzipped_content = encoder.finish().unwrap(); let mut response = format!( "\ HTTP/1.1 200 OK\r\n\ Server: test-accept\r\n\ Content-Encoding: gzip\r\n\ Content-Length: {}\r\n\ \r\n", &gzipped_content.len() ) .into_bytes(); response.extend(&gzipped_content); let server = server::http(move |req| { assert!( req.headers()["accept-encoding"] .to_str() .unwrap() .contains("gzip") ); let gzipped = gzipped_content.clone(); async move { let len = gzipped.len(); let stream = futures_util::stream::unfold((gzipped, 0), move |(gzipped, pos)| async move { let chunk = gzipped.chunks(chunk_size).nth(pos)?.to_vec(); Some((chunk, (gzipped, pos + 1))) }); let body = wreq::Body::wrap_stream(stream.map(Ok::<_, std::convert::Infallible>)); http::Response::builder() .header("content-encoding", "gzip") .header("content-length", len) .body(body) .unwrap() } }); let res = wreq::get(format!("http://{}/gzip", server.addr())) .send() .await .expect("response"); let body = res.text().await.expect("text"); assert_eq!(body, content); } const COMPRESSED_RESPONSE_HEADERS: &[u8] = b"HTTP/1.1 200 OK\x0d\x0a\ Content-Type: text/plain\x0d\x0a\ Connection: keep-alive\x0d\x0a\ Content-Encoding: gzip\x0d\x0a"; const RESPONSE_CONTENT: &str = "some message here"; fn gzip_compress(input: &[u8]) -> Vec { let mut encoder = GzEncoder::new(Vec::new(), Compression::default()); encoder.write_all(input).unwrap(); encoder.finish().unwrap() } #[tokio::test] async fn test_non_chunked_non_fragmented_response() { let server = server::low_level_with_response(|_raw_request, client_socket| { Box::new(async move { let gzipped_content = gzip_compress(RESPONSE_CONTENT.as_bytes()); let 
content_length_header = format!("Content-Length: {}\r\n\r\n", gzipped_content.len()).into_bytes(); let response = [ COMPRESSED_RESPONSE_HEADERS, &content_length_header, &gzipped_content, ] .concat(); client_socket .write_all(response.as_slice()) .await .expect("response write_all failed"); client_socket.flush().await.expect("response flush failed"); }) }); let res = wreq::get(format!("http://{}/", server.addr())) .send() .await .expect("response"); assert_eq!(res.text().await.expect("text"), RESPONSE_CONTENT); } #[tokio::test] async fn test_chunked_fragmented_response_1() { const DELAY_BETWEEN_RESPONSE_PARTS: tokio::time::Duration = tokio::time::Duration::from_millis(1000); const DELAY_MARGIN: tokio::time::Duration = tokio::time::Duration::from_millis(50); let server = server::low_level_with_response(|_raw_request, client_socket| { Box::new(async move { let gzipped_content = gzip_compress(RESPONSE_CONTENT.as_bytes()); let response_first_part = [ COMPRESSED_RESPONSE_HEADERS, format!( "Transfer-Encoding: chunked\r\n\r\n{:x}\r\n", gzipped_content.len() ) .as_bytes(), &gzipped_content, ] .concat(); let response_second_part = b"\r\n0\r\n\r\n"; client_socket .write_all(response_first_part.as_slice()) .await .expect("response_first_part write_all failed"); client_socket .flush() .await .expect("response_first_part flush failed"); tokio::time::sleep(DELAY_BETWEEN_RESPONSE_PARTS).await; client_socket .write_all(response_second_part) .await .expect("response_second_part write_all failed"); client_socket .flush() .await .expect("response_second_part flush failed"); }) }); let start = tokio::time::Instant::now(); let res = wreq::get(format!("http://{}/", server.addr())) .send() .await .expect("response"); assert_eq!(res.text().await.expect("text"), RESPONSE_CONTENT); assert!(start.elapsed() >= DELAY_BETWEEN_RESPONSE_PARTS - DELAY_MARGIN); } #[tokio::test] async fn test_chunked_fragmented_response_2() { const DELAY_BETWEEN_RESPONSE_PARTS: tokio::time::Duration = 
tokio::time::Duration::from_millis(1000); const DELAY_MARGIN: tokio::time::Duration = tokio::time::Duration::from_millis(50); let server = server::low_level_with_response(|_raw_request, client_socket| { Box::new(async move { let gzipped_content = gzip_compress(RESPONSE_CONTENT.as_bytes()); let response_first_part = [ COMPRESSED_RESPONSE_HEADERS, format!( "Transfer-Encoding: chunked\r\n\r\n{:x}\r\n", gzipped_content.len() ) .as_bytes(), &gzipped_content, b"\r\n", ] .concat(); let response_second_part = b"0\r\n\r\n"; client_socket .write_all(response_first_part.as_slice()) .await .expect("response_first_part write_all failed"); client_socket .flush() .await .expect("response_first_part flush failed"); tokio::time::sleep(DELAY_BETWEEN_RESPONSE_PARTS).await; client_socket .write_all(response_second_part) .await .expect("response_second_part write_all failed"); client_socket .flush() .await .expect("response_second_part flush failed"); }) }); let start = tokio::time::Instant::now(); let res = wreq::get(format!("http://{}/", server.addr())) .send() .await .expect("response"); assert_eq!(res.text().await.expect("text"), RESPONSE_CONTENT); assert!(start.elapsed() >= DELAY_BETWEEN_RESPONSE_PARTS - DELAY_MARGIN); } #[tokio::test] async fn test_chunked_fragmented_response_with_extra_bytes() { const DELAY_BETWEEN_RESPONSE_PARTS: tokio::time::Duration = tokio::time::Duration::from_millis(1000); const DELAY_MARGIN: tokio::time::Duration = tokio::time::Duration::from_millis(50); let server = server::low_level_with_response(|_raw_request, client_socket| { Box::new(async move { let gzipped_content = gzip_compress(RESPONSE_CONTENT.as_bytes()); let response_first_part = [ COMPRESSED_RESPONSE_HEADERS, format!( "Transfer-Encoding: chunked\r\n\r\n{:x}\r\n", gzipped_content.len() ) .as_bytes(), &gzipped_content, ] .concat(); let response_second_part = b"\r\n2ab\r\n0\r\n\r\n"; client_socket .write_all(response_first_part.as_slice()) .await .expect("response_first_part write_all failed"); 
client_socket .flush() .await .expect("response_first_part flush failed"); tokio::time::sleep(DELAY_BETWEEN_RESPONSE_PARTS).await; client_socket .write_all(response_second_part) .await .expect("response_second_part write_all failed"); client_socket .flush() .await .expect("response_second_part flush failed"); }) }); let start = tokio::time::Instant::now(); let res = wreq::get(format!("http://{}/", server.addr())) .send() .await .expect("response"); let err = res.text().await.expect_err("there must be an error"); assert!(err.is_decode()); assert!(start.elapsed() >= DELAY_BETWEEN_RESPONSE_PARTS - DELAY_MARGIN); } ================================================ FILE: tests/layers.rs ================================================ mod support; use std::time::Duration; use futures_util::future::join_all; use pretty_env_logger::env_logger; use support::{ layer::{DelayLayer, SharedConcurrencyLimitLayer}, server, }; use tower::{layer::util::Identity, limit::ConcurrencyLimitLayer, timeout::TimeoutLayer}; use wreq::Client; #[tokio::test] async fn non_op_layer() { let _ = env_logger::try_init(); let server = server::http(move |_req| async { http::Response::default() }); let url = format!("http://{}", server.addr()); let client = Client::builder() .layer(Identity::new()) .no_proxy() .build() .unwrap(); let res = client.get(url).send().await; assert!(res.is_ok()); } #[tokio::test] async fn non_op_layer_with_timeout() { let _ = env_logger::try_init(); let client = Client::builder() .layer(Identity::new()) .connect_timeout(Duration::from_millis(200)) .no_proxy() .build() .unwrap(); // never returns let url = "http://192.0.2.1:81/slow"; let res = client.get(url).send().await; let err = res.unwrap_err(); assert!(err.is_connect() && err.is_timeout()); } #[tokio::test] async fn with_connect_timeout_layer_never_returning() { let _ = env_logger::try_init(); let client = Client::builder() .layer(TimeoutLayer::new(Duration::from_millis(100))) .no_proxy() .build() .unwrap(); // never 
returns let url = "http://192.0.2.1:81/slow"; let res = client.get(url).send().await; let err = res.unwrap_err(); assert!(err.is_timeout()); } #[tokio::test] async fn with_connect_timeout_layer_slow() { let _ = env_logger::try_init(); let server = server::http(move |_req| async { http::Response::default() }); let url = format!("http://{}", server.addr()); let client = Client::builder() .layer(DelayLayer::new(Duration::from_millis(200))) .layer(TimeoutLayer::new(Duration::from_millis(100))) .no_proxy() .build() .unwrap(); let res = client.get(url).send().await; let err = res.unwrap_err(); assert!(err.is_timeout()); } #[tokio::test] async fn multiple_timeout_layers_under_threshold() { let _ = env_logger::try_init(); let server = server::http(move |_req| async { http::Response::default() }); let url = format!("http://{}", server.addr()); let client = Client::builder() .layer(DelayLayer::new(Duration::from_millis(100))) .layer(TimeoutLayer::new(Duration::from_millis(200))) .layer(TimeoutLayer::new(Duration::from_millis(300))) .layer(TimeoutLayer::new(Duration::from_millis(500))) .timeout(Duration::from_millis(200)) .no_proxy() .build() .unwrap(); let res = client.get(url).send().await; assert!(res.is_ok()); } #[tokio::test] async fn multiple_timeout_layers_over_threshold() { let _ = env_logger::try_init(); let server = server::http(move |_req| async { http::Response::default() }); let url = format!("http://{}", server.addr()); let client = Client::builder() .layer(DelayLayer::new(Duration::from_millis(100))) .layer(TimeoutLayer::new(Duration::from_millis(50))) .layer(TimeoutLayer::new(Duration::from_millis(50))) .layer(TimeoutLayer::new(Duration::from_millis(50))) .connect_timeout(Duration::from_millis(50)) .no_proxy() .build() .unwrap(); let res = client.get(url).send().await; let err = res.unwrap_err(); assert!(err.is_timeout()); } #[tokio::test] async fn layer_insert_headers() { let _ = env_logger::try_init(); let server = server::http(move |req| async move { let 
headers = req.headers().clone(); assert!(headers.contains_key("x-test-header")); http::Response::default() }); let url = format!("http://{}", server.addr()); let client = Client::builder() .layer(tower::util::MapRequestLayer::new( move |mut req: http::Request| { req.headers_mut().insert( "x-test-header", http::HeaderValue::from_static("test-value"), ); req }, )) .no_proxy() .build() .unwrap(); let res = client.get(url).send().await; assert!(res.is_ok()); } #[tokio::test] async fn with_concurrency_limit_layer_timeout() { let _ = env_logger::try_init(); let server = server::http(move |_req| async { http::Response::default() }); let url = format!("http://{}", server.addr()); let client = Client::builder() .layer(DelayLayer::new(Duration::from_millis(100))) .layer(SharedConcurrencyLimitLayer::new(2)) .timeout(Duration::from_millis(200)) .pool_max_idle_per_host(0) // disable connection reuse to force resource contention on the concurrency limit semaphore .no_proxy() .build() .unwrap(); // first call succeeds since no resource contention let res = client.get(url.clone()).send().await; assert!(res.is_ok()); // 3 calls where the second two wait on the first and time out let mut futures = Vec::new(); for _ in 0..3 { futures.push(client.clone().get(url.clone()).send()); } let all_res = join_all(futures).await; let timed_out = all_res .into_iter() .any(|res| res.is_err_and(|err| err.is_timeout())); assert!(timed_out, "at least one request should have timed out"); } #[tokio::test] async fn with_concurrency_limit_layer_success() { let _ = env_logger::try_init(); let server = server::http(move |_req| async { http::Response::default() }); let url = format!("http://{}", server.addr()); let client = Client::builder() .layer(DelayLayer::new(Duration::from_millis(100))) .layer(TimeoutLayer::new(Duration::from_millis(200))) .layer(ConcurrencyLimitLayer::new(1)) //2 .timeout(Duration::from_millis(1000)) .pool_max_idle_per_host(0) // disable connection reuse to force resource contention 
on the concurrency limit semaphore
        .no_proxy()
        .build()
        .unwrap();

    // first call succeeds since no resource contention
    let res = client.get(url.clone()).send().await;
    assert!(res.is_ok());

    // 3 calls of which all are individually below the inner timeout
    // and the sum is below outer timeout which affects the final call which waited the whole time
    let mut futures = Vec::new();
    for _ in 0..3 {
        futures.push(client.clone().get(url.clone()).send());
    }
    let all_res = join_all(futures).await;
    for res in all_res.into_iter() {
        assert!(
            res.is_ok(),
            "neither outer long timeout or inner short timeout should be exceeded"
        );
    }
}

#[tokio::test]
async fn no_generic_bounds_required_for_client_new() {
    let _ = env_logger::try_init();
    let server = server::http(move |_req| async { http::Response::default() });
    let url = format!("http://{}", server.addr());
    let res = wreq::get(url).send().await;
    assert!(res.is_ok());
}


================================================
FILE: tests/multipart.rs
================================================
mod support;

use http_body_util::BodyExt;
use pretty_env_logger::env_logger;
use support::server;

#[tokio::test]
async fn text_part() {
    let _ = env_logger::try_init();

    let form = wreq::multipart::Form::new().text("foo", "bar");

    let expected_body = format!(
        "\
         --{0}\r\n\
         Content-Disposition: form-data; name=\"foo\"\r\n\r\n\
         bar\r\n\
         --{0}--\r\n\
         ",
        form.boundary()
    );

    let ct = format!("multipart/form-data; boundary={}", form.boundary());

    let server = server::http(move |mut req| {
        let ct = ct.clone();
        let expected_body = expected_body.clone();
        async move {
            assert_eq!(req.method(), "POST");
            assert_eq!(req.headers()["content-type"], ct);
            assert_eq!(
                req.headers()["content-length"],
                expected_body.len().to_string()
            );

            // FIX: annotation had lost its generic parameter (`Vec`);
            // the accumulated body frames are raw bytes, so `Vec<u8>`.
            let mut full: Vec<u8> = Vec::new();
            while let Some(item) = req.body_mut().frame().await {
                full.extend(&*item.unwrap().into_data().unwrap());
            }

            assert_eq!(full, expected_body.as_bytes());

            http::Response::default()
        }
    });

    let url = format!("http://{}/multipart/1", server.addr());

    let res = wreq::post(&url).multipart(form).send().await.unwrap();

    assert_eq!(res.uri(), url.as_str());
    assert_eq!(res.status(), wreq::StatusCode::OK);
}

#[tokio::test]
async fn text_part_with_custom_boundary() {
    let _ = env_logger::try_init();

    let form =
        wreq::multipart::Form::with_boundary("----WebKitFormBoundary0123456789").text("foo", "bar");

    let expected_body = "\
         ------WebKitFormBoundary0123456789\r\n\
         Content-Disposition: form-data; name=\"foo\"\r\n\r\n\
         bar\r\n\
         ------WebKitFormBoundary0123456789--\r\n\
         ";

    let ct = "multipart/form-data; boundary=----WebKitFormBoundary0123456789";

    let server = server::http(move |mut req| async move {
        assert_eq!(req.method(), "POST");
        assert_eq!(req.headers()["content-type"], ct);
        assert_eq!(
            req.headers()["content-length"],
            expected_body.len().to_string()
        );

        // FIX: annotation had lost its generic parameter (`Vec`);
        // the accumulated body frames are raw bytes, so `Vec<u8>`.
        let mut full: Vec<u8> = Vec::new();
        while let Some(item) = req.body_mut().frame().await {
            full.extend(&*item.unwrap().into_data().unwrap());
        }

        assert_eq!(full, expected_body.as_bytes());

        http::Response::default()
    });

    let url = format!("http://{}/multipart/1", server.addr());

    let res = wreq::post(&url).multipart(form).send().await.unwrap();

    assert_eq!(res.uri(), url.as_str());
    assert_eq!(res.status(), wreq::StatusCode::OK);
}

#[cfg(feature = "stream")]
#[tokio::test]
async fn stream_part() {
    use futures_util::{future, stream};

    let _ = env_logger::try_init();

    let stream = wreq::Body::wrap_stream(stream::once(future::ready(Ok::<_, wreq::Error>(
        "part1 part2".to_owned(),
    ))));
    let part = wreq::multipart::Part::stream(stream);

    let form = wreq::multipart::Form::new()
        .text("foo", "bar")
        .part("part_stream", part);

    let expected_body = format!(
        "\
         --{0}\r\n\
         Content-Disposition: form-data; name=\"foo\"\r\n\
         \r\n\
         bar\r\n\
         --{0}\r\n\
         Content-Disposition: form-data; name=\"part_stream\"\r\n\
         \r\n\
         part1 part2\r\n\
         --{0}--\r\n\
         ",
        form.boundary()
    );

    let ct = format!("multipart/form-data; boundary={}", form.boundary());

    let server =
server::http(move |req| { let ct = ct.clone(); let expected_body = expected_body.clone(); async move { assert_eq!(req.method(), "POST"); assert_eq!(req.headers()["content-type"], ct); assert_eq!(req.headers()["transfer-encoding"], "chunked"); let full = req.collect().await.unwrap().to_bytes(); assert_eq!(full, expected_body.as_bytes()); http::Response::default() } }); let url = format!("http://{}/multipart/1", server.addr()); let res = wreq::post(&url) .multipart(form) .send() .await .expect("Failed to post multipart"); assert_eq!(res.uri(), url.as_str()); assert_eq!(res.status(), wreq::StatusCode::OK); } #[cfg(feature = "stream")] #[tokio::test] async fn async_impl_file_part() { let _ = env_logger::try_init(); let form = wreq::multipart::Form::new() .file("foo", "Cargo.lock") .await .unwrap(); let fcontents = std::fs::read_to_string("Cargo.lock").unwrap(); let expected_body = format!( "\ --{0}\r\n\ Content-Disposition: form-data; name=\"foo\"; filename=\"Cargo.lock\"\r\n\ Content-Type: application/octet-stream\r\n\r\n\ {1}\r\n\ --{0}--\r\n\ ", form.boundary(), fcontents ); let ct = format!("multipart/form-data; boundary={}", form.boundary()); let server = server::http(move |req| { let ct = ct.clone(); let expected_body = expected_body.clone(); async move { assert_eq!(req.method(), "POST"); assert_eq!(req.headers()["content-type"], ct); // files know their exact size assert_eq!( req.headers()["content-length"], expected_body.len().to_string() ); let full = req.collect().await.unwrap().to_bytes(); assert_eq!(full, expected_body.as_bytes()); http::Response::default() } }); let url = format!("http://{}/multipart/3", server.addr()); let res = wreq::post(&url).multipart(form).send().await.unwrap(); assert_eq!(res.uri(), url.as_str()); assert_eq!(res.status(), wreq::StatusCode::OK); } ================================================ FILE: tests/proxy.rs ================================================ mod support; use std::{env, sync::LazyLock}; use support::server; use 
tokio::sync::Mutex; use wreq::Client; // serialize tests that read from / write to environment variables static HTTP_PROXY_ENV_MUTEX: LazyLock> = LazyLock::new(|| Mutex::new(())); #[tokio::test] async fn http_proxy() { let url = "http://hyper.rs.local/prox"; let server = server::http(move |req| { assert_eq!(req.method(), "GET"); assert_eq!(req.uri(), url); assert_eq!(req.headers()["host"], "hyper.rs.local"); async { http::Response::default() } }); let proxy = format!("http://{}", server.addr()); let res = Client::builder() .proxy(wreq::Proxy::http(&proxy).unwrap()) .build() .unwrap() .get(url) .send() .await .unwrap(); assert_eq!(res.uri(), url); assert_eq!(res.status(), wreq::StatusCode::OK); } #[tokio::test] async fn http_proxy_basic_auth() { let url = "http://hyper.rs.local/prox"; let server = server::http(move |req| { assert_eq!(req.method(), "GET"); assert_eq!(req.uri(), url); assert_eq!(req.headers()["host"], "hyper.rs.local"); assert_eq!( req.headers()["proxy-authorization"], "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==" ); async { http::Response::default() } }); let proxy = format!("http://{}", server.addr()); let res = Client::builder() .proxy( wreq::Proxy::http(&proxy) .unwrap() .basic_auth("Aladdin", "open sesame"), ) .build() .unwrap() .get(url) .send() .await .unwrap(); assert_eq!(res.uri(), url); assert_eq!(res.status(), wreq::StatusCode::OK); } #[tokio::test] async fn http_proxy_basic_auth_parsed() { let url = "http://hyper.rs.local/prox"; let server = server::http(move |req| { assert_eq!(req.method(), "GET"); assert_eq!(req.uri(), url); assert_eq!(req.headers()["host"], "hyper.rs.local"); assert_eq!( req.headers()["proxy-authorization"], "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==" ); async { http::Response::default() } }); let proxy = format!("http://Aladdin:open%20sesame@{}", server.addr()); let res = Client::builder() .proxy(wreq::Proxy::http(&proxy).unwrap()) .build() .unwrap() .get(url) .send() .await .unwrap(); assert_eq!(res.uri(), url); 
assert_eq!(res.status(), wreq::StatusCode::OK); let res = wreq::get(url) .proxy(wreq::Proxy::http(&proxy).unwrap()) .send() .await .unwrap(); assert_eq!(res.uri(), url); assert_eq!(res.status(), wreq::StatusCode::OK); } #[tokio::test] async fn system_http_proxy_basic_auth_parsed() { let url = "http://hyper.rs.local/prox"; let server = server::http(move |req| { assert_eq!(req.method(), "GET"); assert_eq!(req.uri(), url); assert_eq!(req.headers()["host"], "hyper.rs.local"); assert_eq!( req.headers()["proxy-authorization"], "Basic QWxhZGRpbjpvcGVuc2VzYW1l" ); async { http::Response::default() } }); // avoid races with other tests that change "http_proxy" let _env_lock = HTTP_PROXY_ENV_MUTEX.lock().await; // save system setting first. let system_proxy = env::var("http_proxy"); // set-up http proxy. unsafe { env::set_var( "http_proxy", format!("http://Aladdin:opensesame@{}", server.addr()), ) } let res = Client::builder() .build() .unwrap() .get(url) .send() .await .unwrap(); assert_eq!(res.uri(), url); assert_eq!(res.status(), wreq::StatusCode::OK); // reset user setting. unsafe { match system_proxy { Err(_) => env::remove_var("http_proxy"), Ok(proxy) => env::set_var("http_proxy", proxy), } } } #[tokio::test] async fn test_no_proxy() { let server = server::http(move |req| { assert_eq!(req.method(), "GET"); assert_eq!(req.uri(), "/4"); async { http::Response::default() } }); let proxy = format!("http://{}", server.addr()); let url = format!("http://{}/4", server.addr()); // set up proxy and use no_proxy to clear up client builder proxies. 
let res = Client::builder() .proxy(wreq::Proxy::http(&proxy).unwrap()) .no_proxy() .build() .unwrap() .get(&url) .send() .await .unwrap(); assert_eq!(res.uri(), url.as_str()); assert_eq!(res.status(), wreq::StatusCode::OK); } #[tokio::test] async fn test_using_system_proxy() { let url = "http://not.a.real.sub.hyper.rs.local/prox"; let server = server::http(move |req| { assert_eq!(req.method(), "GET"); assert_eq!(req.uri(), url); assert_eq!(req.headers()["host"], "not.a.real.sub.hyper.rs.local"); async { http::Response::default() } }); // avoid races with other tests that change "http_proxy" let _env_lock = HTTP_PROXY_ENV_MUTEX.lock().await; // save system setting first. let system_proxy = env::var("http_proxy"); // set-up http proxy. unsafe { env::set_var("http_proxy", format!("http://{}", server.addr())); } // system proxy is used by default let res = wreq::get(url).send().await.unwrap(); assert_eq!(res.uri(), url); assert_eq!(res.status(), wreq::StatusCode::OK); // reset user setting. 
unsafe { match system_proxy { Err(_) => env::remove_var("http_proxy"), Ok(proxy) => env::set_var("http_proxy", proxy), } } } #[tokio::test] async fn http_over_http() { let url = "http://hyper.rs.local/prox"; let server = server::http(move |req| { assert_eq!(req.method(), "GET"); assert_eq!(req.uri(), url); assert_eq!(req.headers()["host"], "hyper.rs.local"); async { http::Response::default() } }); let proxy = format!("http://{}", server.addr()); let res = Client::builder() .proxy(wreq::Proxy::http(&proxy).unwrap()) .build() .unwrap() .get(url) .send() .await .unwrap(); assert_eq!(res.uri(), url); assert_eq!(res.status(), wreq::StatusCode::OK); } #[tokio::test] async fn http_proxy_custom_headers() { let url = "http://hyper.rs.local/prox"; let server = server::http(move |req| { assert_eq!(req.method(), "GET"); assert_eq!(req.uri(), url); assert_eq!(req.headers()["host"], "hyper.rs.local"); assert_eq!( req.headers()["proxy-authorization"], "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==" ); assert_eq!(req.headers()["x-custom-header"], "value"); async { http::Response::default() } }); let proxy = format!("http://Aladdin:open%20sesame@{}", server.addr()); let proxy = wreq::Proxy::http(&proxy).unwrap().custom_http_headers({ let mut headers = http::HeaderMap::new(); headers.insert("x-custom-header", "value".parse().unwrap()); headers }); let res = Client::builder() .proxy(proxy.clone()) .build() .unwrap() .get(url) .send() .await .unwrap(); assert_eq!(res.uri(), url); assert_eq!(res.status(), wreq::StatusCode::OK); let res = wreq::get(url).proxy(proxy).send().await.unwrap(); assert_eq!(res.uri(), url); assert_eq!(res.status(), wreq::StatusCode::OK); } #[tokio::test] async fn tunnel_detects_auth_required() { let url = "https://hyper.rs.local/prox"; let server = server::http(move |req| { assert_eq!(req.method(), "CONNECT"); assert_eq!(req.uri(), "hyper.rs.local:443"); assert!( !req.headers() .contains_key(http::header::PROXY_AUTHORIZATION) ); async { let mut res = 
http::Response::default(); *res.status_mut() = http::StatusCode::PROXY_AUTHENTICATION_REQUIRED; res } }); let proxy = format!("http://{}", server.addr()); let err = Client::builder() .proxy(wreq::Proxy::https(&proxy).unwrap()) .build() .unwrap() .get(url) .send() .await .unwrap_err(); let err = support::error::inspect(err).pop().unwrap(); assert!( err.contains("auth"), "proxy auth err expected, got: {err:?}" ); } #[tokio::test] async fn tunnel_includes_proxy_auth() { let url = "https://hyper.rs.local/prox"; let server = server::http(move |req| { assert_eq!(req.method(), "CONNECT"); assert_eq!(req.uri(), "hyper.rs.local:443"); assert_eq!( req.headers()["proxy-authorization"], "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==" ); async { // return 400 to not actually deal with TLS tunneling let mut res = http::Response::default(); *res.status_mut() = http::StatusCode::BAD_REQUEST; res } }); let proxy = format!("http://Aladdin:open%20sesame@{}", server.addr()); let err = Client::builder() .proxy(wreq::Proxy::https(&proxy).unwrap()) .build() .unwrap() .get(url) .send() .await .unwrap_err(); let err = support::error::inspect(err).pop().unwrap(); assert!( err.contains("unsuccessful"), "tunnel unsuccessful expected, got: {err:?}" ); } #[tokio::test] async fn tunnel_includes_user_agent() { let url = "https://hyper.rs.local/prox"; let server = server::http(move |req| { assert_eq!(req.method(), "CONNECT"); assert_eq!(req.uri(), "hyper.rs.local:443"); assert_eq!(req.headers()["user-agent"], "wreq-test"); async { // return 400 to not actually deal with TLS tunneling let mut res = http::Response::default(); *res.status_mut() = http::StatusCode::BAD_REQUEST; res } }); let proxy = format!("http://{}", server.addr()); let err = Client::builder() .proxy(wreq::Proxy::https(&proxy).unwrap().custom_http_headers({ let mut headers = http::HeaderMap::new(); headers.insert("user-agent", "wreq-test".parse().unwrap()); headers })) .user_agent("wreq-test") .build() .unwrap() .get(url) .send() .await 
.unwrap_err(); let err = support::error::inspect(err).pop().unwrap(); assert!( err.contains("unsuccessful"), "tunnel unsuccessful expected, got: {err:?}" ); } #[tokio::test] async fn proxy_tunnel_connect_error() { let client = Client::builder() .tls_cert_verification(false) .no_proxy() .build() .unwrap(); let invalid_proxies = vec![ "http://invalid.proxy:8080", "https://invalid.proxy:8080", "socks4://invalid.proxy:8080", "socks4a://invalid.proxy:8080", "socks5://invalid.proxy:8080", "socks5h://invalid.proxy:8080", ]; let target_urls = ["https://example.com", "http://example.com"]; for proxy in invalid_proxies { for url in target_urls { let err = client .get(url) .proxy(wreq::Proxy::all(proxy).unwrap()) .send() .await .unwrap_err(); assert!( err.is_proxy_connect(), "proxy connect error expected, got: {err:?}" ); } } } ================================================ FILE: tests/redirect.rs ================================================ mod support; use http::StatusCode; use http_body_util::BodyExt; use support::server; use wreq::{ Body, Client, redirect::{History, Policy}, }; #[tokio::test] async fn test_redirect_301_and_302_and_303_changes_post_to_get() { let codes = [301u16, 302, 303]; for &code in &codes { let redirect = server::http(move |req| async move { if req.method() == "POST" { assert_eq!(req.uri(), &*format!("/{code}")); http::Response::builder() .status(code) .header("location", "/dst") .header("server", "test-redirect") .body(Body::default()) .unwrap() } else { assert_eq!(req.method(), "GET"); http::Response::builder() .header("server", "test-dst") .body(Body::default()) .unwrap() } }); let url = format!("http://{}/{}", redirect.addr(), code); let dst = format!("http://{}/{}", redirect.addr(), "dst"); let res = wreq::post(&url) .redirect(Policy::default()) .send() .await .unwrap(); assert_eq!(res.uri(), dst.as_str()); assert_eq!(res.status(), wreq::StatusCode::OK); assert_eq!( res.headers().get(wreq::header::SERVER).unwrap(), &"test-dst" ); } } 
#[tokio::test] async fn test_redirect_307_and_308_tries_to_get_again() { let codes = [307u16, 308]; for &code in &codes { let redirect = server::http(move |req| async move { assert_eq!(req.method(), "GET"); if req.uri() == &*format!("/{code}") { http::Response::builder() .status(code) .header("location", "/dst") .header("server", "test-redirect") .body(Body::default()) .unwrap() } else { assert_eq!(req.uri(), "/dst"); http::Response::builder() .header("server", "test-dst") .body(Body::default()) .unwrap() } }); let url = format!("http://{}/{}", redirect.addr(), code); let dst = format!("http://{}/{}", redirect.addr(), "dst"); let res = wreq::get(&url) .redirect(Policy::default()) .send() .await .unwrap(); assert_eq!(res.uri(), dst.as_str()); assert_eq!(res.status(), wreq::StatusCode::OK); assert_eq!( res.headers().get(wreq::header::SERVER).unwrap(), &"test-dst" ); } } #[tokio::test] async fn test_redirect_307_and_308_tries_to_post_again() { let _ = pretty_env_logger::env_logger::try_init(); let codes = [307u16, 308]; for &code in &codes { let redirect = server::http(move |mut req| async move { assert_eq!(req.method(), "POST"); assert_eq!(req.headers()["content-length"], "5"); let data = req .body_mut() .frame() .await .unwrap() .unwrap() .into_data() .unwrap(); assert_eq!(&*data, b"Hello"); if req.uri() == &*format!("/{code}") { http::Response::builder() .status(code) .header("location", "/dst") .header("server", "test-redirect") .body(Body::default()) .unwrap() } else { assert_eq!(req.uri(), "/dst"); http::Response::builder() .header("server", "test-dst") .body(Body::default()) .unwrap() } }); let url = format!("http://{}/{}", redirect.addr(), code); let dst = format!("http://{}/{}", redirect.addr(), "dst"); let res = wreq::post(&url) .redirect(Policy::default()) .body("Hello") .send() .await .unwrap(); assert_eq!(res.uri(), dst.as_str()); assert_eq!(res.status(), wreq::StatusCode::OK); assert_eq!( res.headers().get(wreq::header::SERVER).unwrap(), &"test-dst" ); } 
} #[tokio::test] async fn test_redirect_removes_sensitive_headers() { use tokio::sync::watch; let (tx, rx) = watch::channel::>(None); let end_server = server::http(move |req| { let mut rx = rx.clone(); async move { assert_eq!(req.headers().get("cookie"), None); rx.changed().await.unwrap(); let mid_addr = rx.borrow().unwrap(); assert_eq!( req.headers()["referer"], format!("http://{mid_addr}/sensitive") ); http::Response::default() } }); let end_addr = end_server.addr(); let mid_server = server::http(move |req| async move { assert_eq!(req.headers()["cookie"], "foo=bar"); http::Response::builder() .status(302) .header("location", format!("http://{end_addr}/end")) .body(Body::default()) .unwrap() }); tx.send(Some(mid_server.addr())).unwrap(); Client::builder() .redirect(Policy::default()) .build() .unwrap() .get(format!("http://{}/sensitive", mid_server.addr())) .header( wreq::header::COOKIE, wreq::header::HeaderValue::from_static("foo=bar"), ) .send() .await .unwrap(); } #[tokio::test] async fn test_redirect_policy_can_return_errors() { let server = server::http(move |req| async move { assert_eq!(req.uri(), "/loop"); http::Response::builder() .status(302) .header("location", "/loop") .body(Body::default()) .unwrap() }); let url = format!("http://{}/loop", server.addr()); let err = wreq::get(&url) .redirect(Policy::default()) .send() .await .unwrap_err(); assert!(err.is_redirect()); } #[tokio::test] async fn test_redirect_policy_can_stop_redirects_without_an_error() { let server = server::http(move |req| async move { assert_eq!(req.uri(), "/no-redirect"); http::Response::builder() .status(302) .header("location", "/dont") .body(Body::default()) .unwrap() }); let url = format!("http://{}/no-redirect", server.addr()); let res = Client::builder() .redirect(Policy::none()) .build() .unwrap() .get(&url) .send() .await .unwrap(); assert_eq!(res.uri(), url.as_str()); assert_eq!(res.status(), wreq::StatusCode::FOUND); } #[tokio::test] async fn 
test_referer_is_not_set_if_disabled() { let server = server::http(move |req| async move { if req.uri() == "/no-refer" { http::Response::builder() .status(302) .header("location", "/dst") .body(Body::default()) .unwrap() } else { assert_eq!(req.uri(), "/dst"); assert_eq!(req.headers().get("referer"), None); http::Response::default() } }); Client::builder() .referer(false) .build() .unwrap() .get(format!("http://{}/no-refer", server.addr())) .send() .await .unwrap(); } #[tokio::test] async fn test_invalid_location_stops_redirect_gh484() { let server = server::http(move |_req| async move { http::Response::builder() .status(302) .header("location", "http://www.yikes{KABOOM}") .body(Body::default()) .unwrap() }); let url = format!("http://{}/yikes", server.addr()); let res = wreq::get(&url).send().await.unwrap(); assert_eq!(res.uri(), url.as_str()); assert_eq!(res.status(), wreq::StatusCode::FOUND); } #[tokio::test] async fn test_invalid_scheme_is_rejected() { let server = server::http(move |_req| async move { http::Response::builder() .status(302) .header("location", "htt://www.yikes.com/") .body(Body::default()) .unwrap() }); let url = format!("http://{}/yikes", server.addr()); let err = wreq::get(&url) .redirect(Policy::default()) .send() .await .unwrap_err(); assert!(err.is_builder()); } #[cfg(feature = "cookies")] #[tokio::test] async fn test_redirect_302_with_set_cookies() { let code = 302; let server = server::http(move |req| async move { if req.uri() == "/302" { http::Response::builder() .status(302) .header("location", "/dst") .header("set-cookie", "key=value") .body(Body::default()) .unwrap() } else { assert_eq!(req.uri(), "/dst"); assert_eq!(req.headers()["cookie"], "key=value"); http::Response::default() } }); let url = format!("http://{}/{}", server.addr(), code); let dst = format!("http://{}/{}", server.addr(), "dst"); let client = Client::builder() .cookie_store(true) .redirect(Policy::default()) .build() .unwrap(); let res = 
client.get(&url).send().await.unwrap(); assert_eq!(res.uri(), dst.as_str()); assert_eq!(res.status(), wreq::StatusCode::OK); } #[tokio::test] async fn test_redirect_limit_to_1() { let server = server::http(move |req| async move { let i: i32 = req .uri() .path() .rsplit('/') .next() .unwrap() .parse::() .unwrap(); assert!(req.uri().path().ends_with(&format!("/redirect/{i}"))); http::Response::builder() .status(302) .header("location", format!("/redirect/{}", i + 1)) .body(Body::default()) .unwrap() }); // The number at the end of the uri indicates the total number of redirections let url = format!("http://{}/redirect/0", server.addr()); let client = Client::builder() .redirect(Policy::limited(1)) .build() .unwrap(); let res = client.get(&url).send().await.unwrap_err(); // If the maximum limit is 1, then the final uri should be /redirect/1 assert_eq!( res.uri().unwrap().to_string(), format!("http://{}/redirect/1", server.addr()).as_str() ); assert!(res.is_redirect()); } #[tokio::test] async fn test_scheme_only_check_after_policy_return_follow() { let server = server::http(move |_| async move { http::Response::builder() .status(302) .header("location", "htt://www.yikes.com/") .body(Body::default()) .unwrap() }); let url = format!("http://{}/yikes", server.addr()); let res = Client::builder() .redirect(Policy::custom(|attempt| attempt.stop())) .build() .unwrap() .get(&url) .send() .await; assert!(res.is_ok()); assert_eq!(res.unwrap().status(), wreq::StatusCode::FOUND); let res = Client::builder() .redirect(Policy::custom(|attempt| attempt.follow())) .build() .unwrap() .get(&url) .send() .await; assert!(res.is_err()); assert!(res.unwrap_err().is_builder()); } #[tokio::test] async fn test_redirect_301_302_303_empty_payload_headers() { let codes = [301u16, 302, 303]; for &code in &codes { let redirect = server::http(move |mut req| async move { if req.method() == "POST" { let data = req .body_mut() .frame() .await .unwrap() .unwrap() .into_data() .unwrap(); 
assert_eq!(&*data, b"Hello"); if req.headers().get(wreq::header::CONTENT_LENGTH).is_some() { assert_eq!(req.headers()[wreq::header::CONTENT_LENGTH], "5"); } assert_eq!(req.uri(), &*format!("/{code}")); http::Response::builder() .header("location", "/dst") .header("server", "test-dst") .status(code) .body(Body::default()) .unwrap() } else { assert_eq!(req.method(), "GET"); assert!(req.headers().get(wreq::header::CONTENT_TYPE).is_none()); assert!(req.headers().get(wreq::header::CONTENT_LENGTH).is_none()); assert!(req.headers().get(wreq::header::CONTENT_ENCODING).is_none()); http::Response::builder() .header("server", "test-dst") .body(Body::default()) .unwrap() } }); let url = format!("http://{}/{}", redirect.addr(), code); let dst = format!("http://{}/{}", redirect.addr(), "dst"); let res = wreq::post(&url) .redirect(Policy::default()) .body("Hello") .header(wreq::header::CONTENT_TYPE, "text/plain") .header(wreq::header::CONTENT_LENGTH, "5") .header(wreq::header::CONTENT_ENCODING, "identity") .send() .await .unwrap(); assert_eq!(res.uri(), dst.as_str()); assert_eq!(res.status(), 200); assert_eq!( res.headers().get(wreq::header::SERVER).unwrap(), &"test-dst" ); } } #[tokio::test] async fn test_redirect_history() { let redirect = server::http(move |req| async move { if req.uri() == "/first" { http::Response::builder() .status(302) .header("location", "/second") .body(Body::default()) .unwrap() } else if req.uri() == "/second" { http::Response::builder() .status(302) .header("location", "/dst") .body(Body::default()) .unwrap() } else { assert_eq!(req.uri(), "/dst"); http::Response::builder() .header("server", "test-dst") .body(Body::default()) .unwrap() } }); let url = format!("http://{}/first", redirect.addr()); let dst = format!("http://{}/{}", redirect.addr(), "dst"); let client = Client::builder() .redirect(Policy::default()) .build() .unwrap(); let res = client.get(&url).send().await.unwrap(); assert_eq!(res.uri(), dst.as_str()); assert_eq!(res.status(), 
wreq::StatusCode::OK); assert_eq!( res.headers().get(wreq::header::SERVER).unwrap(), &"test-dst" ); let mut history = res.extensions().get::().unwrap().into_iter(); let next1 = history.next().unwrap(); assert_eq!(next1.status, 302); assert_eq!(next1.previous.path(), "/first"); assert_eq!(next1.uri.path(), "/second"); assert_eq!(next1.headers["location"], "/second"); let next2 = history.next().unwrap(); assert_eq!(next2.status, 302); assert_eq!(next2.previous.path(), "/second"); assert_eq!(next2.uri.path(), "/dst"); assert_eq!(next2.headers["location"], "/dst"); assert!(history.next().is_none()); } #[cfg(feature = "cookies")] #[tokio::test] async fn test_redirect_applies_set_cookie_from_redirect() { let server = server::http(move |req| async move { match req.uri().path() { "/start" => http::Response::builder() .status(302) .header("location", "/dst") .header("set-cookie", "session=abc; Path=/") .body(Body::default()) .unwrap(), "/dst" => { assert_eq!(req.headers()["cookie"], "session=abc"); http::Response::builder() .status(200) .body(Body::default()) .unwrap() } _ => http::Response::builder() .status(404) .body(Body::default()) .unwrap(), } }); let start = format!("http://{}/start", server.addr()); let dst = format!("http://{}/dst", server.addr()); let client = Client::builder() .cookie_store(true) .redirect(Policy::default()) .build() .unwrap(); let res = client.get(&start).send().await.unwrap(); assert_eq!(res.uri(), dst.as_str()); assert_eq!(res.status(), wreq::StatusCode::OK); } #[tokio::test] async fn test_redirect_async_pending_follow() { let server = server::http(move |req| async move { if req.uri() == "/async-redirect" { http::Response::builder() .status(302) .header("location", "/dst") .body(Body::default()) .unwrap() } else { assert_eq!(req.uri(), "/dst"); http::Response::builder() .header("server", "test-dst") .body(Body::default()) .unwrap() } }); let url = format!("http://{}/async-redirect", server.addr()); let dst = format!("http://{}/dst", 
server.addr()); let client = Client::builder() .redirect(Policy::custom(|attempt| { attempt.pending(|attempt| async move { // Simulate async decision-making tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; attempt.follow() }) })) .build() .unwrap(); let res = client.get(&url).send().await.unwrap(); assert_eq!(res.uri(), dst.as_str()); assert_eq!(res.status(), wreq::StatusCode::OK); assert_eq!( res.headers().get(wreq::header::SERVER).unwrap(), &"test-dst" ); } #[tokio::test] async fn test_redirect_location_is_encoded() { let server = server::http(move |req| async move { if req.uri() == "/start" { http::Response::builder() .status(302) .header("location", "/dst path") .body(wreq::Body::default()) .unwrap() } else { assert_eq!(req.uri().path(), "/dst%20path"); http::Response::builder() .status(StatusCode::OK) .body(wreq::Body::default()) .unwrap() } }); let url = format!("http://{}/start", server.addr()); let dst = format!("http://{}/dst%20path", server.addr()); let client = Client::builder() .redirect(Policy::default()) .build() .unwrap(); let res = client.get(&url).send().await.unwrap(); assert_eq!(res.uri(), dst.as_str()); assert_eq!(res.status(), StatusCode::OK); } ================================================ FILE: tests/retry.rs ================================================ mod support; use std::sync::{ Arc, atomic::{AtomicUsize, Ordering}, }; use support::server; use wreq::Client; #[tokio::test] async fn retries_apply_in_scope() { let _ = pretty_env_logger::try_init(); let cnt = Arc::new(AtomicUsize::new(0)); let server = server::http(move |_req| { let cnt = cnt.clone(); async move { if cnt.fetch_add(1, Ordering::Relaxed) == 0 { // first req is bad http::Response::builder() .status(http::StatusCode::SERVICE_UNAVAILABLE) .body(Default::default()) .unwrap() } else { http::Response::default() } } }); let scope = server.addr().ip().to_string(); let policy = wreq::retry::Policy::for_host(scope).classify_fn(|req_rep| { if req_rep.status() == 
Some(http::StatusCode::SERVICE_UNAVAILABLE) { req_rep.retryable() } else { req_rep.success() } }); let url = format!("http://{}", server.addr()); let resp = Client::builder() .retry(policy) .build() .unwrap() .get(url) .send() .await .unwrap(); assert_eq!(resp.status(), 200); } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn default_retries_have_a_limit() { let _ = pretty_env_logger::try_init(); let server = server::http_with_config( move |req| async move { assert_eq!(req.version(), http::Version::HTTP_2); // refused forever Err(http2::Error::from(http2::Reason::REFUSED_STREAM)) }, |_| {}, ); let client = Client::builder().http2_only().build().unwrap(); let url = format!("http://{}", server.addr()); let _err = client.get(url).send().await.unwrap_err(); } // NOTE: using the default "current_thread" runtime here would cause the test to // fail, because the only thread would block until `panic_rx` receives a // notification while the client needs to be driven to get the graceful shutdown // done. 
#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn highly_concurrent_requests_to_http2_server_with_low_max_concurrent_streams() { let client = Client::builder().http2_only().no_proxy().build().unwrap(); let server = server::http_with_config( move |req| async move { assert_eq!(req.version(), http::Version::HTTP_2); Ok::<_, std::convert::Infallible>(http::Response::default()) }, |builder| { builder.http2().max_concurrent_streams(1); }, ); let url = format!("http://{}", server.addr()); let futs = (0..100).map(|_| { let client = client.clone(); let url = url.clone(); async move { let res = client.get(&url).send().await.unwrap(); assert_eq!(res.status(), wreq::StatusCode::OK); } }); futures_util::future::join_all(futs).await; } #[tokio::test] async fn highly_concurrent_requests_to_slow_http2_server_with_low_max_concurrent_streams() { use support::delay_server; let client = Client::builder().http2_only().no_proxy().build().unwrap(); let server = delay_server::Server::new( move |req| async move { assert_eq!(req.version(), http::Version::HTTP_2); http::Response::default() }, |http| { http.http2().max_concurrent_streams(1); }, std::time::Duration::from_secs(2), ) .await; let url = format!("http://{}", server.addr()); let futs = (0..100).map(|_| { let client = client.clone(); let url = url.clone(); async move { let res = client.get(&url).send().await.unwrap(); assert_eq!(res.status(), wreq::StatusCode::OK); } }); futures_util::future::join_all(futs).await; server.shutdown().await; } ================================================ FILE: tests/support/crl.pem ================================================ -----BEGIN X509 CRL----- MIIBnjCBhwIBATANBgkqhkiG9w0BAQsFADANMQswCQYDVQQDDAJjYRcNMjQwOTI2 MDA0MjU1WhcNMjQxMDI2MDA0MjU1WjAUMBICAQEXDTI0MDkyNjAwNDI0NlqgMDAu MB8GA1UdIwQYMBaAFDxOaZI8zUaGX7mXAZ9Zd8jhyC3sMAsGA1UdFAQEAgIQATAN BgkqhkiG9w0BAQsFAAOCAQEAsqBa289UYKAOaH2gp3yC7YBF7uVZ25i3WV/InKjK zT/fFzZ9rL87ofl0VuR0GPAfwLXFQ96vYUg/nrlxF/A6FmQKf9JSlVBIVXaS2uyk 
fmdVX8fdU13uD2uKThT5Fojk5nKAeui0xwjTHqe9BjyDscQ5d5pkLIJUj/JbQmRF D/OtEpYQZMAdHLDF0a/9v69g/evlPlpTcikAU+T8rXp45rrsuuUgyhJ00UnE41j8 MmMi3cn23JjFTyOrYx5g/0VFUNcwZpgZSnxNvFbcoh9oHHqS+UDESrwQmkmwrVvH a7PEJq5ZPtjUPa0i7oFNa9cC+11Doo5bxkpCWhypvgTUzw== -----END X509 CRL----- ================================================ FILE: tests/support/delay_server.rs ================================================ #![allow(unused)] use std::{convert::Infallible, future::Future, net, time::Duration}; use futures_util::FutureExt; use http::{Request, Response}; use hyper::service::service_fn; use tokio::{net::TcpListener, select, sync::oneshot}; /// This server, unlike [`super::server::Server`], allows for delaying the /// specified amount of time after each TCP connection is established. This is /// useful for testing the behavior of the client when the server is slow. /// /// For example, in case of HTTP/2, once the TCP/TLS connection is established, /// both endpoints are supposed to send a preface and an initial `SETTINGS` /// frame (See [RFC9113 3.4] for details). What if these frames are delayed for /// whatever reason? This server allows for testing such scenarios. 
/// /// [RFC9113 3.4]: https://www.rfc-editor.org/rfc/rfc9113.html#name-http-2-connection-preface pub struct Server { addr: net::SocketAddr, shutdown_tx: Option>, server_terminated_rx: oneshot::Receiver<()>, } type Builder = hyper_util::server::conn::auto::Builder; impl Server { pub async fn new(func: F1, apply_config: F2, delay: Duration) -> Self where F1: Fn(Request) -> Fut + Clone + Send + 'static, Fut: Future> + Send + 'static, F2: FnOnce(&mut Builder) -> Bu + Send + 'static, { let (shutdown_tx, shutdown_rx) = oneshot::channel(); let (server_terminated_tx, server_terminated_rx) = oneshot::channel(); let tcp_listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); let addr = tcp_listener.local_addr().unwrap(); tokio::spawn(async move { let mut builder = hyper_util::server::conn::auto::Builder::new(hyper_util::rt::TokioExecutor::new()); apply_config(&mut builder); tokio::spawn(async move { let builder = builder; let (connection_shutdown_tx, connection_shutdown_rx) = oneshot::channel(); let connection_shutdown_rx = connection_shutdown_rx.shared(); let mut shutdown_rx = std::pin::pin!(shutdown_rx); let mut handles = Vec::new(); loop { select! { _ = shutdown_rx.as_mut() => { connection_shutdown_tx.send(()).unwrap(); break; } res = tcp_listener.accept() => { let (stream, _) = res.unwrap(); let io = hyper_util::rt::TokioIo::new(stream); let handle = tokio::spawn({ let connection_shutdown_rx = connection_shutdown_rx.clone(); let func = func.clone(); let svc = service_fn(move |req| { let fut = func(req); async move { Ok::<_, Infallible>(fut.await) }}); let builder = builder.clone(); async move { let fut = builder.serve_connection_with_upgrades(io, svc); tokio::time::sleep(delay).await; let mut conn = std::pin::pin!(fut); select! 
{ _ = conn.as_mut() => {} _ = connection_shutdown_rx => { conn.as_mut().graceful_shutdown(); conn.await.unwrap(); } } } }); handles.push(handle); } } } futures_util::future::join_all(handles).await; server_terminated_tx.send(()).unwrap(); }); }); Self { addr, shutdown_tx: Some(shutdown_tx), server_terminated_rx, } } pub async fn shutdown(mut self) { if let Some(tx) = self.shutdown_tx.take() { let _ = tx.send(()); } self.server_terminated_rx.await.unwrap(); } pub fn addr(&self) -> net::SocketAddr { self.addr } } ================================================ FILE: tests/support/error.rs ================================================ use std::error::Error as StdError; #[allow(unused)] pub fn inspect(err: E) -> Vec where E: Into>, { let berr = err.into(); let mut err = Some(&*berr as &(dyn StdError + 'static)); let mut errs = Vec::new(); while let Some(e) = err { errs.push(e.to_string()); err = e.source(); } errs } ================================================ FILE: tests/support/layer.rs ================================================ use std::{ future::Future, pin::Pin, task::{Context, Poll}, time::Duration, }; use futures::future::BoxFuture; use pin_project_lite::pin_project; use tokio::time::Sleep; use tower::{BoxError, Layer, Service}; /// This tower layer injects an arbitrary delay before calling downstream layers. #[derive(Clone)] pub struct DelayLayer { delay: Duration, } impl DelayLayer { #[allow(unused)] pub const fn new(delay: Duration) -> Self { DelayLayer { delay } } } impl Layer for DelayLayer { type Service = Delay; fn layer(&self, service: S) -> Self::Service { Delay::new(service, self.delay) } } impl std::fmt::Debug for DelayLayer { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { f.debug_struct("DelayLayer") .field("delay", &self.delay) .finish() } } /// This tower service injects an arbitrary delay before calling downstream layers. 
#[derive(Debug, Clone)] pub struct Delay { inner: S, delay: Duration, } impl Delay { pub fn new(inner: S, delay: Duration) -> Self { Delay { inner, delay } } } impl Service for Delay where S: Service, S::Error: Into, { type Response = S::Response; type Error = BoxError; type Future = ResponseFuture; fn poll_ready( &mut self, cx: &mut std::task::Context<'_>, ) -> std::task::Poll> { println!("Delay::poll_ready called"); match self.inner.poll_ready(cx) { Poll::Pending => Poll::Pending, Poll::Ready(r) => Poll::Ready(r.map_err(Into::into)), } } fn call(&mut self, req: Request) -> Self::Future { println!("Delay::call executed"); let response = self.inner.call(req); let sleep = tokio::time::sleep(self.delay); ResponseFuture::new(response, sleep) } } // `Delay` response future pin_project! { #[derive(Debug)] pub struct ResponseFuture { #[pin] response: S, #[pin] sleep: Sleep, } } impl ResponseFuture { pub(crate) fn new(response: S, sleep: Sleep) -> Self { ResponseFuture { response, sleep } } } impl Future for ResponseFuture where F: Future>, E: Into, { type Output = Result; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.project(); // First poll the sleep until complete match this.sleep.poll(cx) { Poll::Pending => return Poll::Pending, Poll::Ready(_) => {} } // Then poll the inner future match this.response.poll(cx) { Poll::Ready(v) => Poll::Ready(v.map_err(Into::into)), Poll::Pending => Poll::Pending, } } } #[derive(Clone)] pub struct SharedConcurrencyLimitLayer { semaphore: std::sync::Arc, } impl SharedConcurrencyLimitLayer { #[allow(unused)] pub fn new(limit: usize) -> Self { Self { semaphore: std::sync::Arc::new(tokio::sync::Semaphore::new(limit)), } } } impl tower::Layer for SharedConcurrencyLimitLayer { type Service = SharedConcurrencyLimit; fn layer(&self, inner: S) -> Self::Service { SharedConcurrencyLimit { inner, semaphore: self.semaphore.clone(), } } } #[derive(Clone)] pub struct SharedConcurrencyLimit { inner: S, semaphore: 
std::sync::Arc, } impl tower::Service for SharedConcurrencyLimit where S: tower::Service + Clone + Send + 'static, S::Future: Send + 'static, Req: Send + 'static, { type Response = S::Response; type Error = S::Error; type Future = BoxFuture<'static, Result>; fn poll_ready( &mut self, _cx: &mut std::task::Context<'_>, ) -> std::task::Poll> { // always ready, we handle limits in call std::task::Poll::Ready(Ok(())) } fn call(&mut self, req: Req) -> Self::Future { let semaphore = self.semaphore.clone(); let mut inner = self.inner.clone(); Box::pin(async move { let _permit = semaphore.acquire_owned().await.unwrap(); inner.call(req).await }) } } ================================================ FILE: tests/support/mod.rs ================================================ pub mod delay_server; pub mod error; pub mod layer; pub mod server; // TODO: remove once done converting to new support server? #[allow(unused)] pub static DEFAULT_USER_AGENT: &str = concat!(env!("CARGO_PKG_NAME"), "/", env!("CARGO_PKG_VERSION")); ================================================ FILE: tests/support/server.rs ================================================ use std::{ convert::Infallible, future::Future, io, net, sync::mpsc as std_mpsc, thread, time::Duration, }; use tokio::{io::AsyncReadExt, net::TcpStream, runtime, sync::oneshot}; use wreq::Body; pub struct Server { addr: net::SocketAddr, panic_rx: std_mpsc::Receiver<()>, events_rx: std_mpsc::Receiver, shutdown_tx: Option>, } #[non_exhaustive] pub enum Event { ConnectionClosed, } impl Server { pub fn addr(&self) -> net::SocketAddr { self.addr } #[allow(unused)] pub fn events(&mut self) -> Vec { let mut events = Vec::new(); while let Ok(event) = self.events_rx.try_recv() { events.push(event); } events } } impl Drop for Server { fn drop(&mut self) { if let Some(tx) = self.shutdown_tx.take() { let _ = tx.send(()); } if !::std::thread::panicking() { self.panic_rx .recv_timeout(Duration::from_secs(3)) .expect("test server should not panic"); } 
} } #[allow(unused)] pub fn http(func: F) -> Server where F: Fn(http::Request) -> Fut + Clone + Send + 'static, Fut: Future> + Send + 'static, { let infall = move |req| { let fut = func(req); async move { Ok::<_, Infallible>(fut.await) } }; http_with_config(infall, |_builder| {}) } type Builder = hyper_util::server::conn::auto::Builder; pub fn http_with_config(func: F1, apply_config: F2) -> Server where F1: Fn(http::Request) -> Fut + Clone + Send + 'static, Fut: Future, E>> + Send + 'static, E: Into>, F2: FnOnce(&mut Builder) -> Bu + Send + 'static, { // Spawn new runtime in thread to prevent reactor execution context conflict let test_name = thread::current().name().unwrap_or("").to_string(); thread::spawn(move || { let rt = runtime::Builder::new_current_thread() .enable_all() .build() .expect("new rt"); let listener = rt.block_on(async move { tokio::net::TcpListener::bind(&std::net::SocketAddr::from(([127, 0, 0, 1], 0))) .await .unwrap() }); let addr = listener.local_addr().unwrap(); let (shutdown_tx, mut shutdown_rx) = oneshot::channel(); let (panic_tx, panic_rx) = std_mpsc::channel(); let (events_tx, events_rx) = std_mpsc::channel(); let tname = format!( "test({test_name})-support-server", ); thread::Builder::new() .name(tname) .spawn(move || { rt.block_on(async move { let mut builder = hyper_util::server::conn::auto::Builder::new(hyper_util::rt::TokioExecutor::new()); apply_config(&mut builder); loop { tokio::select! 
{ _ = &mut shutdown_rx => { break; } accepted = listener.accept() => { let (io, _) = accepted.expect("accepted"); let func = func.clone(); let svc = hyper::service::service_fn(func); let builder = builder.clone(); let events_tx = events_tx.clone(); tokio::spawn(async move { let _ = builder.serve_connection_with_upgrades(hyper_util::rt::TokioIo::new(io), svc).await; let _ = events_tx.send(Event::ConnectionClosed); }); } } } let _ = panic_tx.send(()); }); }) .expect("thread spawn"); Server { addr, panic_rx, events_rx, shutdown_tx: Some(shutdown_tx), } }) .join() .unwrap() } #[allow(unused)] pub fn low_level_with_response(do_response: F) -> Server where for<'c> F: Fn(&'c [u8], &'c mut TcpStream) -> Box + Send + 'c> + Clone + Send + 'static, { // Spawn new runtime in thread to prevent reactor execution context conflict let test_name = thread::current().name().unwrap_or("").to_string(); thread::spawn(move || { let rt = runtime::Builder::new_current_thread() .enable_all() .build() .expect("new rt"); let listener = rt.block_on(async move { tokio::net::TcpListener::bind(&std::net::SocketAddr::from(([127, 0, 0, 1], 0))) .await .unwrap() }); let addr = listener.local_addr().unwrap(); let (shutdown_tx, mut shutdown_rx) = oneshot::channel(); let (panic_tx, panic_rx) = std_mpsc::channel(); let (events_tx, events_rx) = std_mpsc::channel(); let tname = format!("test({test_name})-support-server",); thread::Builder::new() .name(tname) .spawn(move || { rt.block_on(async move { loop { tokio::select! 
{ _ = &mut shutdown_rx => { break; } accepted = listener.accept() => { let (io, _) = accepted.expect("accepted"); let do_response = do_response.clone(); let events_tx = events_tx.clone(); tokio::spawn(async move { low_level_server_client(io, do_response).await; let _ = events_tx.send(Event::ConnectionClosed); }); } } } let _ = panic_tx.send(()); }); }) .expect("thread spawn"); Server { addr, panic_rx, events_rx, shutdown_tx: Some(shutdown_tx), } }) .join() .unwrap() } #[allow(unused)] async fn low_level_server_client(mut client_socket: TcpStream, do_response: F) where for<'c> F: Fn(&'c [u8], &'c mut TcpStream) -> Box + Send + 'c>, { loop { let request = low_level_read_http_request(&mut client_socket) .await .expect("read_http_request failed"); if request.is_empty() { // connection closed by client break; } Box::into_pin(do_response(&request, &mut client_socket)).await; } } #[allow(unused)] async fn low_level_read_http_request(client_socket: &mut TcpStream) -> io::Result> { let mut buf = Vec::new(); // Read until the delimiter "\r\n\r\n" is found loop { let mut temp_buffer = [0; 1024]; let n = client_socket.read(&mut temp_buffer).await?; if n == 0 { break; } buf.extend_from_slice(&temp_buffer[..n]); if let Some(pos) = buf.windows(4).position(|window| window == b"\r\n\r\n") { return Ok(buf.drain(..pos + 4).collect()); } } Ok(buf) } ================================================ FILE: tests/timeouts.rs ================================================ mod support; use std::time::Duration; use pretty_env_logger::env_logger; use support::server; use wreq::Client; #[tokio::test] async fn client_timeout() { let _ = env_logger::try_init(); let server = server::http(move |_req| { async { // delay returning the response tokio::time::sleep(Duration::from_millis(300)).await; http::Response::default() } }); let client = Client::builder() .timeout(Duration::from_millis(100)) .no_proxy() .build() .unwrap(); let url = format!("http://{}/slow", server.addr()); let err = 
client.get(&url).send().await.unwrap_err(); assert!(err.is_timeout()); assert_eq!(err.uri().map(|u| u.to_string()), Some(url)); } #[tokio::test] async fn request_timeout() { let _ = env_logger::try_init(); let server = server::http(move |_req| { async { // delay returning the response tokio::time::sleep(Duration::from_millis(300)).await; http::Response::default() } }); let client = Client::builder().no_proxy().build().unwrap(); let url = format!("http://{}/slow", server.addr()); let err = client .get(&url) .timeout(Duration::from_millis(100)) .send() .await .unwrap_err(); assert!(err.is_timeout() && !err.is_connect()); assert_eq!(err.uri().map(|u| u.to_string()), Some(url)); } #[tokio::test] async fn connect_timeout() { let _ = env_logger::try_init(); let client = Client::builder() .connect_timeout(Duration::from_millis(100)) .no_proxy() .build() .unwrap(); let url = "http://192.0.2.1:81/slow"; let err = client .get(url) .timeout(Duration::from_millis(1000)) .send() .await .unwrap_err(); assert!(err.is_timeout()); } #[tokio::test] async fn connect_many_timeout_succeeds() { let _ = env_logger::try_init(); let server = server::http(move |_req| async { http::Response::default() }); let port = server.addr().port(); let client = Client::builder() .resolve_to_addrs( "many_addrs", ["127.0.0.1:81".parse().unwrap(), server.addr()], ) .connect_timeout(Duration::from_millis(100)) .no_proxy() .build() .unwrap(); let url = format!("http://many_addrs:{port}/eventual"); let _ = client .get(url) .timeout(Duration::from_millis(1000)) .send() .await .unwrap(); } #[tokio::test] async fn connect_many_timeout() { let _ = env_logger::try_init(); let client = Client::builder() .resolve_to_addrs( "many_addrs", [ "192.0.2.1:81".parse().unwrap(), "192.0.2.2:81".parse().unwrap(), ], ) .connect_timeout(Duration::from_millis(100)) .no_proxy() .build() .unwrap(); let url = "http://many_addrs:81/slow".to_string(); let err = client .get(url) .timeout(Duration::from_millis(1000)) .send() .await 
.unwrap_err(); assert!(err.is_connect() && err.is_timeout()); } #[cfg(feature = "stream")] #[tokio::test] async fn response_timeout() { let _ = env_logger::try_init(); let server = server::http(move |_req| { async { // immediate response, but delayed body let body = wreq::Body::wrap_stream(futures_util::stream::once(async { tokio::time::sleep(Duration::from_secs(1)).await; Ok::<_, std::convert::Infallible>("Hello") })); http::Response::new(body) } }); let client = Client::builder() .timeout(Duration::from_millis(500)) .no_proxy() .build() .unwrap(); let url = format!("http://{}/slow", server.addr()); let res = client.get(&url).send().await.expect("Failed to get"); let err = res.text().await.unwrap_err(); assert!(err.is_timeout()); } #[tokio::test] async fn read_timeout_applies_to_headers() { let _ = env_logger::try_init(); let server = server::http(move |_req| { async { // delay returning the response tokio::time::sleep(Duration::from_millis(300)).await; http::Response::default() } }); let client = Client::builder() .read_timeout(Duration::from_millis(100)) .no_proxy() .build() .unwrap(); let url = format!("http://{}/slow", server.addr()); let err = client.get(&url).send().await.unwrap_err(); assert!(err.is_timeout()); assert_eq!(err.uri().map(|u| u.to_string()), Some(url)); } #[cfg(feature = "stream")] #[tokio::test] async fn read_timeout_applies_to_body() { let _ = env_logger::try_init(); let server = server::http(move |_req| { async { // immediate response, but delayed body let body = wreq::Body::wrap_stream(futures_util::stream::once(async { tokio::time::sleep(Duration::from_millis(300)).await; Ok::<_, std::convert::Infallible>("Hello") })); http::Response::new(body) } }); let client = Client::builder() .read_timeout(Duration::from_millis(100)) .no_proxy() .build() .unwrap(); let url = format!("http://{}/slow", server.addr()); let res = client.get(&url).send().await.expect("Failed to get"); let err = res.text().await.unwrap_err(); assert!(err.is_timeout()); } 
#[cfg(feature = "stream")] #[tokio::test] async fn read_timeout_allows_slow_response_body() { let _ = env_logger::try_init(); let server = server::http(move |_req| { async { // immediate response, but body that has slow chunks let slow = futures_util::stream::unfold(0, |state| async move { if state < 3 { tokio::time::sleep(Duration::from_millis(100)).await; Some(( Ok::<_, std::convert::Infallible>(state.to_string()), state + 1, )) } else { None } }); let body = wreq::Body::wrap_stream(slow); http::Response::new(body) } }); let client = Client::builder() .read_timeout(Duration::from_millis(200)) //.timeout(Duration::from_millis(200)) .no_proxy() .build() .unwrap(); let url = format!("http://{}/slow", server.addr()); let res = client.get(&url).send().await.expect("Failed to get"); let body = res.text().await.expect("body text"); assert_eq!(body, "012"); } #[tokio::test] async fn response_body_timeout_forwards_size_hint() { let _ = env_logger::try_init(); let server = server::http(move |_req| async { http::Response::new(b"hello".to_vec().into()) }); let client = Client::builder().no_proxy().build().unwrap(); let url = format!("http://{}/slow", server.addr()); let res = client .get(&url) .timeout(Duration::from_secs(1)) .send() .await .expect("response"); assert_eq!(res.content_length(), Some(5)); } ================================================ FILE: tests/unix_socket.rs ================================================ #![cfg(unix)] use std::{hash::BuildHasher, time::Duration}; use http::Method; use http_body_util::Full; use hyper::{Request, Response, body::Incoming, service::service_fn}; use hyper_util::{ rt::{TokioExecutor, TokioIo}, server::conn::auto::Builder, }; use tokio::{net::UnixListener, task}; use wreq::{Client, Proxy}; fn random_sock_path() -> std::path::PathBuf { let mut buf = std::env::temp_dir(); // libstd uses system random to create each one let rng = std::collections::hash_map::RandomState::new(); let n = rng.hash_one("uds-sock"); 
buf.push(format!("test-uds-sock-{}", n)); buf } #[tokio::test] async fn test_unix_socket() { let sock_path = random_sock_path(); let listener = UnixListener::bind(&sock_path).unwrap(); let server = async move { loop { let (stream, _) = listener.accept().await.unwrap(); let io = TokioIo::new(stream); let service = service_fn(|_req: Request| async { Ok::<_, hyper::Error>(Response::new(Full::new(&b"hello unix"[..]))) }); task::spawn(async move { if let Err(e) = hyper::server::conn::http1::Builder::new() .serve_connection(io, service) .await { eprintln!("server error: {:?}", e); } }); } }; tokio::spawn(server); let client = Client::builder() .proxy(Proxy::unix(sock_path).unwrap()) .timeout(Duration::from_secs(10)) .build() .unwrap(); let resp = client.get("http://localhost/").send().await.unwrap(); let body = resp.text().await.unwrap(); assert_eq!(body, "hello unix"); } #[tokio::test] async fn test_proxy_unix_socket() { let sock_path = random_sock_path(); let listener = UnixListener::bind(&sock_path).unwrap(); let server = async move { loop { let (stream, _) = listener.accept().await.unwrap(); let io = TokioIo::new(stream); let service = service_fn(|req: Request| { async move { if Method::CONNECT == req.method() { // Received an HTTP request like: // ``` // CONNECT www.domain.com:443 HTTP/1.1 // Host: www.domain.com:443 // Proxy-Connection: Keep-Alive // ``` // // When HTTP method is CONNECT we should return an empty body, // then we can eventually upgrade the connection and talk a new protocol. // // Note: only after client received an empty body with STATUS_OK can the // connection be upgraded, so we can't return a response inside // `on_upgrade` future. 
let authority = req.uri().authority().cloned().unwrap(); tokio::task::spawn({ let req = req; async move { match hyper::upgrade::on(req).await { Ok(upgraded) => { tracing::info!("upgraded connection to: {}", authority); if let Ok(mut io) = tokio::net::TcpStream::connect(authority.to_string()) .await { let _ = tokio::io::copy_bidirectional( &mut TokioIo::new(upgraded), &mut io, ) .await; } } Err(e) => tracing::warn!("upgrade error: {}", e), } } }); Ok::<_, hyper::Error>(Response::new(Full::new(&b""[..]))) } else { Ok::<_, hyper::Error>(Response::new(Full::new( &b"unsupported request method"[..], ))) } } }); task::spawn(async move { if let Err(e) = Builder::new(TokioExecutor::new()) .serve_connection_with_upgrades(io, service) .await { eprintln!("server error: {:?}", e); } }); } }; tokio::spawn(server); let client = Client::builder() .proxy(Proxy::unix(sock_path).unwrap()) .timeout(Duration::from_secs(10)) .build() .unwrap(); let resp = client.get("https://www.google.com").send().await.unwrap(); assert!(resp.status().is_success(), "Expected successful response"); } ================================================ FILE: tests/upgrade.rs ================================================ mod support; use http::Method; use support::server; use tokio::io::{AsyncReadExt, AsyncWriteExt}; use wreq::Client; #[tokio::test] async fn http_upgrade() { let server = server::http(move |req| { assert_eq!(req.method(), "GET"); assert_eq!(req.headers()["connection"], "upgrade"); assert_eq!(req.headers()["upgrade"], "foobar"); tokio::spawn(async move { let mut upgraded = hyper_util::rt::TokioIo::new(hyper::upgrade::on(req).await.unwrap()); let mut buf = vec![0; 7]; upgraded.read_exact(&mut buf).await.unwrap(); assert_eq!(buf, b"foo=bar"); upgraded.write_all(b"bar=foo").await.unwrap(); }); async { http::Response::builder() .status(http::StatusCode::SWITCHING_PROTOCOLS) .header(http::header::CONNECTION, "upgrade") .header(http::header::UPGRADE, "foobar") .body(wreq::Body::default()) 
.unwrap() } }); let res = Client::builder() .build() .unwrap() .get(format!("http://{}", server.addr())) .header(http::header::CONNECTION, "upgrade") .header(http::header::UPGRADE, "foobar") .send() .await .unwrap(); assert_eq!(res.status(), http::StatusCode::SWITCHING_PROTOCOLS); let mut upgraded = res.upgrade().await.unwrap(); upgraded.write_all(b"foo=bar").await.unwrap(); let mut buf = vec![]; upgraded.read_to_end(&mut buf).await.unwrap(); assert_eq!(buf, b"bar=foo"); } #[tokio::test] async fn http2_upgrade() { let server = server::http_with_config( move |req| { assert_eq!(req.method(), http::Method::CONNECT); assert_eq!(req.version(), http::Version::HTTP_2); tokio::spawn(async move { let mut upgraded = hyper_util::rt::TokioIo::new(hyper::upgrade::on(req).await.unwrap()); let mut buf = vec![0; 7]; upgraded.read_exact(&mut buf).await.unwrap(); assert_eq!(buf, b"foo=bar"); upgraded.write_all(b"bar=foo").await.unwrap(); }); async { Ok::<_, std::convert::Infallible>(http::Response::default()) } }, |builder| { let mut http2 = builder.http2(); http2.enable_connect_protocol(); }, ); let res = Client::builder() .http2_only() .build() .unwrap() .request(Method::CONNECT, format!("http://{}", server.addr())) .send() .await .unwrap(); assert_eq!(res.status(), http::StatusCode::OK); assert_eq!(res.version(), http::Version::HTTP_2); let mut upgraded = res.upgrade().await.unwrap(); upgraded.write_all(b"foo=bar").await.unwrap(); let mut buf = vec![]; upgraded.read_to_end(&mut buf).await.unwrap(); assert_eq!(buf, b"bar=foo"); } ================================================ FILE: tests/zstd.rs ================================================ mod support; use support::server; use tokio::io::AsyncWriteExt; use wreq::Client; #[tokio::test] async fn zstd_response() { zstd_case(10_000, 4096).await; } #[tokio::test] async fn zstd_single_byte_chunks() { zstd_case(10, 1).await; } #[tokio::test] async fn test_zstd_empty_body() { let server = server::http(move |req| async move { 
assert_eq!(req.method(), "HEAD"); http::Response::builder() .header("content-encoding", "zstd") .body(Default::default()) .unwrap() }); let res = wreq::head(format!("http://{}/zstd", server.addr())) .send() .await .unwrap(); let body = res.text().await.unwrap(); assert_eq!(body, ""); } #[tokio::test] async fn test_accept_header_is_not_changed_if_set() { let server = server::http(move |req| async move { assert_eq!(req.headers()["accept"], "application/json"); assert!( req.headers()["accept-encoding"] .to_str() .unwrap() .contains("zstd") ); http::Response::default() }); let res = wreq::get(format!("http://{}/accept", server.addr())) .header( wreq::header::ACCEPT, wreq::header::HeaderValue::from_static("application/json"), ) .send() .await .unwrap(); assert_eq!(res.status(), wreq::StatusCode::OK); } #[tokio::test] async fn test_accept_encoding_header_is_not_changed_if_set() { let server = server::http(move |req| async move { assert_eq!(req.headers()["accept"], "*/*"); assert_eq!(req.headers()["accept-encoding"], "identity"); http::Response::default() }); let res = wreq::get(format!("http://{}/accept-encoding", server.addr())) .header(wreq::header::ACCEPT, "*/*") .header( wreq::header::ACCEPT_ENCODING, wreq::header::HeaderValue::from_static("identity"), ) .send() .await .unwrap(); assert_eq!(res.status(), wreq::StatusCode::OK); } async fn zstd_case(response_size: usize, chunk_size: usize) { use futures_util::stream::StreamExt; let content: String = (0..response_size).fold(String::new(), |mut acc, i| { acc.push_str(&format!("test {i}")); acc }); let zstded_content = zstd::encode_all(content.as_bytes(), 3).unwrap(); let mut response = format!( "\ HTTP/1.1 200 OK\r\n\ Server: test-accept\r\n\ Content-Encoding: zstd\r\n\ Content-Length: {}\r\n\ \r\n", &zstded_content.len() ) .into_bytes(); response.extend(&zstded_content); let server = server::http(move |req| { assert!( req.headers()["accept-encoding"] .to_str() .unwrap() .contains("zstd") ); let zstded = 
zstded_content.clone(); async move { let len = zstded.len(); let stream = futures_util::stream::unfold((zstded, 0), move |(zstded, pos)| async move { let chunk = zstded.chunks(chunk_size).nth(pos)?.to_vec(); Some((chunk, (zstded, pos + 1))) }); let body = wreq::Body::wrap_stream(stream.map(Ok::<_, std::convert::Infallible>)); http::Response::builder() .header("content-encoding", "zstd") .header("content-length", len) .body(body) .unwrap() } }); let res = wreq::get(format!("http://{}/zstd", server.addr())) .send() .await .expect("response"); let body = res.text().await.expect("text"); assert_eq!(body, content); } const COMPRESSED_RESPONSE_HEADERS: &[u8] = b"HTTP/1.1 200 OK\x0d\x0a\ Content-Type: text/plain\x0d\x0a\ Connection: keep-alive\x0d\x0a\ Content-Encoding: zstd\x0d\x0a"; const RESPONSE_CONTENT: &str = "some message here"; fn zstd_compress(input: &[u8]) -> Vec { zstd::encode_all(input, 3).unwrap() } #[tokio::test] async fn test_non_chunked_non_fragmented_response() { let server = server::low_level_with_response(|_raw_request, client_socket| { Box::new(async move { let zstded_content = zstd_compress(RESPONSE_CONTENT.as_bytes()); let content_length_header = format!("Content-Length: {}\r\n\r\n", zstded_content.len()).into_bytes(); let response = [ COMPRESSED_RESPONSE_HEADERS, &content_length_header, &zstded_content, ] .concat(); client_socket .write_all(response.as_slice()) .await .expect("response write_all failed"); client_socket.flush().await.expect("response flush failed"); }) }); let res = wreq::get(format!("http://{}/", server.addr())) .send() .await .expect("response"); assert_eq!(res.text().await.expect("text"), RESPONSE_CONTENT); } #[tokio::test] async fn test_chunked_fragmented_response_1() { const DELAY_BETWEEN_RESPONSE_PARTS: tokio::time::Duration = tokio::time::Duration::from_millis(1000); const DELAY_MARGIN: tokio::time::Duration = tokio::time::Duration::from_millis(50); let server = server::low_level_with_response(|_raw_request, client_socket| { 
Box::new(async move { let zstded_content = zstd_compress(RESPONSE_CONTENT.as_bytes()); let response_first_part = [ COMPRESSED_RESPONSE_HEADERS, format!( "Transfer-Encoding: chunked\r\n\r\n{:x}\r\n", zstded_content.len() ) .as_bytes(), &zstded_content, ] .concat(); let response_second_part = b"\r\n0\r\n\r\n"; client_socket .write_all(response_first_part.as_slice()) .await .expect("response_first_part write_all failed"); client_socket .flush() .await .expect("response_first_part flush failed"); tokio::time::sleep(DELAY_BETWEEN_RESPONSE_PARTS).await; client_socket .write_all(response_second_part) .await .expect("response_second_part write_all failed"); client_socket .flush() .await .expect("response_second_part flush failed"); }) }); let start = tokio::time::Instant::now(); let res = wreq::get(format!("http://{}/", server.addr())) .send() .await .expect("response"); assert_eq!(res.text().await.expect("text"), RESPONSE_CONTENT); assert!(start.elapsed() >= DELAY_BETWEEN_RESPONSE_PARTS - DELAY_MARGIN); } #[tokio::test] async fn test_chunked_fragmented_response_2() { const DELAY_BETWEEN_RESPONSE_PARTS: tokio::time::Duration = tokio::time::Duration::from_millis(1000); const DELAY_MARGIN: tokio::time::Duration = tokio::time::Duration::from_millis(50); let server = server::low_level_with_response(|_raw_request, client_socket| { Box::new(async move { let zstded_content = zstd_compress(RESPONSE_CONTENT.as_bytes()); let response_first_part = [ COMPRESSED_RESPONSE_HEADERS, format!( "Transfer-Encoding: chunked\r\n\r\n{:x}\r\n", zstded_content.len() ) .as_bytes(), &zstded_content, b"\r\n", ] .concat(); let response_second_part = b"0\r\n\r\n"; client_socket .write_all(response_first_part.as_slice()) .await .expect("response_first_part write_all failed"); client_socket .flush() .await .expect("response_first_part flush failed"); tokio::time::sleep(DELAY_BETWEEN_RESPONSE_PARTS).await; client_socket .write_all(response_second_part) .await .expect("response_second_part write_all 
failed"); client_socket .flush() .await .expect("response_second_part flush failed"); }) }); let start = tokio::time::Instant::now(); let res = wreq::get(format!("http://{}/", server.addr())) .send() .await .expect("response"); assert_eq!(res.text().await.expect("text"), RESPONSE_CONTENT); assert!(start.elapsed() >= DELAY_BETWEEN_RESPONSE_PARTS - DELAY_MARGIN); } #[tokio::test] async fn test_chunked_fragmented_response_with_extra_bytes() { const DELAY_BETWEEN_RESPONSE_PARTS: tokio::time::Duration = tokio::time::Duration::from_millis(1000); const DELAY_MARGIN: tokio::time::Duration = tokio::time::Duration::from_millis(50); let server = server::low_level_with_response(|_raw_request, client_socket| { Box::new(async move { let zstded_content = zstd_compress(RESPONSE_CONTENT.as_bytes()); let response_first_part = [ COMPRESSED_RESPONSE_HEADERS, format!( "Transfer-Encoding: chunked\r\n\r\n{:x}\r\n", zstded_content.len() ) .as_bytes(), &zstded_content, ] .concat(); let response_second_part = b"\r\n2ab\r\n0\r\n\r\n"; client_socket .write_all(response_first_part.as_slice()) .await .expect("response_first_part write_all failed"); client_socket .flush() .await .expect("response_first_part flush failed"); tokio::time::sleep(DELAY_BETWEEN_RESPONSE_PARTS).await; client_socket .write_all(response_second_part) .await .expect("response_second_part write_all failed"); client_socket .flush() .await .expect("response_second_part flush failed"); }) }); let start = tokio::time::Instant::now(); let res = wreq::get(format!("http://{}/", server.addr())) .send() .await .expect("response"); let err = res.text().await.expect_err("there must be an error"); assert!(err.is_decode()); assert!(start.elapsed() >= DELAY_BETWEEN_RESPONSE_PARTS - DELAY_MARGIN); } // Big response can have multiple ZSTD frames in it #[tokio::test] async fn test_non_chunked_non_fragmented_multiple_frames_response() { let server = server::low_level_with_response(|_raw_request, client_socket| { Box::new(async move { // 
Split the content into two parts let content_bytes = RESPONSE_CONTENT.as_bytes(); let mid = content_bytes.len() / 2; // Compress each part separately to create multiple ZSTD frames let compressed_part1 = zstd::encode_all(&content_bytes[0..mid], 3).unwrap(); let compressed_part2 = zstd::encode_all(&content_bytes[mid..], 3).unwrap(); // Concatenate the compressed frames let mut zstded_content = compressed_part1; zstded_content.extend_from_slice(&compressed_part2); // Set Content-Length to the total length of the concatenated frames let content_length_header = format!("Content-Length: {}\r\n\r\n", zstded_content.len()).into_bytes(); let response = [ COMPRESSED_RESPONSE_HEADERS, &content_length_header, &zstded_content, ] .concat(); client_socket .write_all(response.as_slice()) .await .expect("response write_all failed"); client_socket.flush().await.expect("response flush failed"); }) }); let res = wreq::get(format!("http://{}/", server.addr())) .send() .await .expect("response"); assert_eq!(res.text().await.expect("text"), RESPONSE_CONTENT); } #[tokio::test] async fn test_chunked_fragmented_multiple_frames_in_one_chunk() { // Define constants for delay and timing margin const DELAY_BETWEEN_RESPONSE_PARTS: tokio::time::Duration = tokio::time::Duration::from_millis(1000); // 1-second delay const DELAY_MARGIN: tokio::time::Duration = tokio::time::Duration::from_millis(50); // Margin for timing assertions // Set up a low-level server let server = server::low_level_with_response(|_raw_request, client_socket| { Box::new(async move { // Split RESPONSE_CONTENT into two parts let mid = RESPONSE_CONTENT.len() / 2; let part1 = &RESPONSE_CONTENT[0..mid]; let part2 = &RESPONSE_CONTENT[mid..]; // Compress each part separately to create two ZSTD frames let compressed_part1 = zstd_compress(part1.as_bytes()); let compressed_part2 = zstd_compress(part2.as_bytes()); // Concatenate the frames into a single chunk's data let chunk_data = [compressed_part1.as_slice(), 
compressed_part2.as_slice()].concat(); // Calculate the chunk size in bytes let chunk_size = chunk_data.len(); // Prepare the initial response part: headers + chunk size let headers = [ COMPRESSED_RESPONSE_HEADERS, /* e.g., "HTTP/1.1 200 OK\r\nContent-Encoding: * zstd\r\n" */ b"Transfer-Encoding: chunked\r\n\r\n", // Indicate chunked encoding format!("{chunk_size:x}\r\n").as_bytes(), // Chunk size in hex ] .concat(); // Send headers + chunk size + chunk data client_socket .write_all([headers.as_slice(), &chunk_data].concat().as_slice()) .await .expect("write_all failed"); client_socket.flush().await.expect("flush failed"); // Introduce a delay to simulate fragmentation tokio::time::sleep(DELAY_BETWEEN_RESPONSE_PARTS).await; // Send chunk terminator + final chunk client_socket .write_all(b"\r\n0\r\n\r\n") .await .expect("write_all failed"); client_socket.flush().await.expect("flush failed"); }) }); // Record the start time for delay verification let start = tokio::time::Instant::now(); let res = wreq::get(format!("http://{}/", server.addr())) .send() .await .expect("Failed to get response"); // Verify the decompressed response matches the original content assert_eq!( res.text().await.expect("Failed to read text"), RESPONSE_CONTENT ); assert!(start.elapsed() >= DELAY_BETWEEN_RESPONSE_PARTS - DELAY_MARGIN); } #[tokio::test] async fn test_connection_reuse_with_chunked_fragmented_multiple_frames_in_one_chunk() { // Define constants for delay and timing margin const DELAY_BETWEEN_RESPONSE_PARTS: tokio::time::Duration = tokio::time::Duration::from_millis(1000); // 1-second delay const DELAY_MARGIN: tokio::time::Duration = tokio::time::Duration::from_millis(50); // Margin for timing assertions // We will record the peer addresses of each client request here let peer_addrs = std::sync::Arc::new(std::sync::Mutex::new(Vec::::new())); let peer_addrs_clone = peer_addrs.clone(); // Set up a low-level server (it will reuse existing client connection, executing callback for // 
each client request) let server = server::low_level_with_response(move |_raw_request, client_socket| { let peer_addrs = peer_addrs_clone.clone(); Box::new(async move { // Split RESPONSE_CONTENT into two parts let mid = RESPONSE_CONTENT.len() / 2; let part1 = &RESPONSE_CONTENT[0..mid]; let part2 = &RESPONSE_CONTENT[mid..]; // Compress each part separately to create two ZSTD frames let compressed_part1 = zstd_compress(part1.as_bytes()); let compressed_part2 = zstd_compress(part2.as_bytes()); // Concatenate the frames into a single chunk's data let chunk_data = [compressed_part1.as_slice(), compressed_part2.as_slice()].concat(); // Calculate the chunk size in bytes let chunk_size = chunk_data.len(); // Prepare the initial response part: headers + chunk size let headers = [ COMPRESSED_RESPONSE_HEADERS, /* e.g., "HTTP/1.1 200 OK\r\nContent-Encoding: * zstd\r\n" */ b"Transfer-Encoding: chunked\r\n\r\n", // Indicate chunked encoding format!("{chunk_size:x}\r\n").as_bytes(), // Chunk size in hex ] .concat(); // Send headers + chunk size + chunk data client_socket .write_all([headers.as_slice(), &chunk_data].concat().as_slice()) .await .expect("write_all failed"); client_socket.flush().await.expect("flush failed"); // Introduce a delay to simulate fragmentation tokio::time::sleep(DELAY_BETWEEN_RESPONSE_PARTS).await; peer_addrs .lock() .unwrap() .push(client_socket.peer_addr().unwrap()); // Send chunk terminator + final chunk client_socket .write_all(b"\r\n0\r\n\r\n") .await .expect("write_all failed"); client_socket.flush().await.expect("flush failed"); }) }); let client = Client::builder() .pool_idle_timeout(std::time::Duration::from_secs(30)) .pool_max_idle_per_host(1) .build() .unwrap(); const NUMBER_OF_REQUESTS: usize = 5; for _ in 0..NUMBER_OF_REQUESTS { // Record the start time for delay verification let start = tokio::time::Instant::now(); let res = client .get(format!("http://{}/", server.addr())) .send() .await .expect("Failed to get response"); // Verify the 
decompressed response matches the original content assert_eq!( res.text().await.expect("Failed to read text"), RESPONSE_CONTENT ); assert!(start.elapsed() >= DELAY_BETWEEN_RESPONSE_PARTS - DELAY_MARGIN); } drop(client); // Check that all peer addresses are the same let peer_addrs = peer_addrs.lock().unwrap(); assert_eq!( peer_addrs.len(), NUMBER_OF_REQUESTS, "Expected {} peer addresses, but got {}", NUMBER_OF_REQUESTS, peer_addrs.len() ); let first_addr = peer_addrs[0]; assert!( peer_addrs.iter().all(|addr| addr == &first_addr), "All peer addresses should be the same, but found differences: {peer_addrs:?}" ); }