Repository: ngrok/ngrok-rs Branch: main Commit: 4133242da599 Files: 50 Total size: 310.5 KB Directory structure: gitextract_5pce2p9h/ ├── .envrc ├── .github/ │ └── workflows/ │ ├── ci.yml │ ├── docs.yml │ ├── publish.yml │ ├── release.yml │ └── rust-cache/ │ └── action.yml ├── .gitignore ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── cargo-doc-ngrok/ │ ├── Cargo.toml │ └── src/ │ └── main.rs ├── flake.nix ├── ngrok/ │ ├── CHANGELOG.md │ ├── Cargo.toml │ ├── README.md │ ├── assets/ │ │ ├── ngrok.ca.crt │ │ ├── policy-inbound.json │ │ └── policy.json │ ├── examples/ │ │ ├── axum.rs │ │ ├── connect.rs │ │ ├── domain.crt │ │ ├── domain.key │ │ ├── labeled.rs │ │ ├── mingrok.rs │ │ └── tls.rs │ └── src/ │ ├── config/ │ │ ├── common.rs │ │ ├── headers.rs │ │ ├── http.rs │ │ ├── labeled.rs │ │ ├── oauth.rs │ │ ├── oidc.rs │ │ ├── policies.rs │ │ ├── tcp.rs │ │ ├── tls.rs │ │ └── webhook_verification.rs │ ├── conn.rs │ ├── forwarder.rs │ ├── internals/ │ │ ├── proto.rs │ │ ├── raw_session.rs │ │ └── rpc.rs │ ├── lib.rs │ ├── online_tests.rs │ ├── proxy_proto.rs │ ├── session.rs │ ├── tunnel.rs │ └── tunnel_ext.rs └── rustfmt.toml ================================================ FILE CONTENTS ================================================ ================================================ FILE: .envrc ================================================ use flake ================================================ FILE: .github/workflows/ci.yml ================================================ on: push: branches: [main] pull_request: workflow_call: secrets: NGROK_AUTHTOKEN: required: true name: Continuous integration jobs: udeps: name: Udeps runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: jrobsonchase/direnv-action@v0.7 - uses: ./.github/workflows/rust-cache - uses: actions-rs/cargo@v1 with: command: udeps args: --workspace --all-targets --all-features fmt: name: Rustfmt runs-on: ubuntu-latest steps: - uses: 
actions/checkout@v4 - uses: jrobsonchase/direnv-action@v0.7 - uses: actions-rs/cargo@v1 with: command: fmt args: --all -- --check clippy: name: Clippy runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: jrobsonchase/direnv-action@v0.7 - uses: ./.github/workflows/rust-cache - uses: actions-rs/cargo@v1 with: command: clippy args: --all-targets --all-features --workspace -- -D warnings test-nix: name: Test Nix runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: jrobsonchase/direnv-action@v0.7 - uses: ./.github/workflows/rust-cache - uses: actions-rs/cargo@v1 env: NGROK_AUTHTOKEN: ${{ secrets.NGROK_AUTHTOKEN }} with: command: test args: --workspace --all-targets test-stable: name: Test Stable runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: actions-rs/toolchain@v1 with: profile: minimal toolchain: stable override: true # We don't actually have sccache installed here (yet), but it still # benefits from the cargo cache. - uses: ./.github/workflows/rust-cache - uses: actions-rs/cargo@v1 env: NGROK_AUTHTOKEN: ${{ secrets.NGROK_AUTHTOKEN }} with: command: test args: --features=paid-tests,long-tests --workspace --all-targets test-win: name: Test Windows Stable runs-on: windows-latest steps: - uses: actions/checkout@v4 - uses: actions-rs/toolchain@v1 with: profile: minimal toolchain: stable override: true # We don't actually have sccache installed here (yet), but it still # benefits from the cargo cache. 
- uses: ./.github/workflows/rust-cache - uses: actions-rs/cargo@v1 env: NGROK_AUTHTOKEN: ${{ secrets.NGROK_AUTHTOKEN }} with: command: test args: --workspace --all-targets semver: name: Semver Check runs-on: ubuntu-latest strategy: matrix: crate: [muxado, ngrok] steps: - uses: actions/checkout@v4 - uses: jrobsonchase/direnv-action@v0.7 - uses: ./.github/workflows/rust-cache - uses: actions-rs/cargo@v1 name: semver checks with: command: semver-checks args: check-release -p ${{ matrix.crate }} ================================================ FILE: .github/workflows/docs.yml ================================================ on: push: branches: [main] name: Publish Docs jobs: build: name: Build Rustdocs runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: actions-rs/toolchain@v1 with: profile: minimal toolchain: stable override: true - name: update apt run: sudo apt-get update - name: install protoc run: sudo apt-get -o Acquire::Retries=3 install -y protobuf-compiler - uses: actions-rs/cargo@v1 with: command: doc args: --no-deps - name: Archive docs shell: sh run: | echo "" > target/doc/index.html chmod -c -R +r target/doc | while read line; do echo "::warning title=Changed permissions on a file::$line" done - name: Upload static files as artifact uses: actions/upload-pages-artifact@v3 with: path: target/doc # Deployment job deploy: # Grant GITHUB_TOKEN the permissions required to make a Pages deployment permissions: pages: write # to deploy to Pages id-token: write # to verify the deployment originates from an appropriate source environment: name: github-pages url: ${{ steps.deployment.outputs.page_url }} runs-on: ubuntu-latest needs: build steps: - name: Deploy to GitHub Pages id: deployment uses: actions/deploy-pages@v4 ================================================ FILE: .github/workflows/publish.yml ================================================ on: workflow_dispatch: name: Publish All jobs: ci: name: Run CI uses: ./.github/workflows/ci.yml 
secrets: NGROK_AUTHTOKEN: ${{ secrets.NGROK_AUTHTOKEN }} # Publishing jobs - these run sequentially as before publish-muxado: name: Publish muxado uses: ./.github/workflows/release.yml needs: [ci] permissions: contents: write with: crate: muxado secrets: CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} publish-ngrok: name: Publish ngrok uses: ./.github/workflows/release.yml needs: [publish-muxado] if: needs.publish-muxado.result == 'success' || needs.publish-muxado.result == 'skipped' permissions: contents: write with: crate: ngrok secrets: CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} publish-cargo-doc-ngrok: name: Publish cargo-doc-ngrok uses: ./.github/workflows/release.yml needs: [publish-ngrok] if: needs.publish-ngrok.result == 'success' || needs.publish-ngrok.result == 'skipped' permissions: contents: write with: crate: cargo-doc-ngrok secrets: CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} ================================================ FILE: .github/workflows/release.yml ================================================ on: workflow_dispatch: inputs: crate: description: 'Crate to release' required: true default: 'ngrok' workflow_call: inputs: crate: description: 'Crate to release' required: true type: string secrets: CARGO_REGISTRY_TOKEN: required: true name: Release jobs: cargo-publish: name: Publish and Tag runs-on: ubuntu-latest permissions: contents: write continue-on-error: true steps: - uses: actions/checkout@v4 - uses: jrobsonchase/direnv-action@v0.7 - name: cargo publish run: | version="$(extract-crate-version ${{inputs.crate}})" crate="${{inputs.crate}}" tag="${crate}-v${version}" echo "Checking if crate $crate version $version exists on crates.io" result=$(cargo search $crate --limit 1 | grep "$version" || true) if [ -n "$result" ]; then echo "Crate $crate version $version already exists on crates.io, skipping publish." exit 0 fi echo "Crate version $version not found on crates.io, proceeding with publish." 
cargo publish -p $crate --token ${{ secrets.CARGO_REGISTRY_TOKEN }} - name: tag release run: | version="$(extract-crate-version ${{inputs.crate}})" git config user.name "GitHub Action" git config user.email noreply@ngrok.com tag="${{inputs.crate}}-v${version}" echo "Version ${version}, tag ${tag}" echo "Fetching all tags in the repository" git fetch --tags if git rev-parse "refs/tags/$tag" >/dev/null 2>&1; then echo "Tag $tag already exists, skipping tag creation." else echo "Tag $tag does not exist, pushing tag." git tag -a -m "Version ${version}" $tag git push --tags fi ================================================ FILE: .github/workflows/rust-cache/action.yml ================================================ name: 'rust cache setup' description: 'Set up cargo and sccache caches' inputs: {} outputs: {} runs: using: "composite" steps: - name: configure sccache uses: actions/github-script@v6 with: script: | core.exportVariable('ACTIONS_CACHE_URL', process.env.ACTIONS_CACHE_URL || ''); core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env.ACTIONS_RUNTIME_TOKEN || ''); core.exportVariable('SCCACHE_GHA_CACHE_TO', 'sccache-${{runner.os}}-${{github.ref_name}}'); core.exportVariable('SCCACHE_GHA_CACHE_FROM', 'sccache-${{runner.os}}-main,sccache-${{runner.os}}-'); - name: cargo registry cache uses: actions/cache@v3 with: key: cargo-${{ runner.os }}-${{ hashFiles('**/Cargo.toml') }}-${{ github.sha }} restore-keys: | cargo-${{ runner.os }}-${{ hashFiles('**/Cargo.toml') }}- cargo-${{ runner.os }}- path: | ~/.cargo/registry ~/.cargo/git ================================================ FILE: .gitignore ================================================ .env /target .direnv /.vscode *.swp ================================================ FILE: CODE_OF_CONDUCT.md ================================================ # ngrok Code of Conduct ## Our Pledge In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to make participation 
in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. ## Our Standards Examples of behavior that contributes to creating a positive environment include: - Using welcoming and inclusive language - Being respectful of differing viewpoints and experiences - Gracefully accepting constructive criticism - Focusing on what is best for the community - Showing empathy towards other community members Examples of unacceptable behavior by participants include: - The use of sexualized language or imagery and unwelcome sexual attention or advances - Trolling, insulting/derogatory comments, and personal or political attacks - Public or private harassment - Publishing others' private information, such as a physical or electronic address, without explicit permission - Other conduct which could reasonably be considered inappropriate in a professional setting ## Our Responsibilities The ngrok documentation team is responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. The ngrok documentation team has the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. ## Scope This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the ngrok docs project or its community. 
Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at [support@ngrok.com](mailto:support@ngrok.com). All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. ## Attribution This Code of Conduct is adapted from the Contributor Covenant, version 2.1, available at [https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. For answers to common questions about this code of conduct, see https://www.contributor-covenant.org/faq ================================================ FILE: CONTRIBUTING.md ================================================ # Contributing to ngrok-rust Thank you for deciding to contribute to ngrok-rust! ## Reporting a bug To report a bug, please [open a new issue](https://github.com/ngrok/ngrok-rust/issues/new) with clear reproduction steps. We will triage and investigate these issues at a regular interval. ## Contributing code Bugfixes and small improvements are always appreciated! For any larger changes or features, please [open a new issue](https://github.com/ngrok/ngrok-rust/issues/new) first to discuss whether the change makes sense. When in doubt, it's always okay to open an issue first. 
================================================ FILE: Cargo.toml ================================================ [workspace] resolver = "2" members = [ "muxado", "ngrok", "cargo-doc-ngrok", ] [profile.release] debug = 1 ================================================ FILE: LICENSE-APACHE ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: LICENSE-MIT ================================================ Copyright 2022 ngrok, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: cargo-doc-ngrok/Cargo.toml ================================================ [package] name = "cargo-doc-ngrok" version = "0.2.2" edition = "2021" license = "MIT OR Apache-2.0" description = "A cargo subcommand to build and serve documentation via ngrok" repository = "https://github.com/ngrok/ngrok-rust" [dependencies] awaitdrop = "0.1.2" axum = "0.7.4" bstr = "1.4.0" cargo_metadata = "0.15.2" clap = { version = "4.0.29", features = ["derive"] } futures = "0.3.25" http = "1.0.0" hyper = { version = "1.1.0", features = ["server"] } hyper-staticfile = "0.10.0" hyper-util = { version = "0.1.3", features = ["server", "tokio", "server-auto", "http1"] } ngrok = { path = "../ngrok", version = "0.18", features = ["hyper", "axum"] } tokio = { version = "1.23.0", features = ["full"] } watchexec = "2.3.0" # watchexec-signals 1.0.1 causes a compilation error. # this will likely be ironed out as they release watchexec 3.0.0 components. 
# https://github.com/watchexec/watchexec/issues/701 watchexec-signals = "=1.0.0" ================================================ FILE: cargo-doc-ngrok/src/main.rs ================================================ use std::{ io, path::PathBuf, process::Stdio, sync::Arc, }; use axum::BoxError; use clap::{ Args, Parser, Subcommand, }; use futures::TryStreamExt; use hyper::service::service_fn; use hyper_util::{ rt::TokioExecutor, server, }; use ngrok::prelude::*; use watchexec::{ action::{ Action, Outcome, }, command::Command, config::{ InitConfig, RuntimeConfig, }, error::CriticalError, handler::PrintDebug, signal::source::MainSignal, Watchexec, }; #[derive(Parser, Debug)] struct Cargo { #[command(subcommand)] cmd: Cmd, } #[derive(Debug, Subcommand)] enum Cmd { DocNgrok(DocNgrok), } #[derive(Debug, Args)] struct DocNgrok { #[arg(short)] package: Option, #[arg(long, short)] domain: Option, #[arg(long, short)] watch: bool, #[arg(last = true)] doc_args: Vec, } #[tokio::main] async fn main() -> Result<(), BoxError> { let Cmd::DocNgrok(args) = Cargo::parse().cmd; std::process::Command::new("cargo") .arg("doc") .args(args.doc_args.iter()) .stderr(Stdio::inherit()) .stdout(Stdio::inherit()) .spawn()? .wait()?; let meta = cargo_metadata::MetadataCommand::new().exec()?; let default_package = args .package .or(meta.root_package().map(|p| p.name.clone())) .ok_or("No default package found. 
You must provide one with -p")?; let root_dir = meta.workspace_root; let target_dir = meta.target_directory; let doc_dir = target_dir.join("doc"); let sess = ngrok::Session::builder() .authtoken_from_env() .connect() .await?; let mut listen_cfg = sess.http_endpoint(); if let Some(domain) = args.domain { listen_cfg.domain(domain); } let mut listener = listen_cfg.listen().await?; let service = service_fn(move |req| { let stat = hyper_staticfile::Static::new(&doc_dir); stat.serve(req) }); println!( "serving docs on: {}/{}/", listener.url(), default_package.replace('-', "_") ); let server = async move { let (dropref, waiter) = awaitdrop::awaitdrop(); // Continuously accept new connections. while let Some(conn) = listener.try_next().await? { let service = service.clone(); let dropref = dropref.clone(); // Spawn a task to handle the connection. That way we can multiple connections // concurrently. tokio::spawn(async move { if let Err(err) = server::conn::auto::Builder::new(TokioExecutor::new()) .serve_connection(conn, service) .await { eprintln!("failed to serve connection: {err:#}"); } drop(dropref); }); } // Wait until all children have finished, not just the listener. 
drop(dropref); waiter.await; Ok::<(), BoxError>(()) }; if args.watch { let we = make_watcher(args.doc_args, root_dir, target_dir)?; we.main().await??; } else { server.await?; } Ok(()) } fn make_watcher( args: Vec, root_dir: impl Into, target_dir: impl Into, ) -> Result, Box> { let target_dir = target_dir.into(); let root_dir = root_dir.into(); let mut init = InitConfig::default(); init.on_error(PrintDebug(std::io::stderr())); let mut runtime = RuntimeConfig::default(); runtime.pathset([root_dir]); runtime.command(Command::Exec { prog: "cargo".into(), args: [String::from("doc")].into_iter().chain(args).collect(), }); runtime.on_action({ move |action: Action| { let target_dir = target_dir.clone(); async move { let sigs = action .events .iter() .flat_map(|event| event.signals()) .collect::>(); if sigs.iter().any(|sig| sig == &MainSignal::Interrupt) { action.outcome(Outcome::Exit); } else if action .events .iter() .any(|e| e.paths().any(|(p, _)| !p.starts_with(&target_dir))) { action.outcome(Outcome::if_running( Outcome::both(Outcome::Stop, Outcome::Start), Outcome::Start, )); } Result::<_, io::Error>::Ok(()) } } }); Watchexec::new(init, runtime).map_err(Box::new) } ================================================ FILE: flake.nix ================================================ { description = "ngrok agent library in Rust"; inputs = { nixpkgs.url = "github:nixos/nixpkgs/nixpkgs-unstable"; # Note: fenix packages are cached via cachix: # cachix use nix-community fenix-flake = { url = "github:nix-community/fenix"; inputs.nixpkgs.follows = "nixpkgs"; }; flake-utils = { url = "github:numtide/flake-utils"; inputs.nixpkgs.follows = "nixpkgs"; }; }; outputs = { self, nixpkgs, fenix-flake, flake-utils }: flake-utils.lib.eachDefaultSystem (system: let pkgs = import nixpkgs { inherit system; overlays = [ fenix-flake.overlays.default ]; }; toolchain = pkgs.fenix.complete.withComponents [ "cargo" "clippy" "rust-src" "rustc" "rustfmt" "rust-analyzer" ]; fix-n-fmt = 
pkgs.writeShellScriptBin "fix-n-fmt" '' set -euf -o pipefail ${toolchain}/bin/cargo clippy --fix --allow-staged --allow-dirty --all-targets --all-features ${toolchain}/bin/cargo fmt ''; pre-commit = pkgs.writeShellScript "pre-commit" '' cargo clippy --workspace --all-targets --all-features -- -D warnings result=$? if [[ ''${result} -ne 0 ]] ; then cat <<\EOF There are some linting issues, try `fix-n-fmt` to fix. EOF exit 1 fi # Use a dedicated sub-target-dir for udeps. For some reason, it fights with clippy over the cache. CARGO_TARGET_DIR=$(git rev-parse --show-toplevel)/target/udeps cargo udeps --workspace --all-targets --all-features result=$? if [[ ''${result} -ne 0 ]] ; then cat <<\EOF There are some unused dependencies. EOF exit 1 fi diff=$(cargo fmt -- --check) result=$? if [[ ''${result} -ne 0 ]] ; then cat <<\EOF There are some code style issues, run `fix-n-fmt` first. EOF exit 1 fi exit 0 ''; setup-hooks = pkgs.writeShellScriptBin "setup-hooks" '' repo_root=$(git rev-parse --git-dir) ${toString (map (h: '' ln -sf ${h} ''${repo_root}/hooks/${h.name} '') [ pre-commit ])} ''; # Make sure that cargo semver-checks uses the stable toolchain rather # than the nightly one that we normally develop with. 
semver-checks = with pkgs; symlinkJoin { name = "cargo-semver-checks"; paths = [ cargo-semver-checks ]; buildInputs = [ makeWrapper ]; postBuild = '' wrapProgram $out/bin/cargo-semver-checks \ --prefix PATH : ${rustc}/bin \ --prefix PATH : ${cargo}/bin ''; }; extract-version = with pkgs; writeShellScriptBin "extract-crate-version" '' ${cargo}/bin/cargo metadata --format-version 1 --no-deps | \ ${jq}/bin/jq -r ".packages[] | select(.name == \"$1\") | .version" ''; in { devShell = pkgs.mkShell { CHALK_OVERFLOW_DEPTH = 3000; CHALK_SOLVER_MAX_SIZE = 1500; OPENSSL_LIB_DIR = "${pkgs.openssl.out}/lib"; OPENSSL_INCLUDE_DIR = "${pkgs.openssl.dev}/include"; RUSTC_WRAPPER="${pkgs.sccache}/bin/sccache"; buildInputs = with pkgs; [ toolchain fix-n-fmt setup-hooks cargo-udeps semver-checks extract-version ] ++ lib.optionals stdenv.isDarwin [ # nix darwin stdenv has broken libiconv: https://github.com/NixOS/nixpkgs/issues/158331 libiconv pkgs.darwin.apple_sdk.frameworks.CoreServices pkgs.darwin.apple_sdk.frameworks.Security ]; }; }); } ================================================ FILE: ngrok/CHANGELOG.md ================================================ ## 0.18.0 - Add support for CEL filtering when listing resources. - Add support for service users - Add support for `vault_name` on Secrets -Make `pooling_enabled` on Endpoints optional ## 0.17.0 ### Breaking Changes - **Binding is now optional**: Tests no longer hardcode `binding("public")`. The ngrok service will use its default binding configuration when not explicitly specified. - **Binding validation**: The `binding()` method now validates input values and panics on invalid values or multiple calls. 
### Added - Added `Binding` enum with three variants: `Public`, `Internal`, and `Kubernetes` - Added validation for binding values - only "public", "internal", and "kubernetes" are accepted (case-insensitive) - Added `binding()` method documentation with examples for both string and typed enum usage - Added panic behavior when `binding()` is called more than once (only one binding allowed) ### Changed - `binding()` method now accepts both strings and the `Binding` enum via `Into` - Removed hardcoded "public" binding from all tests - bindings are now truly optional ## 0.15.0 - - Removes `hyper-proxy` and `ring` dependencies ## 0.14.0 - - Adds `pooling_enabled` option, allowing the endpoint to pool with other endpoints with the same host/port/binding ## 0.13.1 - Preserve the `ERR_NGROK` prefix for error codes. ## 0.13.0 - Add the `NgrokError` trait - Add the `ErrResp` type - Change the `RpcError::Response` variant to the `ErrResp` type (from `String`) - Implement `NgrokError` for `ErrResp`, `RpcError`, and `ConnectError` ## 0.12.4 - Add `Win32_Foundation` feature - Update nix for rust `1.72` ## 0.12.3 - Add `session.id()` ## 0.12.2 - Updated readme and changelog ## 0.12.1 - Add source error on reconnect - Rename repository to ngrok-rust ## 0.12.0 - Add `client_info` to SessionBuilder - Update UserAgent generation - Make `circuit_breaker` test more reliable ## 0.11.3 - Update stream forwarding logic - Add `ca_cert` option to SessionBuilder - Unpin `bstr` ## 0.11.2 - Send UserAgent when authenticating - Update readme documentation ## 0.11.0 - Include a session close method - Mark errors as non-exhaustive ## 0.10.2 - Update default forwards-to - Expose OAuth Client ID/Secret setters - Muxado: close method on the opener ## 0.10.1 - Add windows pipe support - Require tokio rt ## 0.10.0 - Some api-breaking consistency fixes for the session builder. - Update the connector to be more in-line with the other handlers and to support disconnect/reconnect error reporting. 
- Add support for custom heartbeat handlers. ## 0.9.0 - Update docs to match ngrok-go - Update the tls termination configuration methods to match those in ngrok-go - Remove the `_string` suffix from the cidr restriction methods ## 0.8.1 - Fix cancellation bugs causing leaked muxado/ngrok sessions. ## 0.8.0 - Some breaking changes to builder method naming for consistency. - Add dashboard command handlers ## 0.7.0 - Initial crates.io release. ## Pre-0.7.0 - There was originally a crate on crates.io named 'ngrok' that wrapped the agent binary. It can be found [here](https://github.com/nkconnor/ngrok). Thanks @nkconnor! ================================================ FILE: ngrok/Cargo.toml ================================================ [package] name = "ngrok" version = "0.18.0" edition = "2021" license = "MIT OR Apache-2.0" description = "The ngrok agent SDK" repository = "https://github.com/ngrok/ngrok-rust" [dependencies] arc-swap = "1.5.2" async-trait = "0.1.59" awaitdrop = "0.1.1" axum = { version = "0.7.4", features = ["tokio"], optional = true } axum-core = "0.4.3" base64 = "0.21.7" bitflags = "2.4.2" bytes = "1.10.1" futures = "0.3.25" futures-rustls = { version = "0.26.0", default-features = false, features = ["tls12", "logging"] } futures-util = "0.3.30" hostname = "0.3.1" hyper = { version = "^1.1.0", optional = true } hyper-http-proxy = "1.1.0" hyper-util = { version = "0.1.3", features = ["tokio"] } once_cell = "1.17.1" muxado = { path = "../muxado", version = "0.5" } pin-project = "1.1.3" parking_lot = "0.12.1" proxy-protocol = "0.5.0" regex = "1.7.3" rustls-native-certs = "0.7.0" rustls-pemfile = "2.0.0" serde = { version = "1.0.149", features = ["derive"] } serde_json = "1.0.89" thiserror = "2" tokio = { version = "1.23.0", features = [ "io-util", "net", "sync", "time", "rt", ] } tokio-retry = "0.3.0" tokio-socks = "0.5.1" tokio-util = { version = "0.7.4", features = ["compat"] } tower-service = { version = "0.3.3"} tracing = "0.1.37" url = "2.4.0" 
[target.'cfg(windows)'.dependencies] windows-sys = { version = "0.45.0", features = ["Win32_Foundation"] } [dev-dependencies] anyhow = "1.0.66" axum = { version = "0.7.4", features = ["tokio"] } flate2 = "1.0.25" http-body-util = "0.1.3" hyper = { version = "1.1.0", features = [ "client" ] } hyper-util = { version = "0.1.3", features = [ "tokio", "server", "http1", "http2", ]} paste = "1.0.11" rand = "0.8.5" reqwest = "0.12" tokio = { version = "1.23.0", features = ["full"] } tokio-tungstenite = { version = "0.26.2", features = [ "rustls", "rustls-tls-webpki-roots", ] } tower = { version = "0.5", features = ["util"] } tracing-subscriber = { version = "0.3.16", features = ["env-filter"] } tracing-test = "0.2.3" [[example]] name = "tls" required-features = ["axum"] [[example]] name = "axum" required-features = ["axum"] [[example]] name = "labeled" required-features = ["axum"] [[example]] name = "mingrok" required-features = ["hyper"] [features] default = ["aws-lc-rs"] hyper = ["hyper/server", "hyper/http1", "dep:hyper"] axum = ["dep:axum", "hyper"] online-tests = ["axum", "hyper"] long-tests = ["online-tests"] authenticated-tests = ["online-tests"] paid-tests = ["authenticated-tests"] aws-lc-rs = ["futures-rustls/aws-lc-rs"] ring = ["futures-rustls/ring"] [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] ================================================ FILE: ngrok/README.md ================================================ # ngrok-rust [![Crates.io][crates-badge]][crates-url] [![docs.rs][docs-badge]][docs-url] [![MIT licensed][mit-badge]][mit-url] [![Apache-2.0 licensed][apache-badge]][apache-url] [![Continuous integration][ci-badge]][ci-url] [crates-badge]: https://img.shields.io/crates/v/ngrok.svg [crates-url]: https://crates.io/crates/ngrok [docs-badge]: https://img.shields.io/docsrs/ngrok.svg [docs-url]: https://docs.rs/ngrok [ci-badge]: https://github.com/ngrok/ngrok-rust/actions/workflows/ci.yml/badge.svg [ci-url]: 
https://github.com/ngrok/ngrok-rust/actions/workflows/ci.yml [mit-badge]: https://img.shields.io/badge/license-MIT-blue.svg [mit-url]: https://github.com/ngrok/ngrok-rust/blob/main/LICENSE-MIT [apache-badge]: https://img.shields.io/badge/license-Apache_2.0-blue.svg [apache-url]: https://github.com/ngrok/ngrok-rust/blob/main/LICENSE-APACHE [API Docs (main)](https://ngrok.github.io/ngrok-rust/ngrok) [ngrok](https://ngrok.com) is a simplified API-first ingress-as-a-service that adds connectivity, security, and observability to your apps. ngrok-rust, our native and idiomatic crate for adding a public internet address with secure ingress traffic directly into your Rust apps 🦀. If you’ve used ngrok in the past, you can think of ngrok-rust as the ngrok agent packaged as a Rust crate. ngrok-rust lets developers serve Rust services on the internet in a single statement without setting up low-level network primitives like IPs, NAT, certificates, load balancers, and even ports! Applications using ngrok-rust listen on ngrok’s global ingress network for TCP and HTTP traffic. ngrok-rust listeners are usable with [hyper Servers](https://docs.rs/hyper/latest/hyper/server/index.html), and connections implement [tokio’s AsyncRead and AsyncWrite traits](https://docs.rs/tokio/latest/tokio/io/index.html). This makes it easy to add ngrok-rust into any application that’s built on hyper, such as the popular [axum](https://docs.rs/axum/latest/axum/) HTTP framework. See [`/ngrok/examples/`][examples] for example usage, or the tests in [`/ngrok/src/online_tests.rs`][online-tests]. [examples]: https://github.com/ngrok/ngrok-rust/blob/main/ngrok/examples [online-tests]: https://github.com/ngrok/ngrok-rust/blob/main/ngrok/src/online_tests.rs For working with the [ngrok API](https://ngrok.com/docs/api/), check out the [ngrok Rust API Client Library](https://github.com/ngrok/ngrok-api-rs). 
## Installation Add `ngrok` to the `[dependencies]` section of your `Cargo.toml` with `cargo add`: ```bash $ cargo add ngrok ``` ## Quickstart Create a simple HTTP server using `ngrok` and `axum`: `Cargo.toml`: ```toml [package] name = "ngrok-rust-demo" version = "0.1.0" edition = "2021" [dependencies] ngrok = {version = "0.18.0"} tokio = { version = "1", features = [ "full" ] } axum = { version = "0.7.4", features = ["tokio"] } async-trait = "0.1.59" hyper = {version = "1", features = ["full"]} hyper-util = { version = "0.1", features = [ "full" ] } url = "2.5.4" ``` `src/main.rs`: ```rust #![deny(warnings)] use axum::{routing::get, Router}; use ngrok::config::{ForwarderBuilder, TunnelBuilder}; use std::net::SocketAddr; use url::Url; #[tokio::main] async fn main() -> Result<(), Box<dyn std::error::Error>> { // Create Axum app let app = Router::new().route("/", get(|| async { "Hello from Axum!" })); // Spawn Axum server let addr = SocketAddr::from(([127, 0, 0, 1], 3000)); tokio::spawn(async move { axum::serve(tokio::net::TcpListener::bind(addr).await.unwrap(), app) .await .unwrap(); }); // Set up ngrok tunnel let sess1 = ngrok::Session::builder() .authtoken_from_env() .connect() .await?; let sess2 = ngrok::Session::builder() .authtoken_from_env() .connect() .await?; let _listener = sess1 .http_endpoint() .domain("/* your domain*/") .pooling_enabled(true) .listen_and_forward(Url::parse("http://localhost:3000").unwrap()) .await?; let _listener2 = sess2 .http_endpoint() .domain("/* your domain */") .pooling_enabled(true) .listen_and_forward(Url::parse("http://localhost:8000").unwrap()) .await?; // Wait indefinitely tokio::signal::ctrl_c().await?; Ok(()) } ``` # Changelog Changes to `ngrok-rust` are tracked under [CHANGELOG.md](https://github.com/ngrok/ngrok-rust/blob/main/ngrok/CHANGELOG.md). 
# Join the ngrok Community - Check out [our official docs](https://docs.ngrok.com) - Read about updates on [our blog](https://ngrok.com/blog) - Open an [issue](https://github.com/ngrok/ngrok-rust/issues) or [pull request](https://github.com/ngrok/ngrok-rust/pulls) - Join our [Slack community](https://ngrok.com/slack) - Follow us on [X / Twitter (@ngrokHQ)](https://twitter.com/ngrokhq) - Subscribe to our [Youtube channel (@ngrokHQ)](https://www.youtube.com/@ngrokhq) # License This project is licensed under either of - Apache License, Version 2.0, ([LICENSE-APACHE][apache-url] or ) - MIT license ([LICENSE-MIT][mit-url] or ) at your option. ### Contribution Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in ngrok by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions. ================================================ FILE: ngrok/assets/ngrok.ca.crt ================================================ -----BEGIN CERTIFICATE----- MIID4TCCAsmgAwIBAgIUZqF2AkB17pISojTndgc2U5BDt74wDQYJKoZIhvcNAQEL BQAwbzEQMA4GA1UEAwwHUm9vdCBDQTENMAsGA1UECwwEcHJvZDESMBAGA1UECgwJ bmdyb2sgSW5jMRYwFAYDVQQHDA1TYW4gRnJhbmNpc2NvMRMwEQYDVQQIDApDYWxp Zm9ybmlhMQswCQYDVQQGEwJVUzAgFw0yMjA4MzExNTE3MjFaGA80NzYwMDcyODE1 MTcyMVowbzEQMA4GA1UEAwwHUm9vdCBDQTENMAsGA1UECwwEcHJvZDESMBAGA1UE CgwJbmdyb2sgSW5jMRYwFAYDVQQHDA1TYW4gRnJhbmNpc2NvMRMwEQYDVQQIDApD YWxpZm9ybmlhMQswCQYDVQQGEwJVUzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC AQoCggEBAMPkZpOguChG8QXfp1eCu21wipptiWO9U6F2DRf5ln8XXAAokZyfo4IZ 795G+KdkEbq4KxSXHehhKQFDwlFnzIkZsDu6PHabXsutAmNLmoRQzsETTdh3gMEJ JiCW+mtqmbWPH22GXnUXxe5R6dWbkXqrITy6nFpZWdFbKmo9/1VoyWdIgcXujq2D aNCWm2BoQ9seCebc5+6gF2syXzvoKVZ4qg6O1anCl1K0ZH/2mDXu/22O2U4Tr/j7 6Da1Y7TWZYDU2dIz+tyfTOMrlaxXyxxmXewzOpYjBiHisfPpz7AtrTlAzaEVVhRk c86vC2h42zqH8Jv0fjJdfMkVXe3eegECAwEAAaNzMHEwHQYDVR0OBBYEFNxeUxPI M8G7cX0DhFc81pLD4W+HMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEG 
MC8GA1UdHwQoMCYwJKAioCCGHmh0dHA6Ly9jcmwubmdyb2suY29tL25ncm9rLmNy bDANBgkqhkiG9w0BAQsFAAOCAQEAChXl+eYIQbn0OOHLuCBvXxDKHqccJLPaxJR1 LeWj8HjWbyLXnS405YNn84NFirpYzemeYSex+os92kjjLhBXEOIEpAE9JebDk7N5 X4xSOkS7vrOepX4JFNhqVdxut7pqEmuj1Xf7KhHtFquFM5fhLJHnWEJGWOTRbRVp KWqZI/HzaltSbgiikf3S2qu6oZHph/BpueCqLKwvJziPQGE+cYdYQzRKPJZbuorj +CnYUXd7kHC3RZzs6egVIvUYy+bGgv1CeeAm9EccL2RmPkSzThOo6oXBLR50Zlke 1x7y/5om6zp9vGTW4PWVAW/VWw1x4zxtSQ7NrP1Ldh7Xmnb7sw== -----END CERTIFICATE----- ================================================ FILE: ngrok/assets/policy-inbound.json ================================================ { "inbound": [ { "name": "test_in", "expressions": [ "req.Method == 'PUT'" ], "actions": [ { "type": "deny" } ] } ] } ================================================ FILE: ngrok/assets/policy.json ================================================ { "inbound": [ { "name": "test_in", "expressions": [ "req.Method == 'PUT'" ], "actions": [ { "type": "deny" } ] } ], "outbound": [ { "name": "test_out", "expressions": [ "res.StatusCode == '200'" ], "actions": [ { "type": "custom-response", "config": { "status_code": 201 } } ] } ] } ================================================ FILE: ngrok/examples/axum.rs ================================================ use std::{ convert::Infallible, net::SocketAddr, }; use axum::{ extract::ConnectInfo, routing::get, Router, }; use axum_core::BoxError; use futures::stream::TryStreamExt; use hyper::{ body::Incoming, Request, }; use hyper_util::{ rt::TokioExecutor, server, }; use ngrok::prelude::*; use tower::{ util::ServiceExt, Service, }; #[tokio::main] async fn main() -> Result<(), BoxError> { // build our application with a single route let app = Router::new().route( "/", get( |ConnectInfo(remote_addr): ConnectInfo| async move { format!("Hello, {remote_addr:?}!\r\n") }, ), ); let mut listener = ngrok::Session::builder() .authtoken_from_env() .connect() .await? 
.http_endpoint() // .allow_cidr("0.0.0.0/0") // .basic_auth("ngrok", "online1line") // .circuit_breaker(0.5) // .compression() // .deny_cidr("10.1.1.1/32") // .verify_upstream_tls(false) // .domain(".ngrok.io") // .forwards_to("example rust") // .mutual_tlsca(CA_CERT.into()) // .oauth( // OauthOptions::new("google") // .allow_email("@") // .allow_domain("") // .scope(""), // ) // .oidc( // OidcOptions::new("", "", "") // .allow_email("@") // .allow_domain("") // .scope(""), // ) // .traffic_policy(POLICY_JSON) // .pooling_enabled(false) // .proxy_proto(ProxyProto::None) // .remove_request_header("X-Req-Nope") // .remove_response_header("X-Res-Nope") // .request_header("X-Req-Yup", "true") // .response_header("X-Res-Yup", "true") // .scheme(ngrok::Scheme::HTTPS) // .websocket_tcp_conversion() // .webhook_verification("twilio", "asdf"), .metadata("example tunnel metadata from rust") .listen() .await?; println!("Listener started on URL: {:?}", listener.url()); let mut make_service = app.into_make_service_with_connect_info::(); let server = async move { while let Some(conn) = listener.try_next().await? 
{ let remote_addr = conn.remote_addr(); let tower_service = unwrap_infallible(make_service.call(remote_addr).await); tokio::spawn(async move { let hyper_service = hyper::service::service_fn(move |request: Request| { tower_service.clone().oneshot(request) }); if let Err(err) = server::conn::auto::Builder::new(TokioExecutor::new()) .serve_connection_with_upgrades(conn, hyper_service) .await { eprintln!("failed to serve connection: {err:#}"); } }); } Ok::<(), BoxError>(()) }; server.await?; Ok(()) } #[allow(dead_code)] const POLICY_JSON: &str = r###"{ "inbound":[ { "name":"deny_put", "expressions":["req.Method == 'PUT'"], "actions":[{"Type":"deny"}] }], "outbound":[ { "name":"change success response", "expressions":["res.StatusCode == '200'"], "actions":[{ "type":"custom-response", "config":{ "status_code":201, "content": "Custom 200 response.", "headers": { "content_type": "text/html" } } }] }] }"###; #[allow(dead_code)] const POLICY_YAML: &str = r###" --- inbound: - name: "deny_put" expressions: - "req.Method == 'PUT'" actions: - type: "deny" outbound: - name: "change success response" expressions: - "res.StatusCode == '200'" actions: - type: "custom-response" config: status_code: 201 content: "Custom 200 response." headers: content_type: "text/html" "###; #[allow(dead_code)] fn create_policy() -> Result { Ok(Policy::new() .add_inbound( Rule::new("deny_put") .add_expression("req.Method == 'PUT'") .add_action(Action::new("deny", None)?), ) .add_outbound( Rule::new("200_response") .add_expression("res.StatusCode == '200'") .add_action(Action::new( "custom-response", Some( r###"{ "status_code": 200, "content_type": "text/html", "content": "Custom 200 response." 
}"###, ), )?), ) .to_owned()) } // const CA_CERT: &[u8] = include_bytes!("ca.crt"); fn unwrap_infallible(result: Result) -> T { match result { Ok(value) => value, Err(err) => match err {}, } } ================================================ FILE: ngrok/examples/connect.rs ================================================ use futures::TryStreamExt; use ngrok::prelude::*; use tokio::io::{ self, AsyncBufReadExt, AsyncWriteExt, BufReader, }; use tracing::info; use tracing_subscriber::fmt::format::FmtSpan; #[tokio::main] async fn main() -> anyhow::Result<()> { tracing_subscriber::fmt() .pretty() .with_span_events(FmtSpan::ENTER) .with_env_filter(std::env::var("RUST_LOG").unwrap_or_default()) .init(); let sess = ngrok::Session::builder() .authtoken_from_env() .metadata("Online in One Line") // .root_cas("trusted")? .connect() .await?; let tunnel = sess .tcp_endpoint() // .allow_cidr("0.0.0.0/0") // .deny_cidr("10.1.1.1/32") // .verify_upstream_tls(false) // .pooling_enabled(false) // .forwards_to("example rust"), // .proxy_proto(ProxyProto::None) // .remote_addr(".tcp.ngrok.io:

") .metadata("example tunnel metadata from rust") .listen() .await?; handle_tunnel(tunnel, sess); futures::future::pending().await } fn handle_tunnel(mut tunnel: impl EndpointInfo + Tunnel, sess: ngrok::Session) { info!("bound new tunnel: {}", tunnel.url()); tokio::spawn(async move { loop { let stream = if let Some(stream) = tunnel.try_next().await? { stream } else { info!("tunnel closed!"); break; }; let sess = sess.clone(); let id: String = tunnel.id().into(); tokio::spawn(async move { info!("accepted connection: {:?}", stream.remote_addr()); let (rx, mut tx) = io::split(stream); let mut lines = BufReader::new(rx); loop { let mut buf = String::new(); let len = lines.read_line(&mut buf).await?; if len == 0 { break; } if buf.contains("bye!") { info!("unbind requested"); tx.write_all("later!".as_bytes()).await?; sess.close_tunnel(id).await?; return Ok(()); } else if buf.contains("another!") { info!("another requested"); let new_tunnel = sess.tcp_endpoint().listen().await?; tx.write_all(new_tunnel.url().as_bytes()).await?; handle_tunnel(new_tunnel, sess.clone()); } else { info!("read line: {}", buf); tx.write_all(buf.as_bytes()).await?; info!("echoed line"); } tx.flush().await?; info!("flushed"); } Result::<(), anyhow::Error>::Ok(()) }); } anyhow::Result::<()>::Ok(()) }); } ================================================ FILE: ngrok/examples/domain.crt ================================================ -----BEGIN CERTIFICATE----- MIIC+jCCAeICCQDobWtly6PonjANBgkqhkiG9w0BAQsFADA/MQswCQYDVQQGEwJV UzERMA8GA1UECgwITm90IFJlYWwxHTAbBgNVBAMMFHJ1c3Qtc2RrLmV4YW1wbGUu Y29tMB4XDTIyMTIwMjE4MzMxM1oXDTMyMTEyOTE4MzMxM1owPzELMAkGA1UEBhMC VVMxETAPBgNVBAoMCE5vdCBSZWFsMR0wGwYDVQQDDBRydXN0LXNkay5leGFtcGxl LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKhsx8tZWzaqaz9i gnyU9O/dCEX8qgCvU2yoeJBfGhwCnlFNQBUdBGlV+Cjf19ozagYlY6Cunu214AUR CDHTZsgTmMhtHkJ3kWD0wgDu+uyUuW6akP1+o39lebDc6CbDV7j1ySBoPMROp5dB pX+ltpH42CmJM6ciwfTD1uuW5LXJvb9d4HISZp2RWyHqb3a6pI7E+XLqXg/Yy9MY 
eqQESZMrYCjC+Sn4blGhcQhjTVU2rM5ChoDtZuL8OJQ0UYmchlch8CNc5Lvj9hAT BiafEAscGrdIAZkK50kjpcIOWPPSfjCRqz8elSQqoKFq/uQnHBF5NwmsEqE0sXhw 4UdngRMCAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAKieeE6gzuxHGjVT2NKL5BFjL XKxdQhI/Tt7ClKu39Ay62fXDRznTBpGRfyWsJ5r3wmsHFogw46a2HYZHyuTMfyPY lKhE/9EPMf/faqhIa33nMBASNzuGB5yfcPaod4KJX6DBKZtIpgkm2+S6BivpuSEo DJ0lNtlR80mcVPma9KR57A0oh/UIsHXxL0qIKdaxyZYOZ1Zhtm+hzZcZA4wHkqzN olNk3SOfhC5vVFudg+5KtxPBZ/efS9sqDUstH8hmE1JnxCF9OBlHdKI4yUMnsEf7 aOy11K5g7Oc3m7EB1twEQkufBAJeYzMOCji17GyJHDojNuOLkrmoLgcgDym5LQ== -----END CERTIFICATE----- ================================================ FILE: ngrok/examples/domain.key ================================================ -----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAqGzHy1lbNqprP2KCfJT0790IRfyqAK9TbKh4kF8aHAKeUU1A FR0EaVX4KN/X2jNqBiVjoK6e7bXgBREIMdNmyBOYyG0eQneRYPTCAO767JS5bpqQ /X6jf2V5sNzoJsNXuPXJIGg8xE6nl0Glf6W2kfjYKYkzpyLB9MPW65bktcm9v13g chJmnZFbIepvdrqkjsT5cupeD9jL0xh6pARJkytgKML5KfhuUaFxCGNNVTaszkKG gO1m4vw4lDRRiZyGVyHwI1zku+P2EBMGJp8QCxwat0gBmQrnSSOlwg5Y89J+MJGr Px6VJCqgoWr+5CccEXk3CawSoTSxeHDhR2eBEwIDAQABAoIBADKQLc8brWmU8gue bGQwZ/RW3DP+rZ71A8ucLE3Tb0g3dQYddf6groFdINpMkUXdp5few7Eqm2Xr8ywy N86Vk8a/M2AAelQkB04fTNrw4/4AjEbrOloQGc+WTFlPiJaSkJRjnZUQFiYtIt0j BSd0PYJHPcYCfbJQmf/8h1pE+7ajNJlvEWrJ8UjDCjUuPPxq1aCOIA48aN8awDaO 2R0AeSBws6+6UgyBgy2juat0t8PvS+AiLv4rK3RGMD+x96KoPEoVVgOQLr5YTqRP Q+HYrs5cSXx9Jb2cmuJzvPUJmE3HKhoshWrK7fz5Z8wVAqTGhX6dbuHoqMJnAdla FFSBEokCgYEA0BAsrDrnSkls1uC54iqzrxPMvITj4UnBR+PK504NrtP2brlcVIDP e0dTKPTqjIC0vpDIg2fhPPvKkeoyuL6huiUWL/DdYVphUlwTf2Mu6PUm3o4M1MWN S7q09cqUp4HWCUbzN3MIJ8sOPY17Lq+fxi1Wf4mNh+8IIXcJQ4HgUiUCgYEAzzqx L7ck6pBUTtpUFYFTCUQDOYdzPE72zOzHK/LpoWJEssQ479srKlmSnRPRZbPZGMGE EXvhWROonux96rRrZjiBI4B5G4rzeY0Rs24kClEh+7s5Zw4xmfSJu5oSdLqiy+O+ IKMVhOm9qq+8+y9LwKyajwR27srLdHSijJoXNNcCgYEAgtc5EJH2MwwbisFFg8mw t0+vN3omR91203uXdH/sMN4Qoa6lNmrOj0raK+5gtTyW7SPlRGWGCjCZQctSXEVd NM7vtfQ1c2w/uWg3xqsbq9nGuLwBq6gT4+SkudDMTM5kR+87Mcp//W4/JUwcg85j 
nl+Sfp+Exk/1//14cOByrZUCgYEAjrr7HUVEfPbJysHf1iwL2D7rBa3AdhJhNIYF LMUTm59Gd+Zk3PeUxIeLTvs+Z5E2/zESWMR9UtASfNugYo6/xlk2wRAU2h6bUeYT AgXjduOox2yNvehty389emRFP/boeAw1gN8yzCf+BdkjDdLmlx+LGORXUmOFPIG1 D6h2QWMCgYA0WysR3XMcRH/8GDAgNVry5JvKoxlVXTPqVScTjMRj3VAzPYPCV+ql lNN6yh/TuJwdvNs+uhKd1Wu4cDIb9GqxkBbUTKoKBrVL1YB93IC7QIR5wVjhJF/i lrFW1ogr3535UzHzyDD1oXvcnWV/JnTdadHf2oA3Em8n2oTQvXQAog== -----END RSA PRIVATE KEY----- ================================================ FILE: ngrok/examples/labeled.rs ================================================ use std::{ convert::Infallible, error::Error, net::SocketAddr, }; use axum::{ extract::ConnectInfo, routing::get, BoxError, Router, }; use futures::TryStreamExt; use hyper::{ body::Incoming, Request, }; use hyper_util::{ rt::TokioExecutor, server, }; use ngrok::prelude::*; use tower::{ util::ServiceExt, Service, }; #[tokio::main] async fn main() -> Result<(), Box> { // build our application with a single route let app = Router::new().route( "/", get( |ConnectInfo(remote_addr): ConnectInfo| async move { format!("Hello, {remote_addr:?}!\r\n") }, ), ); let sess = ngrok::Session::builder() .authtoken_from_env() .connect() .await?; let mut listener = sess .labeled_tunnel() // .app_protocol("http2") // .verify_upstream_tls(false) .label("edge", "edghts_") .metadata("example tunnel metadata from rust") .listen() .await?; println!("Labeled listener started!"); let mut make_service = app.into_make_service_with_connect_info::(); let server = async move { while let Some(conn) = listener.try_next().await? 
{ let remote_addr = conn.remote_addr(); let tower_service = unwrap_infallible(make_service.call(remote_addr).await); tokio::spawn(async move { let hyper_service = hyper::service::service_fn(move |request: Request| { tower_service.clone().oneshot(request) }); if let Err(err) = server::conn::auto::Builder::new(TokioExecutor::new()) .serve_connection_with_upgrades(conn, hyper_service) .await { eprintln!("failed to serve connection: {err:#}"); } }); } Ok::<(), BoxError>(()) }; server.await?; Ok(()) } fn unwrap_infallible(result: Result) -> T { match result { Ok(value) => value, Err(err) => match err {}, } } ================================================ FILE: ngrok/examples/mingrok.rs ================================================ use std::sync::{ Arc, Mutex, }; use anyhow::Error; use futures::{ prelude::*, select, }; use ngrok::prelude::*; use tokio::sync::oneshot; use tracing::info; use url::Url; #[tokio::main] async fn main() -> Result<(), Error> { tracing_subscriber::fmt() .pretty() .with_env_filter(std::env::var("RUST_LOG").unwrap_or_else(|_| "info".into())) .init(); let forwards_to = std::env::args() .nth(1) .ok_or_else(|| anyhow::anyhow!("missing forwarding address")) .and_then(|s| Ok(Url::parse(&s)?))?; loop { let (stop_tx, stop_rx) = oneshot::channel(); let stop_tx = Arc::new(Mutex::new(Some(stop_tx))); let (restart_tx, restart_rx) = oneshot::channel(); let restart_tx = Arc::new(Mutex::new(Some(restart_tx))); let mut fwd = ngrok::Session::builder() .authtoken_from_env() .handle_stop_command(move |req| { let stop_tx = stop_tx.clone(); async move { info!(?req, "received stop command"); let _ = stop_tx.lock().unwrap().take().unwrap().send(()); Ok(()) } }) .handle_restart_command(move |req| { let restart_tx = restart_tx.clone(); async move { info!(?req, "received restart command"); let _ = restart_tx.lock().unwrap().take().unwrap().send(()); Ok(()) } }) .handle_update_command(|req| async move { info!(?req, "received update command"); Err("unable to 
update".into()) }) .connect() .await? .http_endpoint() .listen_and_forward(forwards_to.clone()) .await?; info!(url = fwd.url(), %forwards_to, "started forwarder"); let mut fut = fwd.join().fuse(); let mut stop_rx = stop_rx.fuse(); let mut restart_rx = restart_rx.fuse(); select! { res = fut => info!("{:?}", res?), _ = stop_rx => return Ok(()), _ = restart_rx => { drop(fut); let _ = fwd.close().await; continue }, } } } ================================================ FILE: ngrok/examples/tls.rs ================================================ use std::{ convert::Infallible, error::Error, net::SocketAddr, }; use axum::{ extract::ConnectInfo, routing::get, BoxError, Router, }; use futures::TryStreamExt; use hyper::{ body::Incoming, Request, }; use hyper_util::{ rt::TokioExecutor, server, }; use ngrok::prelude::*; use tower::{ util::ServiceExt, Service, }; const CERT: &[u8] = include_bytes!("domain.crt"); const KEY: &[u8] = include_bytes!("domain.key"); // const CA_CERT: &[u8] = include_bytes!("ca.crt"); #[tokio::main] async fn main() -> Result<(), Box> { // build our application with a single route let app = Router::new().route( "/", get( |ConnectInfo(remote_addr): ConnectInfo| async move { format!("Hello, {remote_addr:?}!\r\n") }, ), ); let sess = ngrok::Session::builder() .authtoken_from_env() .connect() .await?; let mut listener = sess .tls_endpoint() // .allow_cidr("0.0.0.0/0") // .deny_cidr("10.1.1.1/32") // .verify_upstream_tls(false) // .domain(".ngrok.io") // .forwards_to("example rust"), // .mutual_tlsca(CA_CERT.into()) // .proxy_proto(ProxyProto::None) .termination(CERT.into(), KEY.into()) .metadata("example tunnel metadata from rust") .listen() .await?; let mut make_service = app.into_make_service_with_connect_info::(); let server = async move { while let Some(conn) = listener.try_next().await? 
{ let remote_addr = conn.remote_addr(); let tower_service = unwrap_infallible(make_service.call(remote_addr).await); tokio::spawn(async move { let hyper_service = hyper::service::service_fn(move |request: Request| { tower_service.clone().oneshot(request) }); if let Err(err) = server::conn::auto::Builder::new(TokioExecutor::new()) .serve_connection_with_upgrades(conn, hyper_service) .await { eprintln!("failed to serve connection: {err:#}"); } }); } Ok::<(), BoxError>(()) }; server.await?; Ok(()) } fn unwrap_infallible(result: Result) -> T { match result { Ok(value) => value, Err(err) => match err {}, } } ================================================ FILE: ngrok/src/config/common.rs ================================================ use std::{ collections::HashMap, env, process, }; use async_trait::async_trait; use once_cell::sync::OnceCell; use url::Url; pub use crate::internals::proto::ProxyProto; use crate::{ config::policies::Policy, forwarder::Forwarder, internals::proto::{ BindExtra, BindOpts, IpRestriction, MutualTls, }, session::RpcError, Session, Tunnel, }; /// Represents the ingress configuration for an ngrok endpoint. /// /// Bindings determine where and how your endpoint is exposed. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum Binding { /// Publicly accessible endpoint (default for most configurations). Public, /// Internal-only endpoint, not accessible from the public internet. Internal, /// Kubernetes cluster binding for service mesh integration. Kubernetes, } impl Binding { /// Returns the string representation of this binding. pub fn as_str(&self) -> &'static str { match self { Binding::Public => "public", Binding::Internal => "internal", Binding::Kubernetes => "kubernetes", } } /// Validates if a string is a recognized binding value. pub(crate) fn validate(s: &str) -> Result<(), String> { match s.to_lowercase().as_str() { "public" | "internal" | "kubernetes" => Ok(()), _ => Err(format!( "Invalid binding value '{}'. 
Expected 'public', 'internal', or 'kubernetes'", s )), } } } impl From for String { fn from(binding: Binding) -> String { binding.as_str().to_string() } } impl std::str::FromStr for Binding { type Err = String; fn from_str(s: &str) -> Result { match s.to_lowercase().as_str() { "public" => Ok(Binding::Public), "internal" => Ok(Binding::Internal), "kubernetes" => Ok(Binding::Kubernetes), _ => Err(format!( "Invalid binding value '{}'. Expected 'public', 'internal', or 'kubernetes'", s )), } } } impl std::fmt::Display for Binding { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{}", self.as_str()) } } pub(crate) fn default_forwards_to() -> &'static str { static FORWARDS_TO: OnceCell = OnceCell::new(); FORWARDS_TO .get_or_init(|| { let hostname = hostname::get() .unwrap_or("".into()) .to_string_lossy() .into_owned(); let exe = env::current_exe() .unwrap_or("".into()) .to_string_lossy() .into_owned(); let pid = process::id(); format!("app://{hostname}/{exe}?pid={pid}") }) .as_str() } /// Trait representing things that can be built into an ngrok tunnel. #[async_trait] pub trait TunnelBuilder: From { /// The ngrok tunnel type that this builder produces. type Tunnel: Tunnel; /// Begin listening for new connections on this tunnel. async fn listen(&self) -> Result; } /// Trait representing things that can be built into an ngrok tunnel and then /// forwarded to a provided URL. #[async_trait] pub trait ForwarderBuilder: TunnelBuilder { /// Start listening for new connections on this tunnel and forward all /// connections to the provided URL. /// /// This will also set the `forwards_to` metadata for the tunnel. async fn listen_and_forward(&self, to_url: Url) -> Result, RpcError>; } macro_rules! impl_builder { ($(#[$m:meta])* $name:ident, $opts:ty, $tun:ident, $edgepoint:tt) => { $(#[$m])* #[derive(Clone)] pub struct $name { options: $opts, // Note: This is only optional for testing purposes. 
session: Option, } mod __builder_impl { use $crate::forwarder::Forwarder; use $crate::config::common::ForwarderBuilder; use $crate::config::common::TunnelBuilder; use $crate::session::RpcError; use async_trait::async_trait; use url::Url; use super::*; impl From for $name { fn from(session: Session) -> Self { $name { options: Default::default(), session: session.into(), } } } #[async_trait] impl TunnelBuilder for $name { type Tunnel = $tun; async fn listen(&self) -> Result<$tun, RpcError> { Ok($tun { inner: self .session .as_ref() .unwrap() .start_tunnel(&self.options) .await?, }) } } #[async_trait] impl ForwarderBuilder for $name { async fn listen_and_forward(&self, to_url: Url) -> Result, RpcError> { let mut cfg = self.clone(); cfg.for_forwarding_to(&to_url).await; let tunnel = cfg.listen().await?; let info = tunnel.make_info(); $crate::forwarder::forward(tunnel, info, to_url) } } } }; } /// Tunnel configuration trait, implemented by our top-level config objects. pub(crate) trait TunnelConfig { /// The "forwards to" metadata. /// /// Only for display/informational purposes. fn forwards_to(&self) -> String; /// The L7 protocol the upstream service expects fn forwards_proto(&self) -> String; /// Whether to disable certificate verification for this tunnel. fn verify_upstream_tls(&self) -> bool; /// Internal-only, extra data sent when binding a tunnel. fn extra(&self) -> BindExtra; /// The protocol for this tunnel. fn proto(&self) -> String; /// The middleware and other configuration options for this tunnel. fn opts(&self) -> Option; /// The labels for this tunnel. 
fn labels(&self) -> HashMap; } // delegate references impl TunnelConfig for &T where T: TunnelConfig, { fn forwards_to(&self) -> String { (**self).forwards_to() } fn forwards_proto(&self) -> String { (**self).forwards_proto() } fn verify_upstream_tls(&self) -> bool { (**self).verify_upstream_tls() } fn extra(&self) -> BindExtra { (**self).extra() } fn proto(&self) -> String { (**self).proto() } fn opts(&self) -> Option { (**self).opts() } fn labels(&self) -> HashMap { (**self).labels() } } /// Restrictions placed on the origin of incoming connections to the edge. #[derive(Clone, Default)] pub(crate) struct CidrRestrictions { /// Rejects connections that do not match the given CIDRs pub(crate) allowed: Vec, /// Rejects connections that match the given CIDRs and allows all other CIDRs. pub(crate) denied: Vec, } impl CidrRestrictions { pub(crate) fn allow(&mut self, cidr: impl Into) { self.allowed.push(cidr.into()); } pub(crate) fn deny(&mut self, cidr: impl Into) { self.denied.push(cidr.into()); } } // Common #[derive(Default, Clone)] pub(crate) struct CommonOpts { // Restrictions placed on the origin of incoming connections to the edge. pub(crate) cidr_restrictions: CidrRestrictions, // The version of PROXY protocol to use with this tunnel, zero if not // using. pub(crate) proxy_proto: ProxyProto, // Tunnel-specific opaque metadata. Viewable via the API. pub(crate) metadata: Option, // Tunnel backend metadata. Viewable via the dashboard and API, but has no // bearing on tunnel behavior. pub(crate) forwards_to: Option, // Tunnel L7 app protocol pub(crate) forwards_proto: Option, // Whether to disable certificate verification for this tunnel. verify_upstream_tls: Option, // DEPRECATED: use traffic_policy instead. pub(crate) policy: Option, // Policy that defines rules that should be applied to incoming or outgoing // connections to the edge. 
pub(crate) traffic_policy: Option, // Allows the endpoint to pool with other endpoints with the same host/port/binding pub(crate) pooling_enabled: Option, } impl CommonOpts { // Get the proto version of cidr restrictions pub(crate) fn ip_restriction(&self) -> Option { (!self.cidr_restrictions.allowed.is_empty() || !self.cidr_restrictions.denied.is_empty()) .then_some(self.cidr_restrictions.clone().into()) } pub(crate) fn for_forwarding_to(&mut self, to_url: &Url) -> &mut Self { self.forwards_to = Some(to_url.as_str().into()); self } pub(crate) fn set_verify_upstream_tls(&mut self, verify_upstream_tls: bool) { self.verify_upstream_tls = Some(verify_upstream_tls) } pub(crate) fn verify_upstream_tls(&self) -> bool { self.verify_upstream_tls.unwrap_or(true) } } // transform into the wire protocol format impl From for IpRestriction { fn from(cr: CidrRestrictions) -> Self { IpRestriction { allow_cidrs: cr.allowed, deny_cidrs: cr.denied, } } } impl From<&[bytes::Bytes]> for MutualTls { fn from(b: &[bytes::Bytes]) -> Self { let mut aggregated = Vec::new(); b.iter().for_each(|c| aggregated.extend(c)); MutualTls { mutual_tls_ca: aggregated, } } } ================================================ FILE: ngrok/src/config/headers.rs ================================================ use std::collections::HashMap; use crate::internals::proto::Headers as HeaderProto; /// HTTP Headers to modify at the ngrok edge. #[derive(Clone, Default)] pub(crate) struct Headers { /// Headers to add to requests or responses at the ngrok edge. added: HashMap, /// Header names to remove from requests or responses at the ngrok edge. 
removed: Vec, } impl Headers { pub(crate) fn add(&mut self, name: impl Into, value: impl Into) { self.added.insert(name.into().to_lowercase(), value.into()); } pub(crate) fn remove(&mut self, name: impl Into) { self.removed.push(name.into().to_lowercase()); } pub(crate) fn has_entries(&self) -> bool { !self.added.is_empty() || !self.removed.is_empty() } } // transform into the wire protocol format impl From for HeaderProto { fn from(headers: Headers) -> Self { HeaderProto { add: headers .added .iter() .map(|a| format!("{}:{}", a.0, a.1)) .collect(), remove: headers.removed, add_parsed: HashMap::new(), // unused in this context } } } ================================================ FILE: ngrok/src/config/http.rs ================================================ use std::{ borrow::Borrow, collections::HashMap, convert::From, str::FromStr, }; use bytes::Bytes; use thiserror::Error; use url::Url; use super::{ common::ProxyProto, Policy, }; // These are used for doc comment links. #[allow(unused_imports)] use crate::config::{ ForwarderBuilder, TunnelBuilder, }; use crate::{ config::{ common::{ default_forwards_to, Binding, CommonOpts, TunnelConfig, }, headers::Headers, oauth::OauthOptions, oidc::OidcOptions, webhook_verification::WebhookVerification, }, internals::proto::{ BasicAuth, BasicAuthCredential, BindExtra, BindOpts, CircuitBreaker, Compression, HttpEndpoint, UserAgentFilter, WebsocketTcpConverter, }, tunnel::HttpTunnel, Session, }; /// Error representing invalid string for Scheme #[derive(Debug, Clone, Error)] #[error("invalid scheme string: {}", .0)] pub struct InvalidSchemeString(String); /// The URL scheme for this HTTP endpoint. /// /// [Scheme::HTTPS] will enable TLS termination at the ngrok edge. #[derive(Clone, Default, Eq, PartialEq)] pub enum Scheme { /// The `http` URL scheme. HTTP, /// The `https` URL scheme. 
#[default] HTTPS, } impl FromStr for Scheme { type Err = InvalidSchemeString; fn from_str(s: &str) -> Result { use Scheme::*; Ok(match s.to_uppercase().as_str() { "HTTP" => HTTP, "HTTPS" => HTTPS, _ => return Err(InvalidSchemeString(s.into())), }) } } /// Restrictions placed on the origin of incoming connections to the edge. #[derive(Clone, Default)] pub(crate) struct UaFilter { /// Rejects connections that do not match the given regular expression pub(crate) allow: Vec, /// Rejects connections that match the given regular expression and allows /// all other regular expressions. pub(crate) deny: Vec, } impl UaFilter { pub(crate) fn allow(&mut self, allow: impl Into) { self.allow.push(allow.into()); } pub(crate) fn deny(&mut self, deny: impl Into) { self.deny.push(deny.into()); } } impl From for UserAgentFilter { fn from(ua: UaFilter) -> Self { UserAgentFilter { allow: ua.allow, deny: ua.deny, } } } /// The options for a HTTP edge. #[derive(Default, Clone)] struct HttpOptions { pub(crate) common_opts: CommonOpts, pub(crate) scheme: Scheme, pub(crate) domain: Option, pub(crate) mutual_tlsca: Vec, pub(crate) compression: bool, pub(crate) websocket_tcp_conversion: bool, pub(crate) circuit_breaker: f64, pub(crate) request_headers: Headers, pub(crate) response_headers: Headers, pub(crate) rewrite_host: bool, pub(crate) basic_auth: Vec<(String, String)>, pub(crate) oauth: Option, pub(crate) oidc: Option, pub(crate) webhook_verification: Option, // Flitering placed on the origin of incoming connections to the edge. 
pub(crate) user_agent_filter: UaFilter, pub(crate) bindings: Vec, } impl HttpOptions { fn user_agent_filter(&self) -> Option { (!self.user_agent_filter.allow.is_empty() || !self.user_agent_filter.deny.is_empty()) .then_some(self.user_agent_filter.clone().into()) } } impl TunnelConfig for HttpOptions { fn forwards_to(&self) -> String { self.common_opts .forwards_to .clone() .unwrap_or(default_forwards_to().into()) } fn forwards_proto(&self) -> String { self.common_opts.forwards_proto.clone().unwrap_or_default() } fn verify_upstream_tls(&self) -> bool { self.common_opts.verify_upstream_tls() } fn extra(&self) -> BindExtra { BindExtra { token: Default::default(), ip_policy_ref: Default::default(), metadata: self.common_opts.metadata.clone().unwrap_or_default(), bindings: self.bindings.clone(), pooling_enabled: self.common_opts.pooling_enabled.unwrap_or(false), } } fn proto(&self) -> String { if self.scheme == Scheme::HTTP { return "http".into(); } "https".into() } fn opts(&self) -> Option { let http_endpoint = HttpEndpoint { proxy_proto: self.common_opts.proxy_proto, domain: self.domain.clone().unwrap_or_default(), hostname: String::new(), compression: self.compression.then_some(Compression {}), circuit_breaker: (self.circuit_breaker != 0f64).then_some(CircuitBreaker { error_threshold: self.circuit_breaker, }), ip_restriction: self.common_opts.ip_restriction(), basic_auth: (!self.basic_auth.is_empty()).then_some(self.basic_auth.as_slice().into()), oauth: self.oauth.clone().map(From::from), oidc: self.oidc.clone().map(From::from), webhook_verification: self.webhook_verification.clone().map(From::from), mutual_tls_ca: (!self.mutual_tlsca.is_empty()) .then_some(self.mutual_tlsca.as_slice().into()), request_headers: self .request_headers .has_entries() .then_some(self.request_headers.clone().into()), response_headers: self .response_headers .has_entries() .then_some(self.response_headers.clone().into()), websocket_tcp_converter: self .websocket_tcp_conversion 
.then_some(WebsocketTcpConverter {}), user_agent_filter: self.user_agent_filter(), traffic_policy: if self.common_opts.traffic_policy.is_some() { self.common_opts.traffic_policy.clone().map(From::from) } else if self.common_opts.policy.is_some() { self.common_opts.policy.clone().map(From::from) } else { None }, ..Default::default() }; Some(BindOpts::Http(http_endpoint)) } fn labels(&self) -> HashMap { HashMap::new() } } // transform into the wire protocol format impl From<&[(String, String)]> for BasicAuth { fn from(v: &[(String, String)]) -> Self { BasicAuth { credentials: v.iter().cloned().map(From::from).collect(), } } } // transform into the wire protocol format impl From<(String, String)> for BasicAuthCredential { fn from(b: (String, String)) -> Self { BasicAuthCredential { username: b.0, cleartext_password: b.1, hashed_password: vec![], // unused in this context } } } impl_builder! { /// A builder for a tunnel backing an HTTP endpoint. /// /// https://ngrok.com/docs/http/ HttpTunnelBuilder, HttpOptions, HttpTunnel, endpoint } impl HttpTunnelBuilder { /// Add the provided CIDR to the allowlist. /// /// https://ngrok.com/docs/http/ip-restrictions/ pub fn allow_cidr(&mut self, cidr: impl Into) -> &mut Self { self.options.common_opts.cidr_restrictions.allow(cidr); self } /// Add the provided CIDR to the denylist. /// /// https://ngrok.com/docs/http/ip-restrictions/ pub fn deny_cidr(&mut self, cidr: impl Into) -> &mut Self { self.options.common_opts.cidr_restrictions.deny(cidr); self } /// Sets the PROXY protocol version for connections over this tunnel. pub fn proxy_proto(&mut self, proxy_proto: ProxyProto) -> &mut Self { self.options.common_opts.proxy_proto = proxy_proto; self } /// Sets the opaque metadata string for this tunnel. 
/// /// https://ngrok.com/docs/api/resources/tunnels/#tunnel-fields pub fn metadata(&mut self, metadata: impl Into) -> &mut Self { self.options.common_opts.metadata = Some(metadata.into()); self } /// Sets the ingress configuration for this endpoint. /// /// Valid binding values are: /// - `"public"` - Publicly accessible endpoint /// - `"internal"` - Internal-only endpoint /// - `"kubernetes"` - Kubernetes cluster binding /// /// If not specified, the ngrok service will use its default binding configuration. /// /// # Panics /// /// Panics if called more than once or if an invalid binding value is provided. /// /// # Examples /// /// ```no_run /// # use ngrok::Session; /// # use ngrok::config::TunnelBuilder; /// # async fn example() -> Result<(), Box> { /// let session = Session::builder().authtoken_from_env().connect().await?; /// /// // Using string /// let tunnel = session.http_endpoint().binding("internal").listen().await?; /// /// // Using typed enum /// use ngrok::config::Binding; /// let tunnel = session.http_endpoint().binding(Binding::Public).listen().await?; /// # Ok(()) /// # } /// ``` pub fn binding(&mut self, binding: impl Into) -> &mut Self { if !self.options.bindings.is_empty() { panic!("binding() can only be called once"); } let binding_str = binding.into(); if let Err(e) = Binding::validate(&binding_str) { panic!("{}", e); } self.options.bindings.push(binding_str); self } /// Sets the ForwardsTo string for this tunnel. This can be viewed via the /// API or dashboard. /// /// This overrides the default process info if using /// [TunnelBuilder::listen], and is in turn overridden by the url provided /// to [ForwarderBuilder::listen_and_forward]. /// /// https://ngrok.com/docs/api/resources/tunnels/#tunnel-fields pub fn forwards_to(&mut self, forwards_to: impl Into) -> &mut Self { self.options.common_opts.forwards_to = Some(forwards_to.into()); self } /// Sets the L7 protocol for this tunnel. 
pub fn app_protocol(&mut self, app_protocol: impl Into) -> &mut Self { self.options.common_opts.forwards_proto = Some(app_protocol.into()); self } /// Disables backend TLS certificate verification for forwards from this tunnel. pub fn verify_upstream_tls(&mut self, verify_upstream_tls: bool) -> &mut Self { self.options .common_opts .set_verify_upstream_tls(verify_upstream_tls); self } /// Sets the scheme for this edge. pub fn scheme(&mut self, scheme: Scheme) -> &mut Self { self.options.scheme = scheme; self } /// Sets the domain to request for this edge. /// /// https://ngrok.com/docs/network-edge/domains-and-tcp-addresses/#domains pub fn domain(&mut self, domain: impl Into) -> &mut Self { self.options.domain = Some(domain.into()); self } /// Adds a certificate in PEM format to use for mutual TLS authentication. /// /// These will be used to authenticate client certificates for requests at /// the ngrok edge. /// /// https://ngrok.com/docs/http/mutual-tls/ pub fn mutual_tlsca(&mut self, mutual_tlsca: Bytes) -> &mut Self { self.options.mutual_tlsca.push(mutual_tlsca); self } /// Enables gzip compression. /// /// https://ngrok.com/docs/http/compression/ pub fn compression(&mut self) -> &mut Self { self.options.compression = true; self } /// Enables the websocket-to-tcp converter. /// /// https://ngrok.com/docs/http/websocket-tcp-converter/ pub fn websocket_tcp_conversion(&mut self) -> &mut Self { self.options.websocket_tcp_conversion = true; self } /// Sets the 5XX response ratio at which the ngrok edge will stop sending /// requests to this tunnel. /// /// https://ngrok.com/docs/http/circuit-breaker/ pub fn circuit_breaker(&mut self, circuit_breaker: f64) -> &mut Self { self.options.circuit_breaker = circuit_breaker; self } /// Automatically rewrite the host header to the one in the provided URL /// when calling [ForwarderBuilder::listen_and_forward]. Does nothing if /// using [TunnelBuilder::listen]. Defaults to `false`. 
/// /// If you need to set the host header to a specific value, use /// `cfg.request_header("host", "some.host.com")` instead. pub fn host_header_rewrite(&mut self, rewrite: bool) -> &mut Self { self.options.rewrite_host = rewrite; self } /// Adds a header to all requests to this edge. /// /// https://ngrok.com/docs/http/request-headers/ pub fn request_header( &mut self, name: impl Into, value: impl Into, ) -> &mut Self { self.options.request_headers.add(name, value); self } /// Adds a header to all responses coming from this edge. /// /// https://ngrok.com/docs/http/response-headers/ pub fn response_header( &mut self, name: impl Into, value: impl Into, ) -> &mut Self { self.options.response_headers.add(name, value); self } /// Removes a header from requests to this edge. /// /// https://ngrok.com/docs/http/request-headers/ pub fn remove_request_header(&mut self, name: impl Into) -> &mut Self { self.options.request_headers.remove(name); self } /// Removes a header from responses from this edge. /// /// https://ngrok.com/docs/http/response-headers/ pub fn remove_response_header(&mut self, name: impl Into) -> &mut Self { self.options.response_headers.remove(name); self } /// Adds the provided credentials to the list of basic authentication /// credentials. /// /// https://ngrok.com/docs/http/basic-auth/ pub fn basic_auth( &mut self, username: impl Into, password: impl Into, ) -> &mut Self { self.options .basic_auth .push((username.into(), password.into())); self } /// Set the OAuth configuraton for this edge. /// /// https://ngrok.com/docs/http/oauth/ pub fn oauth(&mut self, oauth: impl Borrow) -> &mut Self { self.options.oauth = Some(oauth.borrow().to_owned()); self } /// Set the OIDC configuration for this edge. /// /// https://ngrok.com/docs/http/openid-connect/ pub fn oidc(&mut self, oidc: impl Borrow) -> &mut Self { self.options.oidc = Some(oidc.borrow().to_owned()); self } /// Configures webhook verification for this edge. 
/// /// https://ngrok.com/docs/http/webhook-verification/ pub fn webhook_verification( &mut self, provider: impl Into, secret: impl Into, ) -> &mut Self { self.options.webhook_verification = Some(WebhookVerification { provider: provider.into(), secret: secret.into().into(), }); self } /// Add the provided regex to the allowlist. /// /// https://ngrok.com/docs/http/user-agent-filter/ pub fn allow_user_agent(&mut self, regex: impl Into) -> &mut Self { self.options.user_agent_filter.allow(regex); self } /// Add the provided regex to the denylist. /// /// https://ngrok.com/docs/http/user-agent-filter/ pub fn deny_user_agent(&mut self, regex: impl Into) -> &mut Self { self.options.user_agent_filter.deny(regex); self } /// DEPRECATED: use traffic_policy instead. pub fn policy(&mut self, s: S) -> Result<&mut Self, S::Error> where S: TryInto, { self.options.common_opts.policy = Some(s.try_into()?); Ok(self) } /// Set policy for this edge. pub fn traffic_policy(&mut self, policy_str: impl Into) -> &mut Self { self.options.common_opts.traffic_policy = Some(policy_str.into()); self } pub(crate) async fn for_forwarding_to(&mut self, to_url: &Url) -> &mut Self { self.options.common_opts.for_forwarding_to(to_url); if let Some(host) = to_url.host_str().filter(|_| self.options.rewrite_host) { self.request_header("host", host); } self } /// Allows the endpoint to pool with other endpoints with the same host/port/binding pub fn pooling_enabled(&mut self, pooling_enabled: impl Into) -> &mut Self { self.options.common_opts.pooling_enabled = Some(pooling_enabled.into()); self } } #[cfg(test)] mod test { use super::*; use crate::config::policies::test::POLICY_JSON; const METADATA: &str = "testmeta"; const TEST_FORWARD: &str = "testforward"; const TEST_FORWARD_PROTO: &str = "http2"; const ALLOW_CIDR: &str = "0.0.0.0/0"; const DENY_CIDR: &str = "10.1.1.1/32"; const CA_CERT: &[u8] = "test ca cert".as_bytes(); const CA_CERT2: &[u8] = "test ca cert2".as_bytes(); const DOMAIN: &str = "test 
domain"; const ALLOW_AGENT: &str = r"bar/(\d)+"; const DENY_AGENT: &str = r"foo/(\d)+"; #[test] fn test_interface_to_proto() { // pass to a function accepting the trait to avoid // "creates a temporary which is freed while still in use" tunnel_test( &HttpTunnelBuilder { session: None, options: Default::default(), } .allow_user_agent(ALLOW_AGENT) .deny_user_agent(DENY_AGENT) .allow_cidr(ALLOW_CIDR) .deny_cidr(DENY_CIDR) .proxy_proto(ProxyProto::V2) .metadata(METADATA) .scheme(Scheme::from_str("hTtPs").unwrap()) .domain(DOMAIN) .mutual_tlsca(CA_CERT.into()) .mutual_tlsca(CA_CERT2.into()) .compression() .websocket_tcp_conversion() .circuit_breaker(0.5) .request_header("X-Req-Yup", "true") .response_header("X-Res-Yup", "true") .remove_request_header("X-Req-Nope") .remove_response_header("X-Res-Nope") .oauth(OauthOptions::new("google")) .oauth( OauthOptions::new("google") .allow_email("@") .allow_domain("") .scope(""), ) .oidc(OidcOptions::new("", "", "")) .oidc( OidcOptions::new("", "", "") .allow_email("@") .allow_domain("") .scope(""), ) .webhook_verification("twilio", "asdf") .basic_auth("ngrok", "online1line") .forwards_to(TEST_FORWARD) .app_protocol("http2") .policy(POLICY_JSON) .unwrap() .options, ); } fn tunnel_test(tunnel_cfg: C) where C: TunnelConfig, { assert_eq!(TEST_FORWARD, tunnel_cfg.forwards_to()); assert_eq!(TEST_FORWARD_PROTO, tunnel_cfg.forwards_proto()); let extra = tunnel_cfg.extra(); assert_eq!(String::default(), *extra.token); assert_eq!(METADATA, extra.metadata); assert_eq!(Vec::::new(), extra.bindings); assert_eq!(String::default(), extra.ip_policy_ref); assert_eq!("https", tunnel_cfg.proto()); let opts = tunnel_cfg.opts().unwrap(); assert!(matches!(opts, BindOpts::Http { .. 
})); if let BindOpts::Http(endpoint) = opts { assert_eq!(DOMAIN, endpoint.domain); assert_eq!(String::default(), endpoint.subdomain); assert!(matches!(endpoint.proxy_proto, ProxyProto::V2)); let ip_restriction = endpoint.ip_restriction.unwrap(); assert_eq!(Vec::from([ALLOW_CIDR]), ip_restriction.allow_cidrs); assert_eq!(Vec::from([DENY_CIDR]), ip_restriction.deny_cidrs); let mutual_tls = endpoint.mutual_tls_ca.unwrap(); let mut agg = CA_CERT.to_vec(); agg.extend(CA_CERT2.to_vec()); assert_eq!(agg, mutual_tls.mutual_tls_ca); assert!(endpoint.compression.is_some()); assert!(endpoint.websocket_tcp_converter.is_some()); assert_eq!(0.5f64, endpoint.circuit_breaker.unwrap().error_threshold); let request_headers = endpoint.request_headers.unwrap(); assert_eq!(["x-req-yup:true"].to_vec(), request_headers.add); assert_eq!(["x-req-nope"].to_vec(), request_headers.remove); let response_headers = endpoint.response_headers.unwrap(); assert_eq!(["x-res-yup:true"].to_vec(), response_headers.add); assert_eq!(["x-res-nope"].to_vec(), response_headers.remove); let webhook = endpoint.webhook_verification.unwrap(); assert_eq!("twilio", webhook.provider); assert_eq!("asdf", *webhook.secret); assert!(webhook.sealed_secret.is_empty()); let creds = endpoint.basic_auth.unwrap().credentials; assert_eq!(1, creds.len()); assert_eq!("ngrok", creds[0].username); assert_eq!("online1line", creds[0].cleartext_password); assert!(creds[0].hashed_password.is_empty()); let oauth = endpoint.oauth.unwrap(); assert_eq!("google", oauth.provider); assert_eq!(["@"].to_vec(), oauth.allow_emails); assert_eq!([""].to_vec(), oauth.allow_domains); assert_eq!([""].to_vec(), oauth.scopes); assert_eq!(String::default(), oauth.client_id); assert_eq!(String::default(), *oauth.client_secret); assert!(oauth.sealed_client_secret.is_empty()); let oidc = endpoint.oidc.unwrap(); assert_eq!("", oidc.issuer_url); assert_eq!(["@"].to_vec(), oidc.allow_emails); assert_eq!([""].to_vec(), oidc.allow_domains); 
assert_eq!([""].to_vec(), oidc.scopes); assert_eq!("", oidc.client_id); assert_eq!("", *oidc.client_secret); assert!(oidc.sealed_client_secret.is_empty()); let user_agent_filter = endpoint.user_agent_filter.unwrap(); assert_eq!(Vec::from([ALLOW_AGENT]), user_agent_filter.allow); assert_eq!(Vec::from([DENY_AGENT]), user_agent_filter.deny); } assert_eq!(HashMap::new(), tunnel_cfg.labels()); } #[test] fn test_binding_valid_values() { let mut builder = HttpTunnelBuilder { session: None, options: Default::default(), }; // Test "public" builder.binding("public"); assert_eq!(vec!["public"], builder.options.bindings); // Test "internal" let mut builder = HttpTunnelBuilder { session: None, options: Default::default(), }; builder.binding("internal"); assert_eq!(vec!["internal"], builder.options.bindings); // Test "kubernetes" let mut builder = HttpTunnelBuilder { session: None, options: Default::default(), }; builder.binding("kubernetes"); assert_eq!(vec!["kubernetes"], builder.options.bindings); // Test with Binding enum let mut builder = HttpTunnelBuilder { session: None, options: Default::default(), }; builder.binding(Binding::Internal); assert_eq!(vec!["internal"], builder.options.bindings); } #[test] #[should_panic(expected = "Invalid binding value")] fn test_binding_invalid_value() { let mut builder = HttpTunnelBuilder { session: None, options: Default::default(), }; builder.binding("invalid"); } #[test] #[should_panic(expected = "binding() can only be called once")] fn test_binding_called_twice() { let mut builder = HttpTunnelBuilder { session: None, options: Default::default(), }; builder.binding("public"); builder.binding("internal"); } #[test] fn test_binding_with_domain() { let mut builder = HttpTunnelBuilder { session: None, options: Default::default(), }; builder.binding("internal").domain("foo.internal"); // Check that both binding and domain are set assert_eq!(vec!["internal"], builder.options.bindings); assert_eq!(Some("foo.internal".to_string()), 
builder.options.domain); // Check that they're properly included in extra() and opts() let extra = builder.options.extra(); assert_eq!(vec!["internal"], extra.bindings); let opts = builder.options.opts().unwrap(); if let BindOpts::Http(endpoint) = opts { assert_eq!("foo.internal", endpoint.domain); } else { panic!("Expected Http endpoint"); } } } ================================================ FILE: ngrok/src/config/labeled.rs ================================================ use std::collections::HashMap; use url::Url; // These are used for doc comment links. #[allow(unused_imports)] use crate::config::{ ForwarderBuilder, TunnelBuilder, }; use crate::{ config::common::{ default_forwards_to, CommonOpts, TunnelConfig, }, internals::proto::{ BindExtra, BindOpts, }, tunnel::LabeledTunnel, Session, }; /// Options for labeled tunnels. #[derive(Default, Clone)] struct LabeledOptions { pub(crate) common_opts: CommonOpts, pub(crate) labels: HashMap, } impl TunnelConfig for LabeledOptions { fn forwards_to(&self) -> String { self.common_opts .forwards_to .clone() .unwrap_or(default_forwards_to().into()) } fn forwards_proto(&self) -> String { self.common_opts.forwards_proto.clone().unwrap_or_default() } fn verify_upstream_tls(&self) -> bool { self.common_opts.verify_upstream_tls() } fn extra(&self) -> BindExtra { BindExtra { token: Default::default(), ip_policy_ref: Default::default(), metadata: self.common_opts.metadata.clone().unwrap_or_default(), bindings: Vec::new(), pooling_enabled: self.common_opts.pooling_enabled.unwrap_or(false), } } fn proto(&self) -> String { "".into() } fn opts(&self) -> Option { None } fn labels(&self) -> HashMap { self.labels.clone() } } impl_builder! { /// A builder for a labeled tunnel. LabeledTunnelBuilder, LabeledOptions, LabeledTunnel, edge } impl LabeledTunnelBuilder { /// Sets the opaque metadata string for this tunnel. /// Viewable via the API. 
/// /// https://ngrok.com/docs/api/resources/tunnels/#tunnel-fields pub fn metadata(&mut self, metadata: impl Into) -> &mut Self { self.options.common_opts.metadata = Some(metadata.into()); self } /// Add a label, value pair for this tunnel. /// /// https://ngrok.com/docs/network-edge/edges/#tunnel-group pub fn label(&mut self, label: impl Into, value: impl Into) -> &mut Self { self.options.labels.insert(label.into(), value.into()); self } /// Sets the ForwardsTo string for this tunnel. This can be viewed via the /// API or dashboard. /// /// This overrides the default process info if using /// [TunnelBuilder::listen], and is in turn overridden by the url provided /// to [ForwarderBuilder::listen_and_forward]. /// /// https://ngrok.com/docs/api/resources/tunnels/#tunnel-fields pub fn forwards_to(&mut self, forwards_to: impl Into) -> &mut Self { self.options.common_opts.forwards_to = forwards_to.into().into(); self } /// Sets the L7 protocol string for this tunnel. pub fn app_protocol(&mut self, app_protocol: impl Into) -> &mut Self { self.options.common_opts.forwards_proto = Some(app_protocol.into()); self } /// Disables backend TLS certificate verification for forwards from this tunnel. 
pub fn verify_upstream_tls(&mut self, verify_upstream_tls: bool) -> &mut Self { self.options .common_opts .set_verify_upstream_tls(verify_upstream_tls); self } pub(crate) async fn for_forwarding_to(&mut self, to_url: &Url) -> &mut Self { self.options.common_opts.for_forwarding_to(to_url); self } } #[cfg(test)] mod test { use super::*; const METADATA: &str = "testmeta"; const LABEL_KEY: &str = "edge"; const LABEL_VAL: &str = "edghts_2IC6RJ6CQnuh7waciWyaGKc50Nt"; #[test] fn test_interface_to_proto() { // pass to a function accepting the trait to avoid // "creates a temporary which is freed while still in use" tunnel_test( &LabeledTunnelBuilder { session: None, options: Default::default(), } .metadata(METADATA) .label(LABEL_KEY, LABEL_VAL) .options, ); } fn tunnel_test(tunnel_cfg: &C) where C: TunnelConfig, { assert_eq!(default_forwards_to(), tunnel_cfg.forwards_to()); let extra = tunnel_cfg.extra(); assert_eq!(String::default(), *extra.token); assert_eq!(METADATA, extra.metadata); assert_eq!(String::default(), extra.ip_policy_ref); assert_eq!("", tunnel_cfg.proto()); assert!(tunnel_cfg.opts().is_none()); let mut labels: HashMap = HashMap::new(); labels.insert(LABEL_KEY.into(), LABEL_VAL.into()); assert_eq!(labels, tunnel_cfg.labels()); } } ================================================ FILE: ngrok/src/config/oauth.rs ================================================ use crate::internals::proto::{ Oauth, SecretString, }; /// Oauth Options configuration /// /// https://ngrok.com/docs/http/oauth/ #[derive(Clone, Default)] pub struct OauthOptions { /// The OAuth provider to use provider: String, /// The client ID, if a custom one is being used client_id: String, /// The client secret, if a custom one is being used client_secret: SecretString, /// Email addresses of users to authorize. allow_emails: Vec, /// Email domains of users to authorize. allow_domains: Vec, /// OAuth scopes to request from the provider. 
scopes: Vec, } impl OauthOptions { /// Create a new [OauthOptions] for the given provider. pub fn new(provider: impl Into) -> Self { OauthOptions { provider: provider.into(), ..Default::default() } } /// Provide an OAuth client ID for custom apps. pub fn client_id(&mut self, id: impl Into) -> &mut Self { self.client_id = id.into(); self } /// Provide an OAuth client secret for custom apps. pub fn client_secret(&mut self, secret: impl Into) -> &mut Self { self.client_secret = SecretString::from(secret.into()); self } /// Append an email address to the list of allowed emails. pub fn allow_email(&mut self, email: impl Into) -> &mut Self { self.allow_emails.push(email.into()); self } /// Append an email domain to the list of allowed domains. pub fn allow_domain(&mut self, domain: impl Into) -> &mut Self { self.allow_domains.push(domain.into()); self } /// Append a scope to the list of scopes to request. pub fn scope(&mut self, scope: impl Into) -> &mut Self { self.scopes.push(scope.into()); self } } // transform into the wire protocol format impl From for Oauth { fn from(o: OauthOptions) -> Self { Oauth { provider: o.provider, client_id: o.client_id, client_secret: o.client_secret, sealed_client_secret: Default::default(), // unused in this context allow_emails: o.allow_emails, allow_domains: o.allow_domains, scopes: o.scopes, } } } ================================================ FILE: ngrok/src/config/oidc.rs ================================================ use crate::internals::proto::{ Oidc, SecretString, }; /// Oidc Options configuration /// /// https://ngrok.com/docs/http/openid-connect/ #[derive(Clone, Default)] pub struct OidcOptions { issuer_url: String, client_id: String, client_secret: SecretString, allow_emails: Vec, allow_domains: Vec, scopes: Vec, } impl OidcOptions { /// Create a new [OidcOptions] with the given issuer and client information. 
pub fn new( issuer_url: impl Into, client_id: impl Into, client_secret: impl Into, ) -> Self { OidcOptions { issuer_url: issuer_url.into(), client_id: client_id.into(), client_secret: client_secret.into().into(), ..Default::default() } } /// Allow the oidc user with the given email to access the tunnel. pub fn allow_email(&mut self, email: impl Into) -> &mut Self { self.allow_emails.push(email.into()); self } /// Allow the oidc user with the given email domain to access the tunnel. pub fn allow_domain(&mut self, domain: impl Into) -> &mut Self { self.allow_domains.push(domain.into()); self } /// Request the given scope from the oidc provider. pub fn scope(&mut self, scope: impl Into) -> &mut Self { self.scopes.push(scope.into()); self } } // transform into the wire protocol format impl From for Oidc { fn from(o: OidcOptions) -> Self { Oidc { issuer_url: o.issuer_url, client_id: o.client_id, client_secret: o.client_secret, sealed_client_secret: Default::default(), // unused in this context allow_emails: o.allow_emails, allow_domains: o.allow_domains, scopes: o.scopes, } } } ================================================ FILE: ngrok/src/config/policies.rs ================================================ use std::{ fs::read_to_string, io, }; use serde::{ Deserialize, Serialize, }; use thiserror::Error; use crate::internals::proto; /// A policy that defines rules that should be applied to incoming or outgoing /// connections to the edge. 
#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq)]
#[serde(default)]
pub struct Policy {
    inbound: Vec<Rule>,
    outbound: Vec<Rule>,
}

/// A policy rule that should be applied
#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq)]
#[serde(default)]
pub struct Rule {
    name: String,
    expressions: Vec<String>,
    actions: Vec<Action>,
}

/// An action that should be taken if the rule matches
#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq)]
#[serde(default)]
pub struct Action {
    // serialized as "type" on the wire; `type` is a Rust keyword
    #[serde(rename = "type")]
    type_: String,
    // arbitrary JSON config blob; absent when the action needs none
    config: Option<serde_json::Value>,
}

/// Errors in creating or serializing Policies
#[derive(Debug, Error)]
pub enum InvalidPolicy {
    /// Error representing an invalid string for a Policy
    #[error("failure to parse or generate policy")]
    SerializationError(#[from] serde_json::Error),
    /// An error loading a Policy from a file
    #[error("failure to read policy file '{}'", .1)]
    FileReadError(#[source] io::Error, String),
}

impl Policy {
    /// Create a new empty [Policy] struct
    pub fn new() -> Self {
        Policy {
            ..Default::default()
        }
    }
    /// Create a new [Policy] from a json string
    fn from_json(json: impl AsRef<str>) -> Result<Self, InvalidPolicy> {
        serde_json::from_str(json.as_ref()).map_err(InvalidPolicy::SerializationError)
    }
    /// Create a new [Policy] from a json file
    pub fn from_file(json_file_path: impl AsRef<str>) -> Result<Self, InvalidPolicy> {
        Policy::from_json(
            read_to_string(json_file_path.as_ref()).map_err(|e| {
                InvalidPolicy::FileReadError(e, json_file_path.as_ref().to_string())
            })?,
        )
    }
    /// Convert [Policy] to json string
    pub fn to_json(&self) -> Result<String, InvalidPolicy> {
        serde_json::to_string(&self).map_err(InvalidPolicy::SerializationError)
    }
    /// Add an inbound policy
    pub fn add_inbound(&mut self, rule: impl Into<Rule>) -> &mut Self {
        self.inbound.push(rule.into());
        self
    }
    /// Add an outbound policy
    pub fn add_outbound(&mut self, rule: impl Into<Rule>) -> &mut Self {
        self.outbound.push(rule.into());
        self
    }
}

// The TryFrom impls below let builder methods accept `&Policy`, a
// `Result<Policy, InvalidPolicy>` (so `Policy::from_file(...)` can be passed
// directly), or a raw json `&str` interchangeably.
impl TryFrom<&Policy> for Policy {
    type Error = InvalidPolicy;

    fn try_from(other: &Policy) -> Result<Policy, Self::Error> {
        Ok(other.clone())
    }
}

impl TryFrom<Result<Policy, InvalidPolicy>> for Policy {
    type Error = InvalidPolicy;

    fn try_from(other: Result<Policy, InvalidPolicy>) -> Result<Policy, Self::Error> {
        other
    }
}

impl TryFrom<&str> for Policy {
    type Error = InvalidPolicy;

    fn try_from(other: &str) -> Result<Policy, Self::Error> {
        Policy::from_json(other)
    }
}

impl Rule {
    /// Create a new [Rule]
    pub fn new(name: impl Into<String>) -> Self {
        Rule {
            name: name.into(),
            ..Default::default()
        }
    }
    /// Convert [Rule] to json string
    pub fn to_json(&self) -> Result<String, InvalidPolicy> {
        serde_json::to_string(&self).map_err(InvalidPolicy::SerializationError)
    }
    /// Add an expression
    pub fn add_expression(&mut self, expression: impl Into<String>) -> &mut Self {
        self.expressions.push(expression.into());
        self
    }
    /// Add an action
    pub fn add_action(&mut self, action: Action) -> &mut Self {
        self.actions.push(action);
        self
    }
}

// allows chained `Rule::new(..).add_expression(..)` (which yields `&mut Rule`)
// to be passed straight to `add_inbound`/`add_outbound`
impl From<&mut Rule> for Rule {
    fn from(other: &mut Rule) -> Self {
        other.to_owned()
    }
}

impl Action {
    /// Create a new [Action]
    ///
    /// `config`, when present, must be a valid json document.
    pub fn new(type_: impl Into<String>, config: Option<&str>) -> Result<Self, InvalidPolicy> {
        Ok(Action {
            type_: type_.into(),
            config: config
                .map(|c| serde_json::from_str(c).map_err(InvalidPolicy::SerializationError))
                .transpose()?,
        })
    }
    /// Convert [Action] to json string
    pub fn to_json(&self) -> Result<String, InvalidPolicy> {
        serde_json::to_string(&self).map_err(InvalidPolicy::SerializationError)
    }
}

impl From<Policy> for proto::PolicyWrapper {
    fn from(value: Policy) -> Self {
        proto::PolicyWrapper::Policy(value.into())
    }
}

// transform into the wire protocol format
impl From<Policy> for proto::Policy {
    fn from(o: Policy) -> Self {
        proto::Policy {
            inbound: o.inbound.into_iter().map(|p| p.into()).collect(),
            outbound: o.outbound.into_iter().map(|p| p.into()).collect(),
        }
    }
}

impl From<Rule> for proto::Rule {
    fn from(p: Rule) -> Self {
        proto::Rule {
            name: p.name,
            expressions: p.expressions,
            actions: p.actions.into_iter().map(|a| a.into()).collect(),
        }
    }
}

impl From<Action> for proto::Action {
    fn from(a: Action) -> Self {
        proto::Action {
            type_: a.type_,
            // proto carries the config as raw json bytes; empty when unset
            config: a
                .config
                .map(|c| c.to_string().into_bytes())
                .unwrap_or_default(),
        }
    }
}

#[cfg(test)]
pub(crate) mod test {
    use super::*;
pub(crate) const POLICY_JSON: &str = r###" {"inbound": [ { "name": "test_in", "expressions": ["req.Method == 'PUT'"], "actions": [{"type": "deny"}] } ], "outbound": [ { "name": "test_out", "expressions": ["res.StatusCode == '200'"], "actions": [{"type": "custom-response", "config": {"status_code":201}}] } ]} "###; #[test] fn test_json_to_policy() { let policy: Policy = Policy::from_json(POLICY_JSON).unwrap(); assert_eq!(1, policy.inbound.len()); assert_eq!(1, policy.outbound.len()); let inbound = &policy.inbound[0]; let outbound = &policy.outbound[0]; assert_eq!("test_in", inbound.name); assert_eq!(1, inbound.expressions.len()); assert_eq!(1, inbound.actions.len()); assert_eq!("req.Method == 'PUT'", inbound.expressions[0]); assert_eq!("deny", inbound.actions[0].type_); assert_eq!(None, inbound.actions[0].config); assert_eq!("test_out", outbound.name); assert_eq!(1, outbound.expressions.len()); assert_eq!(1, outbound.actions.len()); assert_eq!("res.StatusCode == '200'", outbound.expressions[0]); assert_eq!("custom-response", outbound.actions[0].type_); assert_eq!( "{\"status_code\":201}", outbound.actions[0].config.as_ref().unwrap().to_string() ); } #[test] fn test_empty_json_to_policy() { let policy: Policy = Policy::from_json("{}").unwrap(); assert_eq!(0, policy.inbound.len()); assert_eq!(0, policy.outbound.len()); } #[test] fn test_policy_to_json() { let policy = Policy::from_json(POLICY_JSON).unwrap(); let json = policy.to_json().unwrap(); let policy2 = Policy::from_json(json).unwrap(); assert_eq!(policy, policy2); } #[test] fn test_policy_to_json_error() { let error = Policy::from_json("asdf").err().unwrap(); assert!(matches!(error, InvalidPolicy::SerializationError { .. 
})); } #[test] fn test_rule_to_json() { let policy = Policy::from_json(POLICY_JSON).unwrap(); let rule = &policy.outbound[0]; let json = rule.to_json().unwrap(); let parsed: serde_json::Value = serde_json::from_str(&json).unwrap(); let rule_map = parsed.as_object().unwrap(); assert_eq!("test_out", rule_map["name"]); // expressions let expressions = rule_map["expressions"].as_array().unwrap(); assert_eq!(1, expressions.len()); assert_eq!("res.StatusCode == '200'", expressions[0]); // actions let actions = rule_map["actions"].as_array().unwrap(); assert_eq!(1, actions.len()); assert_eq!("custom-response", actions[0]["type"]); assert_eq!(201, actions[0]["config"]["status_code"]); } #[test] fn test_action_to_json() { let policy = Policy::from_json(POLICY_JSON).unwrap(); let action = &policy.outbound[0].actions[0]; let json = action.to_json().unwrap(); let parsed: serde_json::Value = serde_json::from_str(&json).unwrap(); let action_map = parsed.as_object().unwrap(); assert_eq!("custom-response", action_map["type"]); assert_eq!(201, action_map["config"]["status_code"]); } #[test] fn test_builders() { let policy = Policy::from_json(POLICY_JSON).unwrap(); let policy2 = Policy::new() .add_inbound( Rule::new("test_in") .add_expression("req.Method == 'PUT'") .add_action(Action::new("deny", None).unwrap()), ) .add_outbound( Rule::new("test_out") .add_expression("res.StatusCode == '200'") // .add_action(Action::new("deny", "")) .add_action( Action::new("custom-response", Some("{\"status_code\":201}")).unwrap(), ), ) .to_owned(); assert_eq!(policy, policy2); } #[test] fn test_load_file() { let policy = Policy::from_json(POLICY_JSON).unwrap(); let policy2 = Policy::from_file("assets/policy.json").unwrap(); assert_eq!("test_in", policy2.inbound[0].name); assert_eq!("test_out", policy2.outbound[0].name); assert_eq!(policy, policy2); } #[test] fn test_load_inbound_file() { let policy = Policy::from_file("assets/policy-inbound.json").unwrap(); assert_eq!("test_in", 
policy.inbound[0].name); assert_eq!(0, policy.outbound.len()); } #[test] fn test_load_file_error() { let error = Policy::from_file("assets/absent.json").err().unwrap(); assert!(matches!(error, InvalidPolicy::FileReadError { .. })); } } ================================================ FILE: ngrok/src/config/tcp.rs ================================================ use std::{ collections::HashMap, convert::From, }; use url::Url; use super::{ common::ProxyProto, Policy, }; // These are used for doc comment links. #[allow(unused_imports)] use crate::config::{ ForwarderBuilder, TunnelBuilder, }; use crate::{ config::common::{ default_forwards_to, Binding, CommonOpts, TunnelConfig, }, internals::proto::{ self, BindExtra, BindOpts, }, tunnel::TcpTunnel, Session, }; /// The options for a TCP edge. #[derive(Default, Clone)] struct TcpOptions { pub(crate) common_opts: CommonOpts, pub(crate) remote_addr: Option, pub(crate) bindings: Vec, } impl TunnelConfig for TcpOptions { fn forwards_to(&self) -> String { self.common_opts .forwards_to .clone() .unwrap_or(default_forwards_to().into()) } fn extra(&self) -> BindExtra { BindExtra { token: Default::default(), ip_policy_ref: Default::default(), metadata: self.common_opts.metadata.clone().unwrap_or_default(), bindings: self.bindings.clone(), pooling_enabled: self.common_opts.pooling_enabled.unwrap_or(false), } } fn proto(&self) -> String { "tcp".into() } fn forwards_proto(&self) -> String { // not supported String::new() } fn verify_upstream_tls(&self) -> bool { self.common_opts.verify_upstream_tls() } fn opts(&self) -> Option { // fill out all the options, translating to proto here let mut tcp_endpoint = proto::TcpEndpoint::default(); if let Some(remote_addr) = self.remote_addr.as_ref() { tcp_endpoint.addr = remote_addr.clone(); } tcp_endpoint.proxy_proto = self.common_opts.proxy_proto; tcp_endpoint.ip_restriction = self.common_opts.ip_restriction(); tcp_endpoint.traffic_policy = if self.common_opts.traffic_policy.is_some() { 
self.common_opts.traffic_policy.clone().map(From::from) } else if self.common_opts.policy.is_some() { self.common_opts.policy.clone().map(From::from) } else { None }; Some(BindOpts::Tcp(tcp_endpoint)) } fn labels(&self) -> HashMap { HashMap::new() } } impl_builder! { /// A builder for a tunnel backing a TCP endpoint. /// /// https://ngrok.com/docs/tcp/ TcpTunnelBuilder, TcpOptions, TcpTunnel, endpoint } /// The options for a TCP edge. impl TcpTunnelBuilder { /// Add the provided CIDR to the allowlist. /// /// https://ngrok.com/docs/tcp/ip-restrictions/ pub fn allow_cidr(&mut self, cidr: impl Into) -> &mut Self { self.options.common_opts.cidr_restrictions.allow(cidr); self } /// Add the provided CIDR to the denylist. /// /// https://ngrok.com/docs/tcp/ip-restrictions/ pub fn deny_cidr(&mut self, cidr: impl Into) -> &mut Self { self.options.common_opts.cidr_restrictions.deny(cidr); self } /// Sets the PROXY protocol version for connections over this tunnel. pub fn proxy_proto(&mut self, proxy_proto: ProxyProto) -> &mut Self { self.options.common_opts.proxy_proto = proxy_proto; self } /// Sets the opaque metadata string for this tunnel. /// /// https://ngrok.com/docs/api/resources/tunnels/#tunnel-fields pub fn metadata(&mut self, metadata: impl Into) -> &mut Self { self.options.common_opts.metadata = Some(metadata.into()); self } /// Sets the ingress configuration for this endpoint. /// /// Valid binding values are: /// - `"public"` - Publicly accessible endpoint /// - `"internal"` - Internal-only endpoint /// - `"kubernetes"` - Kubernetes cluster binding /// /// If not specified, the ngrok service will use its default binding configuration. /// /// # Panics /// /// Panics if called more than once or if an invalid binding value is provided. 
/// /// # Examples /// /// ```no_run /// # use ngrok::Session; /// # use ngrok::config::TunnelBuilder; /// # async fn example() -> Result<(), Box> { /// let session = Session::builder().authtoken_from_env().connect().await?; /// /// // Using string /// let tunnel = session.tcp_endpoint().binding("internal").listen().await?; /// /// // Using typed enum /// use ngrok::config::Binding; /// let tunnel = session.tcp_endpoint().binding(Binding::Public).listen().await?; /// # Ok(()) /// # } /// ``` pub fn binding(&mut self, binding: impl Into) -> &mut Self { if !self.options.bindings.is_empty() { panic!("binding() can only be called once"); } let binding_str = binding.into(); if let Err(e) = Binding::validate(&binding_str) { panic!("{}", e); } self.options.bindings.push(binding_str); self } /// Sets the ForwardsTo string for this tunnel. This can be viewed via the /// API or dashboard. /// /// This overrides the default process info if using /// [TunnelBuilder::listen], and is in turn overridden by the url provided /// to [ForwarderBuilder::listen_and_forward]. /// /// https://ngrok.com/docs/api/resources/tunnels/#tunnel-fields pub fn forwards_to(&mut self, forwards_to: impl Into) -> &mut Self { self.options.common_opts.forwards_to = Some(forwards_to.into()); self } /// Disables backend TLS certificate verification for forwards from this tunnel. pub fn verify_upstream_tls(&mut self, verify_upstream_tls: bool) -> &mut Self { self.options .common_opts .set_verify_upstream_tls(verify_upstream_tls); self } /// Sets the TCP address to request for this edge. /// /// https://ngrok.com/docs/network-edge/domains-and-tcp-addresses/#tcp-addresses pub fn remote_addr(&mut self, remote_addr: impl Into) -> &mut Self { self.options.remote_addr = Some(remote_addr.into()); self } /// DEPRECATED: use traffic_policy instead. 
pub fn policy(&mut self, s: S) -> Result<&mut Self, S::Error> where S: TryInto, { self.options.common_opts.policy = Some(s.try_into()?); Ok(self) } /// Set policy for this edge. pub fn traffic_policy(&mut self, policy_str: impl Into) -> &mut Self { self.options.common_opts.traffic_policy = Some(policy_str.into()); self } pub(crate) async fn for_forwarding_to(&mut self, to_url: &Url) -> &mut Self { self.options.common_opts.for_forwarding_to(to_url); self } /// Allows the endpoint to pool with other endpoints with the same host/port/binding pub fn pooling_enabled(&mut self, pooling_enabled: impl Into) -> &mut Self { self.options.common_opts.pooling_enabled = Some(pooling_enabled.into()); self } } #[cfg(test)] mod test { use super::*; use crate::config::policies::test::POLICY_JSON; const METADATA: &str = "testmeta"; const TEST_FORWARD: &str = "testforward"; const REMOTE_ADDR: &str = "4.tcp.ngrok.io:1337"; const ALLOW_CIDR: &str = "0.0.0.0/0"; const DENY_CIDR: &str = "10.1.1.1/32"; #[test] fn test_interface_to_proto() { // pass to a function accepting the trait to avoid // "creates a temporary which is freed while still in use" tunnel_test( &TcpTunnelBuilder { session: None, options: Default::default(), } .allow_cidr(ALLOW_CIDR) .deny_cidr(DENY_CIDR) .proxy_proto(ProxyProto::V2) .metadata(METADATA) .remote_addr(REMOTE_ADDR) .forwards_to(TEST_FORWARD) .policy(POLICY_JSON) .unwrap() .options, ); } fn tunnel_test(tunnel_cfg: &C) where C: TunnelConfig, { assert_eq!(TEST_FORWARD, tunnel_cfg.forwards_to()); let extra = tunnel_cfg.extra(); assert_eq!(String::default(), *extra.token); assert_eq!(METADATA, extra.metadata); assert_eq!(Vec::::new(), extra.bindings); assert_eq!(String::default(), extra.ip_policy_ref); assert_eq!("tcp", tunnel_cfg.proto()); let opts = tunnel_cfg.opts().unwrap(); assert!(matches!(opts, BindOpts::Tcp { .. 
})); if let BindOpts::Tcp(endpoint) = opts { assert_eq!(REMOTE_ADDR, endpoint.addr); assert!(matches!(endpoint.proxy_proto, ProxyProto::V2)); let ip_restriction = endpoint.ip_restriction.unwrap(); assert_eq!(Vec::from([ALLOW_CIDR]), ip_restriction.allow_cidrs); assert_eq!(Vec::from([DENY_CIDR]), ip_restriction.deny_cidrs); } assert_eq!(HashMap::new(), tunnel_cfg.labels()); } #[test] fn test_binding_valid_values() { let mut builder = TcpTunnelBuilder { session: None, options: Default::default(), }; // Test "public" builder.binding("public"); assert_eq!(vec!["public"], builder.options.bindings); // Test "internal" let mut builder = TcpTunnelBuilder { session: None, options: Default::default(), }; builder.binding("internal"); assert_eq!(vec!["internal"], builder.options.bindings); // Test "kubernetes" let mut builder = TcpTunnelBuilder { session: None, options: Default::default(), }; builder.binding("kubernetes"); assert_eq!(vec!["kubernetes"], builder.options.bindings); // Test with Binding enum let mut builder = TcpTunnelBuilder { session: None, options: Default::default(), }; builder.binding(Binding::Public); assert_eq!(vec!["public"], builder.options.bindings); } #[test] #[should_panic(expected = "Invalid binding value")] fn test_binding_invalid_value() { let mut builder = TcpTunnelBuilder { session: None, options: Default::default(), }; builder.binding("invalid"); } #[test] #[should_panic(expected = "binding() can only be called once")] fn test_binding_called_twice() { let mut builder = TcpTunnelBuilder { session: None, options: Default::default(), }; builder.binding("public"); builder.binding("internal"); } } ================================================ FILE: ngrok/src/config/tls.rs ================================================ use std::collections::HashMap; use bytes::Bytes; use url::Url; use super::{ common::ProxyProto, Policy, }; // These are used for doc comment links. 
#[allow(unused_imports)] use crate::config::{ ForwarderBuilder, TunnelBuilder, }; use crate::{ config::common::{ default_forwards_to, Binding, CommonOpts, TunnelConfig, }, internals::proto::{ self, BindExtra, BindOpts, TlsTermination, }, tunnel::TlsTunnel, Session, }; /// The options for TLS edges. #[derive(Default, Clone)] struct TlsOptions { pub(crate) common_opts: CommonOpts, pub(crate) domain: Option, pub(crate) mutual_tlsca: Vec, pub(crate) key_pem: Option, pub(crate) cert_pem: Option, pub(crate) bindings: Vec, } impl TunnelConfig for TlsOptions { fn forwards_to(&self) -> String { self.common_opts .forwards_to .clone() .unwrap_or(default_forwards_to().into()) } fn forwards_proto(&self) -> String { // not supported String::new() } fn verify_upstream_tls(&self) -> bool { self.common_opts.verify_upstream_tls() } fn extra(&self) -> BindExtra { BindExtra { token: Default::default(), ip_policy_ref: Default::default(), metadata: self.common_opts.metadata.clone().unwrap_or_default(), bindings: self.bindings.clone(), pooling_enabled: self.common_opts.pooling_enabled.unwrap_or(false), } } fn proto(&self) -> String { "tls".into() } fn opts(&self) -> Option { // fill out all the options, translating to proto here let mut tls_endpoint = proto::TlsEndpoint::default(); if let Some(domain) = self.domain.as_ref() { tls_endpoint.domain = domain.clone(); } tls_endpoint.proxy_proto = self.common_opts.proxy_proto; // doing some backflips to check both cert_pem and key_pem are set, and avoid unwrapping let tls_termination = self .cert_pem .as_ref() .zip(self.key_pem.as_ref()) .map(|(c, k)| TlsTermination { cert: c.to_vec(), key: k.to_vec().into(), sealed_key: Vec::new(), }); tls_endpoint.ip_restriction = self.common_opts.ip_restriction(); tls_endpoint.mutual_tls_at_edge = (!self.mutual_tlsca.is_empty()).then_some(self.mutual_tlsca.as_slice().into()); tls_endpoint.tls_termination = tls_termination; tls_endpoint.traffic_policy = if self.common_opts.traffic_policy.is_some() { 
self.common_opts.traffic_policy.clone().map(From::from) } else if self.common_opts.policy.is_some() { self.common_opts.policy.clone().map(From::from) } else { None }; Some(BindOpts::Tls(tls_endpoint)) } fn labels(&self) -> HashMap { HashMap::new() } } impl_builder! { /// A builder for a tunnel backing a TCP endpoint. /// /// https://ngrok.com/docs/tls/ TlsTunnelBuilder, TlsOptions, TlsTunnel, endpoint } impl TlsTunnelBuilder { /// Add the provided CIDR to the allowlist. /// /// https://ngrok.com/docs/tls/ip-restrictions/ pub fn allow_cidr(&mut self, cidr: impl Into) -> &mut Self { self.options.common_opts.cidr_restrictions.allow(cidr); self } /// Add the provided CIDR to the denylist. /// /// https://ngrok.com/docs/tls/ip-restrictions/ pub fn deny_cidr(&mut self, cidr: impl Into) -> &mut Self { self.options.common_opts.cidr_restrictions.deny(cidr); self } /// Sets the PROXY protocol version for connections over this tunnel. pub fn proxy_proto(&mut self, proxy_proto: ProxyProto) -> &mut Self { self.options.common_opts.proxy_proto = proxy_proto; self } /// Sets the opaque metadata string for this tunnel. /// /// https://ngrok.com/docs/api/resources/tunnels/#tunnel-fields pub fn metadata(&mut self, metadata: impl Into) -> &mut Self { self.options.common_opts.metadata = Some(metadata.into()); self } /// Sets the ingress configuration for this endpoint. /// /// Valid binding values are: /// - `"public"` - Publicly accessible endpoint /// - `"internal"` - Internal-only endpoint /// - `"kubernetes"` - Kubernetes cluster binding /// /// If not specified, the ngrok service will use its default binding configuration. /// /// # Panics /// /// Panics if called more than once or if an invalid binding value is provided. 
/// /// # Examples /// /// ```no_run /// # use ngrok::Session; /// # use ngrok::config::TunnelBuilder; /// # async fn example() -> Result<(), Box> { /// let session = Session::builder().authtoken_from_env().connect().await?; /// /// // Using string /// let tunnel = session.tls_endpoint().binding("internal").listen().await?; /// /// // Using typed enum /// use ngrok::config::Binding; /// let tunnel = session.tls_endpoint().binding(Binding::Public).listen().await?; /// # Ok(()) /// # } /// ``` pub fn binding(&mut self, binding: impl Into) -> &mut Self { if !self.options.bindings.is_empty() { panic!("binding() can only be called once"); } let binding_str = binding.into(); if let Err(e) = Binding::validate(&binding_str) { panic!("{}", e); } self.options.bindings.push(binding_str); self } /// Sets the ForwardsTo string for this tunnel. This can be viewed via the /// API or dashboard. /// /// This overrides the default process info if using /// [TunnelBuilder::listen], and is in turn overridden by the url provided /// to [ForwarderBuilder::listen_and_forward]. /// /// https://ngrok.com/docs/api/resources/tunnels/#tunnel-fields pub fn forwards_to(&mut self, forwards_to: impl Into) -> &mut Self { self.options.common_opts.forwards_to = Some(forwards_to.into()); self } /// Disables backend TLS certificate verification for forwards from this tunnel. pub fn verify_upstream_tls(&mut self, verify_upstream_tls: bool) -> &mut Self { self.options .common_opts .set_verify_upstream_tls(verify_upstream_tls); self } /// Sets the domain to request for this edge. /// /// https://ngrok.com/docs/network-edge/domains-and-tcp-addresses/#domains pub fn domain(&mut self, domain: impl Into) -> &mut Self { self.options.domain = Some(domain.into()); self } /// Adds a certificate in PEM format to use for mutual TLS authentication. /// /// These will be used to authenticate client certificates for requests at /// the ngrok edge. 
/// /// https://ngrok.com/docs/tls/mutual-tls/ pub fn mutual_tlsca(&mut self, mutual_tlsca: Bytes) -> &mut Self { self.options.mutual_tlsca.push(mutual_tlsca); self } /// Sets the key and certificate in PEM format for TLS termination at the /// ngrok edge. /// /// https://ngrok.com/docs/tls/tls-termination/ pub fn termination(&mut self, cert_pem: Bytes, key_pem: Bytes) -> &mut Self { self.options.key_pem = Some(key_pem); self.options.cert_pem = Some(cert_pem); self } /// DEPRECATED: use traffic_policy instead. pub fn policy(&mut self, s: S) -> Result<&mut Self, S::Error> where S: TryInto, { self.options.common_opts.policy = Some(s.try_into()?); Ok(self) } /// Set policy for this edge. pub fn traffic_policy(&mut self, policy_str: impl Into) -> &mut Self { self.options.common_opts.traffic_policy = Some(policy_str.into()); self } pub(crate) async fn for_forwarding_to(&mut self, to_url: &Url) -> &mut Self { self.options.common_opts.for_forwarding_to(to_url); self } /// Allows the endpoint to pool with other endpoints with the same host/port/binding pub fn pooling_enabled(&mut self, pooling_enabled: impl Into) -> &mut Self { self.options.common_opts.pooling_enabled = Some(pooling_enabled.into()); self } } #[cfg(test)] mod test { use super::*; use crate::config::policies::test::POLICY_JSON; const METADATA: &str = "testmeta"; const TEST_FORWARD: &str = "testforward"; const ALLOW_CIDR: &str = "0.0.0.0/0"; const DENY_CIDR: &str = "10.1.1.1/32"; const CA_CERT: &[u8] = "test ca cert".as_bytes(); const CA_CERT2: &[u8] = "test ca cert2".as_bytes(); const KEY: &[u8] = "test cert".as_bytes(); const CERT: &[u8] = "test cert".as_bytes(); const DOMAIN: &str = "test domain"; #[test] fn test_interface_to_proto() { // pass to a function accepting the trait to avoid // "creates a temporary which is freed while still in use" tunnel_test( &TlsTunnelBuilder { session: None, options: Default::default(), } .allow_cidr(ALLOW_CIDR) .deny_cidr(DENY_CIDR) .proxy_proto(ProxyProto::V2) 
.metadata(METADATA) .domain(DOMAIN) .mutual_tlsca(CA_CERT.into()) .mutual_tlsca(CA_CERT2.into()) .termination(CERT.into(), KEY.into()) .forwards_to(TEST_FORWARD) .policy(POLICY_JSON) .unwrap() .options, ); } fn tunnel_test(tunnel_cfg: C) where C: TunnelConfig, { assert_eq!(TEST_FORWARD, tunnel_cfg.forwards_to()); let extra = tunnel_cfg.extra(); assert_eq!(String::default(), *extra.token); assert_eq!(METADATA, extra.metadata); assert_eq!(Vec::::new(), extra.bindings); assert_eq!(String::default(), extra.ip_policy_ref); assert_eq!("tls", tunnel_cfg.proto()); let opts = tunnel_cfg.opts().unwrap(); assert!(matches!(opts, BindOpts::Tls { .. })); if let BindOpts::Tls(endpoint) = opts { assert_eq!(DOMAIN, endpoint.domain); assert_eq!(String::default(), endpoint.subdomain); assert!(matches!(endpoint.proxy_proto, ProxyProto::V2)); assert!(!endpoint.mutual_tls_at_agent); let ip_restriction = endpoint.ip_restriction.unwrap(); assert_eq!(Vec::from([ALLOW_CIDR]), ip_restriction.allow_cidrs); assert_eq!(Vec::from([DENY_CIDR]), ip_restriction.deny_cidrs); let tls_termination = endpoint.tls_termination.unwrap(); assert_eq!(CERT, tls_termination.cert); assert_eq!(KEY, *tls_termination.key); assert!(tls_termination.sealed_key.is_empty()); let mutual_tls = endpoint.mutual_tls_at_edge.unwrap(); let mut agg = CA_CERT.to_vec(); agg.extend(CA_CERT2.to_vec()); assert_eq!(agg, mutual_tls.mutual_tls_ca); } assert_eq!(HashMap::new(), tunnel_cfg.labels()); } #[test] fn test_binding_valid_values() { let mut builder = TlsTunnelBuilder { session: None, options: Default::default(), }; // Test "public" builder.binding("public"); assert_eq!(vec!["public"], builder.options.bindings); // Test "internal" let mut builder = TlsTunnelBuilder { session: None, options: Default::default(), }; builder.binding("internal"); assert_eq!(vec!["internal"], builder.options.bindings); // Test "kubernetes" let mut builder = TlsTunnelBuilder { session: None, options: Default::default(), }; 
builder.binding("kubernetes"); assert_eq!(vec!["kubernetes"], builder.options.bindings); // Test with Binding enum let mut builder = TlsTunnelBuilder { session: None, options: Default::default(), }; builder.binding(Binding::Kubernetes); assert_eq!(vec!["kubernetes"], builder.options.bindings); } #[test] #[should_panic(expected = "Invalid binding value")] fn test_binding_invalid_value() { let mut builder = TlsTunnelBuilder { session: None, options: Default::default(), }; builder.binding("invalid"); } #[test] #[should_panic(expected = "binding() can only be called once")] fn test_binding_called_twice() { let mut builder = TlsTunnelBuilder { session: None, options: Default::default(), }; builder.binding("public"); builder.binding("internal"); } } ================================================ FILE: ngrok/src/config/webhook_verification.rs ================================================ use crate::internals::proto::{ SecretString, WebhookVerification as WebhookProto, }; /// Configuration for webhook verification. #[derive(Clone)] pub(crate) struct WebhookVerification { /// The webhook provider pub(crate) provider: String, /// The secret for verifying webhooks from this provider. pub(crate) secret: SecretString, } impl WebhookVerification {} // transform into the wire protocol format impl From for WebhookProto { fn from(wv: WebhookVerification) -> Self { WebhookProto { provider: wv.provider, secret: wv.secret, sealed_secret: vec![], // unused in this context } } } ================================================ FILE: ngrok/src/conn.rs ================================================ use std::{ net::SocketAddr, pin::Pin, task::{ Context, Poll, }, }; // Support for axum's connection info trait. 
#[cfg(feature = "axum")] use axum::extract::connect_info::Connected; #[cfg(feature = "hyper")] use hyper::rt::{ Read as HyperRead, Write as HyperWrite, }; use muxado::typed::TypedStream; use tokio::io::{ AsyncRead, AsyncWrite, }; use crate::{ config::ProxyProto, internals::proto::{ EdgeType, ProxyHeader, }, }; /// A connection from an ngrok tunnel. /// /// This implements [AsyncRead]/[AsyncWrite], as well as providing access to the /// address from which the connection to the ngrok edge originated. pub(crate) struct ConnInner { pub(crate) info: Info, pub(crate) stream: TypedStream, } #[derive(Clone)] pub(crate) struct Info { pub(crate) header: ProxyHeader, pub(crate) remote_addr: SocketAddr, pub(crate) proxy_proto: ProxyProto, pub(crate) app_protocol: Option, pub(crate) verify_upstream_tls: bool, } impl ConnInfo for Info { fn remote_addr(&self) -> SocketAddr { self.remote_addr } } impl EdgeConnInfo for Info { fn edge_type(&self) -> EdgeType { self.header.edge_type } fn passthrough_tls(&self) -> bool { self.header.passthrough_tls } } impl EndpointConnInfo for Info { fn proto(&self) -> &str { self.header.proto.as_str() } } // This codgen indirect is required to make the hyper io trait bounds // dependent on the hyper feature. You can't put a #[cfg] on a single bound, so // we're putting the whole trait def in a macro. Gross, but gets the job done. macro_rules! conn_trait { ($($hyper_bound:tt)*) => { /// An incoming connection over an ngrok tunnel. /// Effectively a trait alias for async read+write, plus connection info. pub trait Conn: ConnInfo + AsyncRead + AsyncWrite $($hyper_bound)* + Unpin + Send + 'static {} } } #[cfg(not(feature = "hyper"))] conn_trait!(); #[cfg(feature = "hyper")] conn_trait! { + hyper::rt::Read + hyper::rt::Write } /// Information common to all ngrok connections. pub trait ConnInfo { /// Returns the client address that initiated the connection to the ngrok /// edge. 
fn remote_addr(&self) -> SocketAddr; } /// Information about connections via ngrok edges. pub trait EdgeConnInfo { /// Returns the edge type for this connection. fn edge_type(&self) -> EdgeType; /// Returns whether the connection includes the tls handshake and encrypted /// stream. fn passthrough_tls(&self) -> bool; } /// Information about connections via ngrok endpoints. pub trait EndpointConnInfo { /// Returns the endpoint protocol. fn proto(&self) -> &str; } macro_rules! make_conn_type { (info EdgeConnInfo, $wrapper:tt) => { impl EdgeConnInfo for $wrapper { fn edge_type(&self) -> EdgeType { self.inner.info.edge_type() } fn passthrough_tls(&self) -> bool { self.inner.info.passthrough_tls() } } }; (info EndpointConnInfo, $wrapper:tt) => { impl EndpointConnInfo for $wrapper { fn proto(&self) -> &str { self.inner.info.proto() } } }; ($(#[$outer:meta])* $wrapper:ident, $($m:tt),*) => { $(#[$outer])* pub struct $wrapper { pub(crate) inner: ConnInner, } impl Conn for $wrapper {} impl ConnInfo for $wrapper { fn remote_addr(&self) -> SocketAddr { self.inner.info.remote_addr() } } impl AsyncRead for $wrapper { fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut tokio::io::ReadBuf<'_>, ) -> Poll> { Pin::new(&mut *self.inner.stream).poll_read(cx, buf) } } #[cfg(feature = "hyper")] impl HyperRead for $wrapper { fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, mut buf: hyper::rt::ReadBufCursor<'_>, ) -> Poll> { let mut tokio_buf = tokio::io::ReadBuf::uninit(unsafe{ buf.as_mut() }); let res = std::task::ready!(Pin::new(&mut *self.inner.stream).poll_read(cx, &mut tokio_buf)); let filled = tokio_buf.filled().len(); unsafe { buf.advance(filled) }; Poll::Ready(res) } } #[cfg(feature = "hyper")] impl HyperWrite for $wrapper { fn poll_write( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { Pin::new(&mut *self.inner.stream).poll_write(cx, buf) } fn poll_flush( mut self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll> { 
Pin::new(&mut *self.inner.stream).poll_flush(cx) } fn poll_shutdown( mut self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll> { Pin::new(&mut *self.inner.stream).poll_shutdown(cx) } } impl AsyncWrite for $wrapper { fn poll_write( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { Pin::new(&mut *self.inner.stream).poll_write(cx, buf) } fn poll_flush( mut self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll> { Pin::new(&mut *self.inner.stream).poll_flush(cx) } fn poll_shutdown( mut self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll> { Pin::new(&mut *self.inner.stream).poll_shutdown(cx) } } #[cfg_attr(docsrs, doc(cfg(feature = "axum")))] #[cfg(feature = "axum")] impl Connected<&$wrapper> for SocketAddr { fn connect_info(target: &$wrapper) -> Self { target.inner.info.remote_addr() } } $( make_conn_type!(info $m, $wrapper); )* }; } make_conn_type! { /// A connection via an ngrok Edge. EdgeConn, EdgeConnInfo } make_conn_type! { /// A connection via an ngrok Endpoint. EndpointConn, EndpointConnInfo } ================================================ FILE: ngrok/src/forwarder.rs ================================================ use std::{ collections::HashMap, error::Error as StdError, }; use async_trait::async_trait; use tokio::task::JoinHandle; use url::Url; use crate::{ prelude::{ EdgeInfo, EndpointInfo, TunnelCloser, TunnelInfo, }, session::RpcError, Tunnel, }; /// An ngrok forwarder. /// /// Represents a tunnel that is being forwarded to a URL. pub struct Forwarder { pub(crate) join: JoinHandle>>, pub(crate) inner: T, } impl Forwarder { /// Wait for the forwarding task to exit. 
pub fn join(&mut self) -> &mut JoinHandle>> { &mut self.join } } #[async_trait] impl TunnelCloser for Forwarder where T: TunnelCloser + Send, { async fn close(&mut self) -> Result<(), RpcError> { self.inner.close().await } } impl TunnelInfo for Forwarder where T: TunnelInfo, { fn id(&self) -> &str { self.inner.id() } fn forwards_to(&self) -> &str { self.inner.forwards_to() } fn metadata(&self) -> &str { self.inner.metadata() } } impl EndpointInfo for Forwarder where T: EndpointInfo, { fn proto(&self) -> &str { self.inner.proto() } fn url(&self) -> &str { self.inner.url() } } impl EdgeInfo for Forwarder where T: EdgeInfo, { fn labels(&self) -> &HashMap { self.inner.labels() } } pub(crate) fn forward(mut listener: T, info: T, to_url: Url) -> Result, RpcError> where T: Tunnel + Send + 'static, ::Conn: crate::tunnel_ext::ConnExt, { let handle = tokio::spawn( async move { Ok(crate::tunnel_ext::forward_tunnel(&mut listener, to_url).await?) }, ); Ok(Forwarder { join: handle, inner: info, }) } ================================================ FILE: ngrok/src/internals/proto.rs ================================================ use std::{ collections::HashMap, error, fmt, io, ops::{ Deref, DerefMut, }, str::FromStr, string::FromUtf8Error, sync::Arc, }; use muxado::typed::StreamType; use serde::{ de::{ DeserializeOwned, Visitor, }, Deserialize, Serialize, Serializer, }; use thiserror::Error; use tokio::io::{ AsyncRead, AsyncReadExt, }; use tracing::debug; pub const AUTH_REQ: StreamType = StreamType::clamp(0); pub const BIND_REQ: StreamType = StreamType::clamp(1); pub const UNBIND_REQ: StreamType = StreamType::clamp(2); pub const PROXY_REQ: StreamType = StreamType::clamp(3); pub const RESTART_REQ: StreamType = StreamType::clamp(4); pub const STOP_REQ: StreamType = StreamType::clamp(5); pub const UPDATE_REQ: StreamType = StreamType::clamp(6); pub const BIND_LABELED_REQ: StreamType = StreamType::clamp(7); pub const STOP_TUNNEL_REQ: StreamType = StreamType::clamp(9); pub const 
VERSION: &[&str] = &["3", "2"]; // integers in priority order /// An error that may have an ngrok error code. /// All ngrok error codes are documented at https://ngrok.com/docs/errors pub trait Error: error::Error { /// Return the ngrok error code, if one exists for this error. fn error_code(&self) -> Option<&str> { None } /// Return the error message minus the ngrok error code. /// If this error has no error code, this is equivalent to /// `format!("{error}")`. fn msg(&self) -> String { format!("{self}") } } impl Error for Box where E: Error, { fn error_code(&self) -> Option<&str> { ::error_code(self) } fn msg(&self) -> String { ::msg(self) } } impl Error for Arc where E: Error, { fn error_code(&self) -> Option<&str> { ::error_code(self) } fn msg(&self) -> String { ::msg(self) } } impl Error for &E where E: Error, { fn error_code(&self) -> Option<&str> { ::error_code(self) } fn msg(&self) -> String { ::msg(self) } } #[derive(Serialize, Deserialize, Debug, Clone, Default)] pub struct ErrResp { pub msg: String, pub error_code: Option, } impl<'a> From<&'a str> for ErrResp { fn from(value: &'a str) -> Self { let mut error_code = None; let mut msg_lines = vec![]; for line in value.lines().filter(|l| !l.is_empty()) { if line.starts_with("ERR_NGROK_") { error_code = Some(line.trim().into()); } else { msg_lines.push(line); } } ErrResp { error_code, msg: msg_lines.join("\n"), } } } impl error::Error for ErrResp {} const ERR_URL: &str = "https://ngrok.com/docs/errors"; impl fmt::Display for ErrResp { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.msg.fmt(f)?; if let Some(code) = self.error_code.as_ref().map(|s| s.to_lowercase()) { write!(f, "\n\n{ERR_URL}/{code}")?; } Ok(()) } } impl Error for ErrResp { fn error_code(&self) -> Option<&str> { self.error_code.as_deref() } fn msg(&self) -> String { self.msg.clone() } } #[derive(Serialize, Deserialize, Debug, Clone, Default)] #[serde(rename_all = "PascalCase")] pub struct Auth { pub version: Vec, // protocol 
versions supported, ordered by preference pub client_id: String, // empty for new sessions pub extra: AuthExtra, // clients may add whatever data the like to auth messages } #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Serialize, Deserialize, Default)] #[serde(transparent)] pub struct SecretBytes(#[serde(with = "base64bytes")] Vec); impl Deref for SecretBytes { type Target = Vec; fn deref(&self) -> &Self::Target { &self.0 } } impl DerefMut for SecretBytes { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl<'a> From<&'a [u8]> for SecretBytes { fn from(other: &'a [u8]) -> Self { SecretBytes(other.into()) } } impl From> for SecretBytes { fn from(other: Vec) -> Self { SecretBytes(other) } } impl fmt::Display for SecretBytes { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "********") } } impl fmt::Debug for SecretBytes { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "********") } } #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Serialize, Deserialize, Default)] #[serde(transparent)] pub struct SecretString(String); impl Deref for SecretString { type Target = String; fn deref(&self) -> &Self::Target { &self.0 } } impl DerefMut for SecretString { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl<'a> From<&'a str> for SecretString { fn from(other: &'a str) -> Self { SecretString(other.into()) } } impl From for SecretString { fn from(other: String) -> Self { SecretString(other) } } impl fmt::Display for SecretString { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "********") } } impl fmt::Debug for SecretString { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "********") } } #[derive(Serialize, Deserialize, Debug, Clone, Default)] #[serde(rename_all = "PascalCase")] pub struct AuthExtra { #[serde(rename = "OS")] pub os: String, pub arch: String, pub auth_token: SecretString, pub version: String, pub hostname: String, pub user_agent: String, pub 
metadata: String, pub cookie: SecretString, pub heartbeat_interval: i64, pub heartbeat_tolerance: i64, // for each remote operation, these variables define whether the ngrok // client is capable of executing that operation. each capability // is transmitted as a pointer to String, with the following meanings: // // null -> operation disallow beause the ngrok agent version is too old. // this is true because older clients will never set this value // // "" (empty String) -> the operation is supported // // non-empty String -> the operation is not supported and this value is the user-facing // error message describing why it is not supported pub update_unsupported_error: Option, pub stop_unsupported_error: Option, pub restart_unsupported_error: Option, pub proxy_type: String, #[serde(rename = "MutualTLS")] pub mutual_tls: bool, pub service_run: bool, pub config_version: String, pub custom_interface: bool, #[serde(rename = "CustomCAs")] pub custom_cas: bool, pub client_type: String, } #[derive(Serialize, Deserialize, Debug, Clone)] #[serde(rename_all = "PascalCase")] pub struct AuthResp { pub version: String, pub client_id: String, #[serde(default)] pub extra: AuthRespExtra, } rpc_req!(Auth, AuthResp, AUTH_REQ); #[derive(Serialize, Deserialize, Debug, Clone, Default)] #[serde(rename_all = "PascalCase")] pub struct AuthRespExtra { pub version: Option, pub region: Option, pub cookie: Option, pub account_name: Option, pub session_duration: Option, pub plan_name: Option, pub banner: Option, } #[derive(Serialize, Deserialize, Debug, Clone)] #[serde(rename_all = "PascalCase")] pub struct Bind { #[serde(rename = "Id")] pub client_id: String, pub proto: String, pub forwards_to: String, pub forwards_proto: String, pub opts: T, pub extra: BindExtra, } #[derive(Debug, Clone)] // allowing this since these aren't persistent values. 
#[allow(clippy::large_enum_variant)] pub enum BindOpts { Http(HttpEndpoint), Tcp(TcpEndpoint), Tls(TlsEndpoint), } #[derive(Serialize, Deserialize, Debug, Clone, Default)] #[serde(rename_all = "PascalCase")] pub struct BindExtra { pub token: SecretString, #[serde(rename = "IPPolicyRef")] pub ip_policy_ref: String, pub metadata: String, pub bindings: Vec, #[serde(rename = "PoolingEnabled")] pub pooling_enabled: bool, } #[derive(Serialize, Deserialize, Debug, Clone)] #[serde(rename_all = "PascalCase")] pub struct BindResp { #[serde(rename = "Id")] pub client_id: String, #[serde(rename = "URL")] pub url: String, pub proto: String, #[serde(rename = "Opts")] pub bind_opts: T, pub extra: BindRespExtra, } #[derive(Serialize, Deserialize, Debug, Clone)] #[serde(rename_all = "PascalCase")] pub struct BindRespExtra { pub token: SecretString, } rpc_req!(Bind, BindResp, BIND_REQ; T: std::fmt::Debug + Serialize + DeserializeOwned + Clone); #[derive(Serialize, Deserialize, Debug, Clone)] #[serde(rename_all = "PascalCase")] pub struct StartTunnelWithLabel { pub labels: HashMap, pub forwards_to: String, pub forwards_proto: String, pub metadata: String, } #[derive(Serialize, Deserialize, Debug, Clone)] #[serde(rename_all = "PascalCase")] pub struct StartTunnelWithLabelResp { pub id: String, } rpc_req!( StartTunnelWithLabel, StartTunnelWithLabelResp, BIND_LABELED_REQ ); #[derive(Serialize, Deserialize, Debug, Clone)] #[serde(rename_all = "PascalCase")] pub struct Unbind { #[serde(rename = "Id")] pub client_id: String, // extra: not sure what this field actually contains } #[derive(Serialize, Deserialize, Debug, Clone)] #[serde(rename_all = "PascalCase")] pub struct UnbindResp { // extra: not sure what this field actually contains } rpc_req!(Unbind, UnbindResp, UNBIND_REQ); #[derive(Serialize, Deserialize, Debug, Clone)] #[serde(rename_all = "PascalCase")] pub struct ProxyHeader { pub id: String, pub client_addr: String, pub proto: String, pub edge_type: EdgeType, #[serde(rename = 
"PassthroughTLS")] pub passthrough_tls: bool, } #[derive(Error, Debug)] #[non_exhaustive] pub enum ReadHeaderError { #[error("error reading proxy header")] Io(#[from] io::Error), #[error("invalid utf-8 in proxy header")] InvalidUtf8(#[from] FromUtf8Error), #[error("invalid proxy header json")] InvalidHeader(#[from] serde_json::Error), } impl ProxyHeader { pub async fn read_from_stream( mut stream: impl AsyncRead + Unpin, ) -> Result { let size = stream.read_i64_le().await?; let mut buf = vec![0u8; size as usize]; stream.read_exact(&mut buf).await?; let header = String::from_utf8(buf)?; debug!(?header, "read header"); Ok(serde_json::from_str(&header)?) } } /// The edge type for an incomming connection. #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum EdgeType { /// EdgeType Undefined Undefined, /// A TCP Edge Tcp, /// A TLS Edge Tls, /// A HTTPs Edge Https, } impl FromStr for EdgeType { type Err = (); fn from_str(s: &str) -> Result { Ok(match s { "1" => EdgeType::Tcp, "2" => EdgeType::Tls, "3" => EdgeType::Https, _ => EdgeType::Undefined, }) } } impl EdgeType { pub(crate) fn as_str(self) -> &'static str { match self { EdgeType::Undefined => "0", EdgeType::Tcp => "1", EdgeType::Tls => "2", EdgeType::Https => "3", } } } impl Serialize for EdgeType { fn serialize(&self, serializer: S) -> Result where S: serde::Serializer, { serializer.serialize_str(self.as_str()) } } struct EdgeTypeVisitor; impl<'de> Visitor<'de> for EdgeTypeVisitor { type Value = EdgeType; fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { formatter.write_str(r#""0", "1", "2", or "3""#) } fn visit_str(self, v: &str) -> Result where E: serde::de::Error, { Ok(EdgeType::from_str(v).unwrap()) } } impl<'de> Deserialize<'de> for EdgeType { fn deserialize(deserializer: D) -> Result where D: serde::Deserializer<'de>, { deserializer.deserialize_str(EdgeTypeVisitor) } } /// A request from the ngrok dashboard for the agent to stop. 
#[derive(Serialize, Deserialize, Debug, Clone, Copy, Default)]
#[serde(rename_all = "PascalCase")]
pub struct Stop {}

/// Common response structure for all remote commands originating from the ngrok
/// dashboard.
#[derive(Serialize, Deserialize, Debug, Clone, Default)]
#[serde(rename_all = "PascalCase")]
pub struct CommandResp {
    /// The error arising from command handling, if any.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub error: Option<String>,
}

/// Stop responses carry no data beyond the common command response.
pub type StopResp = CommandResp;

rpc_req!(Stop, StopResp, STOP_REQ);

/// A request from the ngrok dashboard for the agent to restart.
#[derive(Serialize, Deserialize, Debug, Clone, Copy, Default)]
#[serde(rename_all = "PascalCase")]
pub struct Restart {}

/// Restart responses carry no data beyond the common command response.
pub type RestartResp = CommandResp;

rpc_req!(Restart, RestartResp, RESTART_REQ);

/// A request from the ngrok dashboard for the agent to update itself.
#[derive(Serialize, Deserialize, Debug, Clone, Default)]
#[serde(rename_all = "PascalCase")]
pub struct Update {
    /// The version that the agent is requested to update to.
    pub version: String,
    /// Whether or not updating to the same major version is sufficient.
    pub permit_major_version: bool,
}

/// A request from remote to stop a tunnel
#[derive(Serialize, Deserialize, Debug, Clone, Default)]
#[serde(rename_all = "PascalCase")]
pub struct StopTunnel {
    /// The id of the tunnel to stop
    #[serde(rename = "Id")]
    pub client_id: String,
    /// The message on why this tunnel was stopped
    pub message: String,
    /// An optional ngrok error code
    pub error_code: Option<String>,
}

/// Update responses carry no data beyond the common command response.
pub type UpdateResp = CommandResp;

rpc_req!(Update, UpdateResp, UPDATE_REQ);

/// The version of [PROXY protocol](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt)
/// to use with this tunnel.
///
/// [ProxyProto::None] disables PROXY protocol support.
#[derive(Debug, Copy, Clone, Default, Eq, PartialEq)] pub enum ProxyProto { /// No PROXY protocol #[default] None, /// PROXY protocol v1 V1, /// PROXY protocol v2 V2, } impl From for i64 { fn from(other: ProxyProto) -> Self { use ProxyProto::*; match other { None => 0, V1 => 1, V2 => 2, } } } impl From for ProxyProto { fn from(other: i64) -> Self { use ProxyProto::*; match other { 1 => V1, 2 => V2, _ => None, } } } #[derive(Debug, Clone, Error)] #[error("invalid proxyproto string: {}", .0)] pub struct InvalidProxyProtoString(String); impl FromStr for ProxyProto { type Err = InvalidProxyProtoString; fn from_str(s: &str) -> Result { use ProxyProto::*; Ok(match s { "" => None, "1" => V1, "2" => V2, _ => return Err(InvalidProxyProtoString(s.into())), }) } } impl Serialize for ProxyProto { fn serialize(&self, serializer: S) -> Result where S: serde::Serializer, { serializer.serialize_i64(i64::from(*self)) } } struct ProxyProtoVisitor; impl<'de> Visitor<'de> for ProxyProtoVisitor { type Value = ProxyProto; fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { formatter.write_str("0, 1, or 2") } fn visit_i64(self, v: i64) -> Result where E: serde::de::Error, { Ok(ProxyProto::from(v)) } fn visit_u64(self, v: u64) -> Result where E: serde::de::Error, { Ok(ProxyProto::from(v as i64)) } } impl<'de> Deserialize<'de> for ProxyProto { fn deserialize(deserializer: D) -> Result where D: serde::Deserializer<'de>, { deserializer.deserialize_i64(ProxyProtoVisitor) } } #[derive(Debug, Serialize, Deserialize, Clone)] #[serde(untagged)] pub enum PolicyWrapper { #[serde(serialize_with = "serialize_policy")] Policy(Policy), String(String), } impl From for PolicyWrapper { fn from(value: String) -> Self { PolicyWrapper::String(value) } } #[derive(Serialize, Deserialize, Debug, Clone, Default)] #[serde(rename_all = "PascalCase")] pub struct HttpEndpoint { #[serde(default)] pub domain: String, pub hostname: String, pub auth: String, pub subdomain: String, pub 
host_header_rewrite: bool, pub local_url_scheme: Option, pub proxy_proto: ProxyProto, pub compression: Option, pub circuit_breaker: Option, #[serde(rename = "IPRestriction")] pub ip_restriction: Option, pub basic_auth: Option, #[serde(rename = "OAuth")] pub oauth: Option, #[serde(rename = "OIDC")] pub oidc: Option, pub webhook_verification: Option, #[serde(rename = "MutualTLSCA")] pub mutual_tls_ca: Option, #[serde(default)] pub request_headers: Option, #[serde(default)] pub response_headers: Option, #[serde(rename = "WebsocketTCPConverter")] pub websocket_tcp_converter: Option, #[serde(rename = "UserAgentFilter")] pub user_agent_filter: Option, #[serde(rename = "TrafficPolicy")] pub traffic_policy: Option, } #[derive(Debug, Clone, Copy, Serialize, Deserialize)] pub struct Compression {} fn is_default(v: &T) -> bool where T: PartialEq + Default, { T::default() == *v } #[derive(Debug, Clone, Copy, Serialize, Deserialize)] pub struct CircuitBreaker { #[serde(default, skip_serializing_if = "is_default")] pub error_threshold: f64, } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct BasicAuth { #[serde(default, skip_serializing_if = "is_default")] pub credentials: Vec, } #[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] pub struct BasicAuthCredential { pub username: String, #[serde(default, skip_serializing_if = "is_default")] pub cleartext_password: String, #[serde(default, skip_serializing_if = "is_default")] #[serde(with = "base64bytes")] pub hashed_password: Vec, } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct IpRestriction { #[serde(default, skip_serializing_if = "is_default")] pub allow_cidrs: Vec, #[serde(default, skip_serializing_if = "is_default")] pub deny_cidrs: Vec, } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Oauth { pub provider: String, #[serde(default, skip_serializing_if = "is_default")] pub client_id: String, #[serde(default, skip_serializing_if = "is_default")] pub client_secret: SecretString, 
#[serde(default, skip_serializing_if = "is_default")] #[serde(with = "base64bytes")] pub sealed_client_secret: Vec, #[serde(default, skip_serializing_if = "is_default")] pub allow_emails: Vec, #[serde(default, skip_serializing_if = "is_default")] pub allow_domains: Vec, #[serde(default, skip_serializing_if = "is_default")] pub scopes: Vec, } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Oidc { pub issuer_url: String, #[serde(default, skip_serializing_if = "is_default")] pub client_id: String, #[serde(default, skip_serializing_if = "is_default")] pub client_secret: SecretString, #[serde(default, skip_serializing_if = "is_default")] #[serde(with = "base64bytes")] pub sealed_client_secret: Vec, #[serde(default, skip_serializing_if = "is_default")] pub allow_emails: Vec, #[serde(default, skip_serializing_if = "is_default")] pub allow_domains: Vec, #[serde(default, skip_serializing_if = "is_default")] pub scopes: Vec, } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct WebhookVerification { pub provider: String, #[serde(default, skip_serializing_if = "is_default")] pub secret: SecretString, #[serde(default, skip_serializing_if = "is_default")] #[serde(with = "base64bytes")] pub sealed_secret: Vec, } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct MutualTls { #[serde(default, skip_serializing_if = "is_default")] #[serde(with = "base64bytes")] // this is snake-case on the wire pub mutual_tls_ca: Vec, } #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct Headers { #[serde(default, skip_serializing_if = "is_default")] pub add: Vec, #[serde(default, skip_serializing_if = "is_default")] pub remove: Vec, #[serde(default, skip_serializing_if = "is_default")] pub add_parsed: HashMap, } #[derive(Debug, Clone, Copy, Serialize, Deserialize)] pub struct WebsocketTcpConverter {} #[derive(Debug, Clone, Serialize, Deserialize)] pub struct UserAgentFilter { #[serde(default, skip_serializing_if = "is_default")] 
pub allow: Vec, #[serde(default, skip_serializing_if = "is_default")] pub deny: Vec, } #[derive(Serialize, Deserialize, Debug, Clone, Default)] #[serde(rename_all = "PascalCase")] pub struct TcpEndpoint { pub addr: String, pub proxy_proto: ProxyProto, #[serde(rename = "IPRestriction")] pub ip_restriction: Option, #[serde(rename = "TrafficPolicy")] pub traffic_policy: Option, } #[derive(Serialize, Deserialize, Debug, Clone, Default)] #[serde(rename_all = "PascalCase")] pub struct TlsEndpoint { #[serde(default)] pub domain: String, pub hostname: String, pub subdomain: String, pub proxy_proto: ProxyProto, #[serde(rename = "MutualTLSAtAgent")] pub mutual_tls_at_agent: bool, #[serde(rename = "MutualTLSAtEdge")] pub mutual_tls_at_edge: Option, #[serde(rename = "TLSTermination")] pub tls_termination: Option, #[serde(rename = "IPRestriction")] pub ip_restriction: Option, #[serde(rename = "TrafficPolicy")] pub traffic_policy: Option, } #[derive(Serialize, Deserialize, Debug, Clone, Default)] pub struct TlsTermination { #[serde(default, with = "base64bytes", skip_serializing_if = "is_default")] pub cert: Vec, #[serde(skip_serializing_if = "is_default", default)] pub key: SecretBytes, #[serde(default, with = "base64bytes", skip_serializing_if = "is_default")] pub sealed_key: Vec, } #[derive(Serialize, Deserialize, Debug, Clone, Default)] #[serde(rename_all = "PascalCase", default)] pub struct Policy { pub inbound: Vec, pub outbound: Vec, } #[derive(Serialize, Deserialize, Debug, Clone, Default)] #[serde(rename_all = "PascalCase", default)] pub struct Rule { pub name: String, pub expressions: Vec, pub actions: Vec, } #[derive(Serialize, Deserialize, Debug, Clone, Default)] #[serde(rename_all = "PascalCase", default)] pub struct Action { #[serde(rename = "Type")] pub type_: String, #[serde(default, with = "vec_to_json", skip_serializing_if = "is_default")] pub config: Vec, } // This function converts a Policy into a valid JSON string. 
This is used so legacy configurations will still work // using the new string "TrafficPolicy" field. fn serialize_policy(v: &Policy, s: S) -> Result { let abc = match serde_json::to_string(v) { Ok(t) => t, Err(_) => { return Err(serde::ser::Error::custom( "policy could not be converted to valid json", )) } }; s.serialize_str(&abc) } // These are helpers to convert base64 strings to full, real json. The serialize helper also ensures that the resulting // representation isn't a string-escaped string. mod vec_to_json { use serde::{ Deserialize, Deserializer, Serialize, Serializer, }; pub fn serialize(v: &[u8], s: S) -> Result { let u: serde_json::Value = match serde_json::from_slice(v) { Ok(k) => k, Err(_) => return Err(serde::ser::Error::custom("Config is invalid JSON")), }; u.serialize(s) } pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result, D::Error> { let s = serde_json::Map::deserialize(d)?; let v = serde_json::to_vec(&s).unwrap(); Ok(v) } } // These are helpers to facilitate the Vec <-> base64-encoded bytes // representation that the Go messages use mod base64bytes { use base64::prelude::*; use serde::{ Deserialize, Deserializer, Serialize, Serializer, }; pub fn serialize(v: &Vec, s: S) -> Result { BASE64_STANDARD.encode(v).serialize(s) } pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result, D::Error> { let s = String::deserialize(d)?; BASE64_STANDARD .decode(s.as_bytes()) .map_err(serde::de::Error::custom) } } #[cfg(test)] mod test { use super::*; #[test] fn test_proxy_proto_serde() { let input = "2"; let p: ProxyProto = serde_json::from_str(input).unwrap(); assert!(matches!(p, ProxyProto::V2)); assert_eq!(serde_json::to_string(&p).unwrap(), "2"); } pub(crate) const POLICY_JSON: &str = r###"{"Inbound":[{"Name":"test_in","Expressions":["req.Method == 'PUT'"],"Actions":[{"Type":"deny"}]}],"Outbound":[{"Name":"test_out","Expressions":["res.StatusCode == '200'"],"Actions":[{"Type":"custom-response","Config":{"status_code":201}}]}]}"###; #[test] 
fn test_policy_proto_serde() {
    let policy: Policy = serde_json::from_str(POLICY_JSON).unwrap();
    // mainly just interested in checking outbound, as that has the
    // special vec serialization
    assert_eq!(1, policy.outbound.len());
    let outbound = &policy.outbound[0];
    assert_eq!(1, outbound.actions.len());
    let action = &outbound.actions[0];
    assert_eq!(r#"{"status_code":201}"#.as_bytes(), action.config);
    // Round-trip: serializing back must reproduce the canonical JSON.
    assert_eq!(serde_json::to_string(&policy).unwrap(), POLICY_JSON);
}
// closes the enclosing `mod tests` (opened before this extract's view)
}

// ================================================
// FILE: ngrok/src/internals/raw_session.rs
// ================================================

use std::{
    collections::HashMap,
    fmt::Debug,
    future::Future,
    io,
    ops::{
        Deref,
        DerefMut,
    },
    sync::Arc,
};

use async_trait::async_trait;
use muxado::{
    heartbeat::{
        HeartbeatConfig,
        HeartbeatCtl,
    },
    typed::{
        StreamType,
        TypedAccept,
        TypedOpenClose,
        TypedSession,
        TypedStream,
    },
    Error as MuxadoError,
    SessionBuilder,
};
use serde::{
    de::DeserializeOwned,
    Deserialize,
};
use thiserror::Error;
use tokio::{
    io::{
        AsyncRead,
        AsyncReadExt,
        AsyncWrite,
        AsyncWriteExt,
    },
    runtime::Handle,
};
use tokio_util::either::Either;
use tracing::{
    debug,
    instrument,
    warn,
};

use super::{
    proto::{
        Auth, AuthExtra, AuthResp, Bind, BindExtra, BindOpts, BindResp, CommandResp, ErrResp,
        Error, ProxyHeader, ReadHeaderError, Restart, StartTunnelWithLabel,
        StartTunnelWithLabelResp, Stop, StopTunnel, Unbind, UnbindResp, Update, PROXY_REQ,
        RESTART_REQ, STOP_REQ, STOP_TUNNEL_REQ, UPDATE_REQ, VERSION,
    },
    rpc::RpcRequest,
};
use crate::{
    tunnel::AcceptError::ListenerClosed,
    Session,
};

/// Errors arising from tunneling protocol RPC calls.
#[derive(Error, Debug)]
#[non_exhaustive]
pub enum RpcError {
    /// Failed to open a new stream to start the RPC call.
    #[error("failed to open muxado stream")]
    Open(#[source] MuxadoError),
    /// Some non-Open transport error occurred
    #[error("transport error")]
    Transport(#[source] MuxadoError),
    /// Failed to send the request over the stream.
    #[error("error sending rpc request")]
    Send(#[source] io::Error),
    /// Failed to read the RPC response from the stream.
    #[error("error reading rpc response")]
    Receive(#[source] io::Error),
    /// The RPC response was invalid.
    #[error("failed to deserialize rpc response")]
    InvalidResponse(#[from] serde_json::Error),
    /// There was an error in the RPC response.
    #[error("rpc error response:\n{0}")]
    Response(ErrResp),
}

impl Error for RpcError {
    fn error_code(&self) -> Option<&str> {
        match self {
            RpcError::Response(resp) => resp.error_code(),
            _ => None,
        }
    }
    fn msg(&self) -> String {
        match self {
            RpcError::Response(resp) => resp.msg(),
            _ => format!("{self}"),
        }
    }
}

#[derive(Error, Debug)]
#[non_exhaustive]
pub enum StartSessionError {
    #[error("failed to start heartbeat task")]
    StartHeartbeat(#[from] io::Error),
}

#[derive(Error, Debug)]
#[non_exhaustive]
pub enum AcceptError {
    #[error("transport error when accepting connection")]
    Transport(#[from] MuxadoError),
    #[error(transparent)]
    Header(#[from] ReadHeaderError),
    #[error("invalid stream type: {0}")]
    InvalidType(StreamType),
}

pub struct RpcClient {
    // This is held so that the heartbeat task doesn't get shutdown. Eventually
    // we may use it to request heartbeats via the `Session`.
    _heartbeat: HeartbeatCtl,
    // NOTE(review): trait-object bounds reconstructed from the split_typed()
    // halves below — confirm against upstream.
    open: Box<dyn TypedOpenClose + Send>,
}

pub struct IncomingStreams {
    runtime: Handle,
    handlers: CommandHandlers,
    pub(crate) session: Option<Session>,
    accept: Box<dyn TypedAccept + Send>,
}

pub struct RawSession {
    client: RpcClient,
    incoming: IncomingStreams,
}

impl Deref for RawSession {
    type Target = RpcClient;
    fn deref(&self) -> &Self::Target {
        &self.client
    }
}

impl DerefMut for RawSession {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.client
    }
}

/// Trait for a type that can handle a command from the ngrok dashboard.
#[async_trait]
pub trait CommandHandler<T>: Send + Sync + 'static {
    /// Handle the remote command.
    async fn handle_command(&self, req: T) -> Result<(), String>;
}

// Blanket impl: any async closure `Fn(R) -> Future<Output = Result<(), String>>`
// is a valid command handler.
#[async_trait]
impl<R, T, F> CommandHandler<R> for T
where
    R: Send + 'static,
    T: Fn(R) -> F + Send + Sync + 'static,
    F: Future<Output = Result<(), String>> + Send,
{
    async fn handle_command(&self, req: R) -> Result<(), String> {
        self(req).await
    }
}

#[derive(Default, Clone)]
pub struct CommandHandlers {
    pub on_restart: Option<Arc<dyn CommandHandler<Restart>>>,
    pub on_update: Option<Arc<dyn CommandHandler<Update>>>,
    pub on_stop: Option<Arc<dyn CommandHandler<Stop>>>,
}

impl RawSession {
    /// Start a raw tunneling session over the given IO stream.
    ///
    /// Layers muxado typed-stream support and heartbeats on top of the
    /// transport, and splits it into an RPC client half and an incoming
    /// stream half.
    pub async fn start<S, H>(
        io_stream: S,
        heartbeat: HeartbeatConfig,
        handlers: H,
    ) -> Result<Self, StartSessionError>
    where
        S: AsyncRead + AsyncWrite + Send + 'static,
        H: Into<Option<CommandHandlers>>,
    {
        let mux_sess = SessionBuilder::new(io_stream).start();
        let handlers = handlers.into().unwrap_or_default();

        let typed = muxado::typed::Typed::new(mux_sess);
        let (heartbeat, hbctl) = muxado::heartbeat::Heartbeat::start(typed, heartbeat).await?;
        let (open, accept) = heartbeat.split_typed();

        let runtime = Handle::current();

        let sess = RawSession {
            client: RpcClient {
                _heartbeat: hbctl,
                open: Box::new(open),
            },
            incoming: IncomingStreams {
                runtime,
                handlers,
                session: None,
                accept: Box::new(accept),
            },
        };

        Ok(sess)
    }

    pub fn split(self) -> (RpcClient, IncomingStreams) {
        (self.client, self.incoming)
    }
}

impl RpcClient {
    /// Perform a single RPC: open a typed stream, write the JSON-encoded
    /// request, read the stream to EOF, and decode the response.
    #[instrument(level = "debug", skip(self))]
    async fn rpc<R: RpcRequest>(&mut self, req: R) -> Result<R::Response, RpcError> {
        let mut stream = self
            .open
            .open_typed(R::TYPE)
            .await
            .map_err(RpcError::Open)?;
        let s = serde_json::to_string(&req)
            // This should never happen, since we control the request types and
            // know that they will always serialize correctly. Just in case
            // though, call them "Send" errors.
            .map_err(io::Error::other)
            .map_err(RpcError::Send)?;

        stream
            .write_all(s.as_bytes())
            .await
            .map_err(RpcError::Send)?;

        let mut buf = Vec::new();
        stream
            .read_to_end(&mut buf)
            .await
            .map_err(RpcError::Receive)?;

        // Local type used only to sniff for an error payload; distinct from
        // the crate-level `ErrResp` imported above.
        #[derive(Debug, Deserialize)]
        struct ErrResp {
            #[serde(rename = "Error")]
            error: String,
        }

        let ok_resp = serde_json::from_slice::<R::Response>(&buf);
        let err_resp = serde_json::from_slice::<ErrResp>(&buf);

        if let Ok(err) = err_resp {
            if !err.error.is_empty() {
                debug!(?err, "decoded rpc error response");
                return Err(RpcError::Response(err.error.as_str().into()));
            }
        }

        debug!(resp = ?ok_resp, "decoded rpc response");

        Ok(ok_resp?)
    }

    /// Close the raw ngrok session with a "None" muxado error.
    pub async fn close(&mut self) -> Result<(), RpcError> {
        self.open
            .close(MuxadoError::None, "".into())
            .await
            .map_err(RpcError::Transport)?;
        Ok(())
    }

    #[instrument(level = "debug", skip(self))]
    pub async fn auth(
        &mut self,
        id: impl Into<String> + Debug,
        extra: AuthExtra,
    ) -> Result<AuthResp, RpcError> {
        let id = id.into();
        let req = Auth {
            client_id: id.clone(),
            extra,
            version: VERSION.iter().map(|&x| x.into()).collect(),
        };

        let resp = self.rpc(req).await?;
        Ok(resp)
    }

    #[instrument(level = "debug", skip(self))]
    pub async fn listen(
        &mut self,
        protocol: impl Into<String> + Debug,
        opts: BindOpts,
        extra: BindExtra,
        id: impl Into<String> + Debug,
        forwards_to: impl Into<String> + Debug,
        forwards_proto: impl Into<String> + Debug,
    ) -> Result<BindResp<BindOpts>, RpcError> {
        // Sorry, this is awful. Serde untagged unions are pretty fraught and
        // hard to debug, so we're using this macro to specialize this call
        // based on the enum variant. It drops down to the type wrapped in the
        // enum for the actual request/response, and then re-wraps it on the way
        // back out in the same variant.
        // It's probably an artifact of the go -> rust translation, and could be
        // fixed with enough refactoring and rearchitecting. But it works well
        // enough for now and is pretty localized.
        macro_rules! match_variant {
            ($v:expr, $($var:tt),*) => {
                match opts {
                    $(BindOpts::$var (opts) => {
                        let req = Bind {
                            client_id: id.into(),
                            proto: protocol.into(),
                            forwards_to: forwards_to.into(),
                            forwards_proto: forwards_proto.into(),
                            opts,
                            extra,
                        };

                        let resp = self.rpc(req).await?;

                        BindResp {
                            bind_opts: BindOpts::$var(resp.bind_opts),
                            client_id: resp.client_id,
                            url: resp.url,
                            extra: resp.extra,
                            proto: resp.proto,
                        }
                    })*
                }
            };
        }
        Ok(match_variant!(opts, Http, Tcp, Tls))
    }

    #[instrument(level = "debug", skip(self))]
    pub async fn listen_label(
        &mut self,
        labels: HashMap<String, String>,
        metadata: impl Into<String> + Debug,
        forwards_to: impl Into<String> + Debug,
        forwards_proto: impl Into<String> + Debug,
    ) -> Result<StartTunnelWithLabelResp, RpcError> {
        let req = StartTunnelWithLabel {
            labels,
            metadata: metadata.into(),
            forwards_to: forwards_to.into(),
            forwards_proto: forwards_proto.into(),
        };

        self.rpc(req).await
    }

    #[instrument(level = "debug", skip(self))]
    pub async fn unlisten(
        &mut self,
        id: impl Into<String> + Debug,
    ) -> Result<UnbindResp, RpcError> {
        self.rpc(Unbind {
            client_id: id.into(),
        })
        .await
    }
}

pub const NOT_IMPLEMENTED: &str = "the agent has not defined a callback for this operation";

/// Incrementally read a JSON value off the stream, deserializing it into `T`
/// once a complete value has been received.
async fn read_req<T>(stream: &mut TypedStream) -> Result<T, Either<io::Error, serde_json::Error>>
where
    T: DeserializeOwned + Debug + 'static,
{
    debug!("reading request from stream");
    let mut buf = vec![];
    let req = serde_json::from_value(loop {
        let mut tmp = vec![0u8; 256];
        let bytes = stream.read(&mut tmp).await.map_err(Either::Left)?;
        // Fix: a 0-byte read means EOF. Without this check, an EOF before a
        // complete JSON value arrives would spin this loop forever.
        if bytes == 0 {
            return Err(Either::Left(io::Error::new(
                io::ErrorKind::UnexpectedEof,
                "stream closed before a complete request was read",
            )));
        }
        buf.extend_from_slice(&tmp[..bytes]);
        if let Ok(obj) = serde_json::from_slice::<serde_json::Value>(&buf) {
            break obj;
        }
    })
    .map_err(Either::Right)?;
    debug!(?req, "read request from stream");
    Ok(req)
}

/// Read a dashboard command request from the stream, run the handler (if any),
/// and write the `CommandResp` back. Errors are logged and propagated.
async fn handle_req<T>(
    handler: Option<Arc<dyn CommandHandler<T>>>,
    mut stream: TypedStream,
) -> Result<(), Either<io::Error, serde_json::Error>>
where
    T: DeserializeOwned + Debug + 'static,
{
    let res = async {
        let req = read_req(&mut stream).await?;
        let resp = if let Some(handler) = handler {
            debug!("running command handler");
            handler.handle_command(req).await.err()
        } else {
            Some(NOT_IMPLEMENTED.into())
        };
        debug!(?resp, "writing response to stream");
        let resp_json =
            serde_json::to_vec(&CommandResp { error: resp }).map_err(Either::Right)?;
        stream
            .write_all(resp_json.as_slice())
            .await
            .map_err(Either::Left)?;
        Ok(())
    }
    .await;
    if let Err(e) = &res {
        warn!(?e, "error when handling dashboard command");
    }
    res
}

impl IncomingStreams {
    /// Accept the next proxied tunnel connection, dispatching any dashboard
    /// command streams (restart/update/stop/stop-tunnel) along the way.
    pub async fn accept(&mut self) -> Result<TunnelStream, AcceptError> {
        Ok(loop {
            let mut stream = self.accept.accept_typed().await?;

            match stream.typ() {
                RESTART_REQ => {
                    self.runtime
                        .spawn(handle_req(self.handlers.on_restart.clone(), stream));
                }
                UPDATE_REQ => {
                    self.runtime
                        .spawn(handle_req(self.handlers.on_update.clone(), stream));
                }
                STOP_REQ => {
                    self.runtime
                        .spawn(handle_req(self.handlers.on_stop.clone(), stream));
                }
                STOP_TUNNEL_REQ => {
                    // close the tunnel through the session
                    if let Some(session) = &self.session {
                        let req = read_req::<StopTunnel>(&mut stream)
                            .await
                            .map_err(|e| match e {
                                Either::Left(err) => ReadHeaderError::from(err),
                                Either::Right(err) => ReadHeaderError::from(err),
                            })?;
                        session
                            .close_tunnel_with_error(
                                req.client_id,
                                ListenerClosed {
                                    message: req.message,
                                    error_code: req.error_code,
                                },
                            )
                            .await;
                    }
                }
                PROXY_REQ => {
                    let header = ProxyHeader::read_from_stream(&mut *stream).await?;

                    break TunnelStream { header, stream };
                }
                t => return Err(AcceptError::InvalidType(t)),
            }
        })
    }
}

pub struct TunnelStream {
    pub header: ProxyHeader,
    pub stream: TypedStream,
}

// ================================================
// FILE: ngrok/src/internals/rpc.rs
// ================================================

use std::fmt::Debug;

use muxado::typed::StreamType;
use serde::{
    de::DeserializeOwned,
    Serialize,
};

pub trait RpcRequest: Serialize + Debug {
    type Response: DeserializeOwned + Debug;
    const TYPE: StreamType;
}

// Associates a request type with its response type and muxado stream type.
// The first arm accepts extra generic parameters for generic request types.
macro_rules! rpc_req {
    ($req:ty, $resp:ty, $typ:expr; $($t:tt)*) => {
        impl<$($t)*> $crate::internals::rpc::RpcRequest for $req {
            type Response = $resp;
            const TYPE: StreamType = $typ;
        }
    };
    ($req:ty, $resp:ty, $typ:expr) => {
        impl $crate::internals::rpc::RpcRequest for $req {
            type Response = $resp;
            const TYPE: StreamType = $typ;
        }
    };
}

// ================================================
// FILE: ngrok/src/lib.rs
// ================================================

#![doc = include_str!("../README.md")]
#![warn(missing_docs)]
#![cfg_attr(docsrs, feature(doc_cfg))]

mod internals {
    #[macro_use]
    pub mod rpc;
    pub mod proto;
    pub mod raw_session;
}

/// Tunnel and endpoint configuration types.
pub mod config {
    #[macro_use]
    mod common;
    pub use common::*;
    mod headers;
    mod http;
    pub use self::http::*;
    mod labeled;
    pub use labeled::*;
    mod oauth;
    pub use oauth::*;
    mod oidc;
    pub use policies::*;
    mod policies;
    pub use oidc::*;
    mod tcp;
    pub use tcp::*;
    mod tls;
    pub use tls::*;
    mod webhook_verification;
}

mod proxy_proto;
/// Types for working with the ngrok session.
pub mod session;
/// Types for working with ngrok tunnels.
pub mod tunnel;
/// Types for working with ngrok connections.
pub mod conn;
/// Types for working with connection forwarders.
pub mod forwarder;
mod tunnel_ext;

#[doc(inline)]
pub use conn::{
    Conn,
    EdgeConn,
    EndpointConn,
};
#[doc(inline)]
pub use internals::proto::Error;
#[doc(inline)]
pub use session::Session;
#[doc(inline)]
pub use tunnel::Tunnel;

/// A prelude of traits for working with ngrok types.
pub mod prelude { #[allow(deprecated)] #[doc(inline)] pub use crate::{ config::{ Action, ForwarderBuilder, HttpTunnelBuilder, InvalidPolicy, LabeledTunnelBuilder, OauthOptions, OidcOptions, Policy, ProxyProto, Rule, Scheme, TcpTunnelBuilder, TlsTunnelBuilder, TunnelBuilder, }, conn::{ Conn, ConnInfo, EdgeConnInfo, EndpointConnInfo, }, internals::proto::EdgeType, internals::proto::Error, tunnel::{ EdgeInfo, EndpointInfo, Tunnel, TunnelCloser, TunnelInfo, }, tunnel_ext::TunnelExt, }; } #[cfg(all(test, feature = "online-tests"))] mod online_tests; ================================================ FILE: ngrok/src/online_tests.rs ================================================ use std::{ convert::Infallible, error::Error, io, io::prelude::*, net::SocketAddr, str::FromStr, sync::{ atomic::{ AtomicUsize, Ordering, }, Arc, }, time::Duration, }; use anyhow::anyhow; use axum::{ routing::get, BoxError, Router, }; use bytes::Bytes; use flate2::read::GzDecoder; use futures::{ channel::oneshot, prelude::*, stream::FuturesUnordered, TryStreamExt, }; use futures_rustls::rustls::{ pki_types, ClientConfig, RootCertStore, }; // use native_tls; use hyper::{ body::Incoming, HeaderMap, Request, Uri, }; use hyper_util::{ rt::TokioExecutor, server, }; use once_cell::sync::Lazy; use paste::paste; use proxy_protocol::ProxyHeader; use rand::{ distributions::Alphanumeric, thread_rng, Rng, }; use reqwest::{ header, StatusCode, }; use tokio::{ io::{ AsyncReadExt, AsyncWriteExt, }, net::TcpStream, sync::mpsc, test, }; use tokio_tungstenite::{ connect_async, tungstenite::Message, }; use tokio_util::compat::*; use tower::{ util::ServiceExt, Service, }; use tracing_test::traced_test; use url::Url; use crate::{ prelude::*, session::{ SessionBuilder, CERT_BYTES, }, Session, }; async fn setup_session() -> Result { Ok(Session::builder().authtoken_from_env().connect().await?) 
} #[cfg_attr(not(feature = "online-tests"), ignore)] #[test] async fn listen() -> Result<(), BoxError> { let _ = Session::builder() .authtoken_from_env() .connect() .await? .http_endpoint() .listen() .await?; Ok(()) } #[cfg_attr(not(feature = "online-tests"), ignore)] #[test] async fn tunnel() -> Result<(), BoxError> { let tun = setup_session() .await? .http_endpoint() .metadata("Hello, world!") .forwards_to("some application") .listen() .await?; assert_eq!("Hello, world!", tun.metadata()); assert_eq!("some application", tun.forwards_to()); Ok(()) } struct TunnelGuard { tx: Option>, url: String, } impl Drop for TunnelGuard { fn drop(&mut self) { let _ = self.tx.take().unwrap().send(()); } } // Spawn an http server using the provided session and tunnel options, and an // axum router. // The returned guard, when dropped, will cause the server to shut down. async fn serve_http( build_session: impl FnOnce(&mut SessionBuilder) -> &mut SessionBuilder, build_tunnel: impl FnOnce(&mut HttpTunnelBuilder) -> &mut HttpTunnelBuilder, router: axum::Router, ) -> Result { let sess = build_session(Session::builder().authtoken_from_env()) .connect() .await?; let tun = build_tunnel(&mut sess.http_endpoint()).listen().await?; Ok(start_http_server(tun, router)) } fn start_http_server(mut tun: T, router: Router) -> TunnelGuard where T: EndpointInfo + Tunnel + 'static, T::Conn: crate::tunnel_ext::ConnExt, { let url = tun.url().into(); let (tx, rx) = oneshot::channel::<()>(); let mut make_service = router.into_make_service_with_connect_info::(); let server = async move { while let Some(conn) = tun.try_next().await? 
{ let remote_addr = conn.remote_addr(); let tower_service = unwrap_infallible(make_service.call(remote_addr).await); tokio::spawn(async move { let hyper_service = hyper::service::service_fn(move |request: Request| { tower_service.clone().oneshot(request) }); if let Err(err) = server::conn::auto::Builder::new(TokioExecutor::new()) .serve_connection_with_upgrades(conn, hyper_service) .await { eprintln!("failed to serve connection: {err:#}"); } }); } Ok::<(), BoxError>(()) }; tokio::spawn(futures::future::select(Box::pin(server), rx)); TunnelGuard { tx: tx.into(), url } } fn defaults(opts: &mut T) -> &mut T { opts } fn hello_router() -> Router { Router::new().route("/", get(|| async { "Hello, world!" })) } async fn check_body(url: impl AsRef, expected: impl AsRef) -> Result<(), BoxError> { let body: String = reqwest::get(url.as_ref()).await?.text().await?; assert_eq!(body, expected.as_ref()); Ok(()) } #[cfg_attr(not(feature = "online-tests"), ignore)] #[test] async fn https() -> Result<(), BoxError> { let tun = serve_http(defaults, defaults, hello_router()).await?; let url = tun.url.as_str(); assert!(url.starts_with("https://")); check_body(url, "Hello, world!").await?; Ok(()) } #[cfg_attr(not(feature = "online-tests"), ignore)] #[test] async fn http() -> Result<(), BoxError> { let tun = serve_http(defaults, |tun| tun.scheme(Scheme::HTTP), hello_router()).await?; let url = tun.url.as_str(); assert!(url.starts_with("http://")); check_body(url, "Hello, world!").await?; Ok(()) } #[cfg_attr(not(feature = "paid-tests"), ignore)] #[test] async fn http_compression() -> Result<(), BoxError> { let tun = serve_http(defaults, |tun| tun.compression(), hello_router()).await?; let url = tun.url.as_str(); let client = reqwest::Client::new(); let resp = client .get(url) .header(header::ACCEPT_ENCODING, "gzip") .send() .await?; assert_eq!( resp.headers().get(header::CONTENT_ENCODING).unwrap(), "gzip" ); let body_bytes = resp.bytes().await?; let mut decoder = 
GzDecoder::new(&*body_bytes); let mut body_string = String::new(); decoder.read_to_string(&mut body_string).unwrap(); assert_eq!(body_string, "Hello, world!"); Ok(()) } #[cfg_attr(not(feature = "paid-tests"), ignore)] #[test] async fn http_headers() -> Result<(), BoxError> { let (tx, mut rx) = mpsc::channel::(16); // For some reason, the hyper machinery keeps a clone of the `tx`, which // causes it to never look closed, even when we drop the tunnel guard, which // shuts down the hyper server. Maybe a leaked task? Work around it by // keeping only one RAII tx here, and only give the handler a weak ref to // it. let weak = tx.downgrade(); let handler = move |headers: HeaderMap| async move { let tx = weak .upgrade() .expect("no more requests after server shutdown"); if let Some(bar) = headers.get("foo") { if bar != "bar" { let _ = tx .send(format!("unexpected value for 'foo' request header: {:?}", bar).into()) .await; } } else { let _ = tx.send("missing 'foo' request header".into()).await; } if headers.get("baz").is_some() { let _ = tx.send("got 'baz' request header".into()).await; } ([("python", "lolnope")], "Hello, world!") }; let tun = serve_http( defaults, |tun| { tun.request_header("foo", "bar") .remove_request_header("baz") .response_header("spam", "eggs") .remove_response_header("python") }, Router::new().route("/", get(handler)), ) .await?; let url = &tun.url; let client = reqwest::Client::new(); let resp = client.get(url).header("baz", "bad header").send().await?; assert_eq!( resp.headers() .get("spam") .expect("'spam' header should exist"), "eggs" ); assert!(resp.headers().get("python").is_none(),); drop(tun); drop(tx); if let Some(err) = rx.recv().await { return Err(err); } Ok(()) } #[traced_test] #[cfg_attr(not(feature = "authenticated-tests"), ignore)] #[test] async fn user_agent() -> Result<(), BoxError> { let tun = serve_http( defaults, |tun| tun.allow_user_agent("foo.*").deny_user_agent(".*"), hello_router(), ) .await?; let client = 
reqwest::Client::new(); let resp = client.get(&tun.url).send().await?; assert_eq!(resp.status(), StatusCode::FORBIDDEN); let client = reqwest::Client::builder() .user_agent("foobarbaz") .build() .expect("build reqwest client"); let resp = client.get(&tun.url).send().await?; assert_eq!(resp.status(), StatusCode::OK); assert_eq!(resp.text().await?, "Hello, world!"); Ok(()) } #[traced_test] #[cfg_attr(not(feature = "paid-tests"), ignore)] #[test] async fn basic_auth() -> Result<(), BoxError> { let tun = serve_http( defaults, |tun| tun.basic_auth("user", "foobarbaz"), hello_router(), ) .await?; let client = reqwest::Client::new(); let resp = client.get(&tun.url).send().await?; assert_eq!(resp.status(), StatusCode::UNAUTHORIZED); let resp = client .get(&tun.url) .basic_auth("user", "foobarbaz".into()) .send() .await?; assert_eq!(resp.status(), StatusCode::OK); assert_eq!(resp.text().await?, "Hello, world!"); Ok(()) } #[traced_test] #[cfg_attr(not(feature = "paid-tests"), ignore)] #[test] async fn oauth() -> Result<(), BoxError> { let tun = serve_http( defaults, |tun| tun.oauth(OauthOptions::new("google")), hello_router(), ) .await?; let client = reqwest::Client::new(); let resp = client.get(&tun.url).send().await?; assert_eq!(resp.status(), StatusCode::OK); let body = resp.text().await?; assert_ne!(body, "Hello, world!"); assert!(body.contains("accounts.google.com")); Ok(()) } #[traced_test] #[cfg_attr(not(feature = "paid-tests"), ignore)] #[test] async fn custom_domain() -> Result<(), BoxError> { let mut rng = thread_rng(); let subdomain = (0..7) .map(|_| rng.sample(Alphanumeric) as char) .collect::() .to_lowercase(); let _tun = serve_http( defaults, |tun| tun.domain(format!("{subdomain}.ngrok.io")), hello_router(), ) .await?; check_body(format!("https://{subdomain}.ngrok.io"), "Hello, world!").await?; Ok(()) } #[traced_test] #[cfg_attr(not(feature = "paid-tests"), ignore)] #[test] async fn policy() -> Result<(), BoxError> { let tun = serve_http( defaults, |tun| 
tun.policy(create_policy()).unwrap(), hello_router(), ) .await?; let client = reqwest::Client::new(); let resp = client.get(&tun.url).send().await?; assert_eq!(resp.status(), 222); Ok(()) } fn create_policy() -> Result { Ok(Policy::new() .add_inbound( Rule::new("deny_put") .add_expression("req.Method == 'PUT'") .add_action(Action::new("deny", None)?), ) .add_outbound( Rule::new("222_response") .add_expression("res.StatusCode == '200'") .add_action(Action::new( "custom-response", Some("{\"status_code\": 222}"), )?), ) .to_owned()) } #[traced_test] #[cfg_attr(not(all(feature = "paid-tests", feature = "long-tests")), ignore)] #[test] async fn circuit_breaker() -> Result<(), BoxError> { let ctr = Arc::new(AtomicUsize::new(0)); let tun = serve_http( defaults, |tun| tun.circuit_breaker(0.01), Router::new().route( "/", get({ let ctr = ctr.clone(); move || { ctr.fetch_add(1, Ordering::SeqCst); async { hyper::StatusCode::INTERNAL_SERVER_ERROR } } }), ), ) .await?; let mut attempts = 0; for _ in 0..20 { let mut futs = FuturesUnordered::new(); // smaller batches to have less in-flight requests and break sooner for _ in 0..25 { attempts += 1; let url = tun.url.clone(); futs.push(async move { let resp = reqwest::get(url).await?; let status = resp.status(); tracing::debug!(?status); Result::<_, BoxError>::Ok(resp.status()) }); } let mut done = false; while let Some(res) = futs.next().await { if res? 
== StatusCode::SERVICE_UNAVAILABLE { // circuit breaker is working, done after this batch done = true; } } if done { break; } } // validate that some, but not all, requests were dropped let actual = ctr.load(Ordering::SeqCst); assert!(actual > 4, "expected > 4 requests, got {actual}"); assert!( actual < attempts, "expected < {attempts} requests, got {actual}" ); Ok(()) } // Shamelessly ripped from stackoverflow: // https://stackoverflow.com/questions/35901547/how-can-i-find-a-subsequence-in-a-u8-slice fn find_subsequence(haystack: &[T], needle: &[T]) -> Option where for<'a> &'a [T]: PartialEq, { haystack .windows(needle.len()) .position(|window| window == needle) } macro_rules! proxy_proto_test { (genone: $ept:ident, $vers:ident, $tun:ident, $req:expr, $cont:expr) => { paste! { #[traced_test] #[cfg_attr(not(feature = "paid-tests"), ignore)] #[test] #[allow(non_snake_case)] async fn []() -> Result<(), BoxError> { let sess = Session::builder().authtoken_from_env().connect().await?; let mut $tun = sess .[<$ept _endpoint>]() .proxy_proto(ProxyProto::$vers).listen().await?; let req = $req; tokio::spawn(req); let mut buf = vec![0u8; 12]; let mut conn = $tun .try_next() .await? 
.ok_or_else(|| anyhow!("tunnel closed"))?; conn.read_exact(&mut buf).await?; assert!(find_subsequence(&buf, $cont).is_some()); Ok(()) } } }; ($vers:ident, $ex:expr, [$(($ept:ident, |$tun:ident| $req:expr)),*]) => { $( proxy_proto_test!(genone: $ept, $vers, $tun, $req, $ex); )* }; ([$(($vers:ident, $ex:expr)),*] $rest:tt) => { $( proxy_proto_test!($vers, $ex, $rest); )* }; } proxy_proto_test!( [(V1, &b"PROXY TCP"[..]), (V2, &b"\x0D\x0A\x0D\x0A\x00\x0D\x0A\x51\x55\x49\x54\x0A"[..])] [ (http, |tun| { reqwest::get(tun.url().to_string()) }), (tcp, |tun| { reqwest::get(tun.url().to_string().replacen("tcp", "http", 1)) }) ] ); #[traced_test] #[test] #[cfg_attr(not(feature = "paid-tests"), ignore)] async fn http_ip_restriction() -> Result<(), BoxError> { let tun = serve_http( defaults, |tun| tun.allow_cidr("127.0.0.1/32").deny_cidr("0.0.0.0/0"), hello_router(), ) .await?; let resp = reqwest::get(&tun.url).await?; assert_eq!(resp.status(), StatusCode::FORBIDDEN); Ok(()) } #[traced_test] #[test] #[cfg_attr(not(feature = "paid-tests"), ignore)] async fn tcp_ip_restriction() -> Result<(), BoxError> { let tun = Session::builder() .authtoken_from_env() .connect() .await? .tcp_endpoint() .allow_cidr("127.0.0.1/32") .deny_cidr("0.0.0.0/0") .listen() .await?; let tun = start_http_server(tun, hello_router()); let url = tun.url.replacen("tcp", "http", 1); assert!(reqwest::get(&url).await.is_err()); Ok(()) } #[traced_test] #[test] #[cfg_attr(not(feature = "paid-tests"), ignore)] async fn websocket_conversion() -> Result<(), BoxError> { let mut tun = Session::builder() .authtoken_from_env() .connect() .await? .http_endpoint() .websocket_tcp_conversion() .listen() .await?; let url = Uri::from_str(&tun.url().replacen("https", "wss", 1))?; tokio::spawn(async move { while let Some(mut conn) = tun.try_next().await? 
{ conn.write_all("Hello, websockets!".as_bytes()).await?; } Result::<_, BoxError>::Ok(()) }); let mut wss = connect_async(url).await.expect("connect").0; loop { let msg = wss.try_next().await.expect("read").expect("message"); match msg { Message::Binary(bs) => { assert_eq!(String::from_utf8_lossy(&bs), "Hello, websockets!"); break; } Message::Text(t) => { assert_eq!(t, "Hello, websockets!"); break; } Message::Ping(b) => { wss.send(Message::Pong(b)).await?; } Message::Close(_) => { return Err(BoxError::from("didn't get message before close")); } _ => {} } } Ok(()) } #[traced_test] #[test] #[cfg_attr(not(feature = "authenticated-tests"), ignore)] async fn tcp() -> Result<(), BoxError> { let tun = Session::builder() .authtoken_from_env() .connect() .await? .tcp_endpoint() .listen() .await?; let tun = start_http_server(tun, hello_router()); let url = tun.url.replacen("tcp", "http", 1); check_body(url, "Hello, world!").await?; Ok(()) } const CERT: &[u8] = include_bytes!("../examples/domain.crt"); const KEY: &[u8] = include_bytes!("../examples/domain.key"); #[traced_test] #[test] #[cfg_attr(not(feature = "authenticated-tests"), ignore)] async fn tls() -> Result<(), BoxError> { let tun = Session::builder() .authtoken_from_env() .connect() .await? 
.tls_endpoint() .termination(CERT.into(), KEY.into()) .listen() .await?; let tun = start_http_server(tun, hello_router()); let url = tun.url.replacen("tls", "http", 1); // Create a client with verbose logging and longer timeout let client = reqwest::Client::new(); let resp = client.get(url.clone()).send().await; assert!(resp.is_err()); let err = resp.err().unwrap(); // Check if the error is a certificate error let is_certificate_error = if let Some(source) = err.source() { // Try to downcast to hyper_util::client::legacy::Error if let Some(hyper_error) = source.downcast_ref::() { // Convert the entire error to a string to extract the message let error_str = hyper_error.source().unwrap().to_string(); error_str.contains("certificate") } else { // If we can't downcast to the specific error type, fall back to string matching let source_str = format!("{:?}", source); assert!(source_str.contains("certificate")); return Ok(()); } } else { // If there's no source, return an error return Err("No error source found".into()); }; assert!(is_certificate_error); Ok(()) } #[test] #[cfg_attr(not(feature = "authenticated-tests"), ignore)] async fn app_protocol() -> Result<(), BoxError> { let tun = Session::builder() .authtoken_from_env() .connect() .await? .http_endpoint() .app_protocol("http2") .listen_and_forward("https://ngrok.com".parse()?) .await?; // smoke test let client = reqwest::Client::new(); let resp = client.get(tun.url()).send().await; assert!(resp.is_ok()); Ok(()) } #[test] #[cfg_attr(not(feature = "authenticated-tests"), ignore)] async fn verify_upstream_tls() -> Result<(), BoxError> { let tun = Session::builder() .authtoken_from_env() .connect() .await? .http_endpoint() .verify_upstream_tls(false) .listen_and_forward("https://ngrok.com".parse()?) 
.await?; // smoke test let client = reqwest::Client::new(); let resp = client.get(tun.url()).send().await; assert!(resp.is_ok()); Ok(()) } #[cfg_attr(not(feature = "online-tests"), ignore)] #[test] async fn session_root_cas() -> Result<(), BoxError> { // host cannot validate cert let resp = Session::builder() .authtoken_from_env() .root_cas("host")? .connect() .await; assert!(resp.is_err()); let err_str = resp.err().unwrap().to_string(); tracing::debug!(?err_str); assert!(err_str.contains("tls")); // tls issue // default of 'trusted' cannot validate the marketing site let resp = Session::builder() .authtoken_from_env() .server_addr("ngrok.com:443")? .connect() .await; assert!(resp.is_err()); let err_str = resp.err().unwrap().to_string(); tracing::debug!(?err_str); assert!(err_str.contains("tls")); // tls issue // "host" certs can validate the marketing site's let's encrypt cert let resp = Session::builder() .authtoken_from_env() .root_cas("host")? .server_addr("ngrok.com:443")? .connect() .await; assert!(resp.is_err()); let err_str = resp.err().unwrap().to_string(); tracing::debug!(?err_str); assert!(!err_str.contains("tls")); // not a tls problem // use the trusted cert, this should connect Session::builder() .authtoken_from_env() .root_cas("trusted")? .connect() .await?; // use the default cert, this should connect Session::builder() .authtoken_from_env() .root_cas("assets/ngrok.ca.crt")? 
.connect() .await?; Ok(()) } #[cfg_attr(not(feature = "online-tests"), ignore)] #[test] async fn session_ca_cert() -> Result<(), BoxError> { // invalid cert let resp = Session::builder() .authtoken_from_env() .ca_cert(CERT.into()) .connect() .await; assert!(resp.is_err()); let err_str = resp.err().unwrap().to_string(); tracing::debug!(?err_str); assert!(err_str.contains("tls")); // use the default cert, this should connect Session::builder() .authtoken_from_env() .ca_cert(CERT_BYTES.into()) .connect() .await?; Ok(()) } #[cfg_attr(not(feature = "online-tests"), ignore)] #[test] async fn session_tls_config() -> Result<(), BoxError> { let default_tls_config = Session::builder().get_or_create_tls_config(); // invalid cert, but valid tls_config overrides Session::builder() .authtoken_from_env() .ca_cert(CERT.into()) .tls_config(default_tls_config) .connect() .await?; Ok(()) } fn tls_client_config() -> Result, &'static io::Error> { static CONFIG: Lazy, io::Error>> = Lazy::new(|| { let der_certs = rustls_native_certs::load_native_certs()? 
.into_iter() .collect::>(); let mut root_store = RootCertStore::empty(); root_store.add_parsable_certificates(der_certs); let config = ClientConfig::builder() .with_root_certificates(root_store) .with_no_client_auth(); Ok(Arc::new(config)) }); Ok(CONFIG.as_ref()?.clone()) } #[traced_test] #[test] async fn connect_proxy_http() -> Result<(), BoxError> { let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await?; let addr = listener.local_addr()?; let (tx, mut rx) = mpsc::channel::(1); let shutdown = tokio_util::sync::CancellationToken::new(); let ln_shutdown = shutdown.clone(); tokio::spawn(async move { let res = connect_proxy::run_proxy(listener, ln_shutdown).await; tx.send(res).await.unwrap(); }); let sess = Session::builder() .authtoken_from_env() .proxy_url(format!("http://{addr}").parse().unwrap()) .unwrap() .connect() .await?; tracing::debug!("{}", sess.id()); shutdown.cancel(); // verify we got a request let conns = rx.recv().await; assert_eq!(Some(1), conns); Ok(()) } // connect_proxy contains code for connect_proxy tests // This code is adapted from https://github.com/hyperium/hyper/blob/c449528a33d266a8ca1210baca11e5d649ca6c27/examples/http_proxy.rs#L37 // Used under the terms of the MIT license, Copyright (c) 2014-2025 Sean McArthur mod connect_proxy { use bytes::Bytes; use http_body_util::{ combinators::BoxBody, BodyExt, Empty, Full, }; use hyper::{ client::conn::http1::Builder, http, server::conn::http1, service::service_fn, upgrade::Upgraded, Method, Request, Response, }; use hyper_util::rt::TokioIo; use tokio::net::TcpStream; use tokio_util::sync::CancellationToken; pub async fn run_proxy(listener: tokio::net::TcpListener, shutdown: CancellationToken) -> u64 { // count requests so our caller can test that we received a request let mut req_count = 0; loop { let (stream, _) = match shutdown.run_until_cancelled(listener.accept()).await { None => { return req_count; } Some(r) => r.unwrap(), }; let io = TokioIo::new(stream); req_count += 1; 
tokio::task::spawn(async move { if let Err(err) = http1::Builder::new() .preserve_header_case(true) .title_case_headers(true) .serve_connection(io, service_fn(proxy)) .with_upgrades() .await { println!("Failed to serve connection: {:?}", err); } }); } } async fn proxy( req: Request, ) -> Result>, hyper::Error> { println!("req: {:?}", req); if Method::CONNECT == req.method() { // Received an HTTP request like: // ``` // CONNECT www.domain.com:443 HTTP/1.1 // Host: www.domain.com:443 // Proxy-Connection: Keep-Alive // ``` // // When HTTP method is CONNECT we should return an empty body // then we can eventually upgrade the connection and talk a new protocol. // // Note: only after client received an empty body with STATUS_OK can the // connection be upgraded, so we can't return a response inside // `on_upgrade` future. if let Some(addr) = host_addr(req.uri()) { tokio::task::spawn(async move { match hyper::upgrade::on(req).await { Ok(upgraded) => { if let Err(e) = tunnel(upgraded, addr).await { eprintln!("server io error: {}", e); }; } Err(e) => eprintln!("upgrade error: {}", e), } }); Ok(Response::new(empty())) } else { eprintln!("CONNECT host is not socket addr: {:?}", req.uri()); let mut resp = Response::new(full("CONNECT must be to a socket address")); *resp.status_mut() = http::StatusCode::BAD_REQUEST; Ok(resp) } } else { let host = req.uri().host().expect("uri has no host"); let port = req.uri().port_u16().unwrap_or(80); let stream = TcpStream::connect((host, port)).await.unwrap(); let io = TokioIo::new(stream); let (mut sender, conn) = Builder::new() .preserve_header_case(true) .title_case_headers(true) .handshake(io) .await?; tokio::task::spawn(async move { if let Err(err) = conn.await { println!("Connection failed: {:?}", err); } }); let resp = sender.send_request(req).await?; Ok(resp.map(|b| b.boxed())) } } fn host_addr(uri: &http::Uri) -> Option { uri.authority().map(|auth| auth.to_string()) } fn empty() -> BoxBody { Empty::::new() .map_err(|never| match 
never {}) .boxed() } fn full>(chunk: T) -> BoxBody { Full::new(chunk.into()) .map_err(|never| match never {}) .boxed() } // Create a TCP connection to host:port, build a tunnel between the connection and // the upgraded connection async fn tunnel(upgraded: Upgraded, addr: String) -> std::io::Result<()> { // Connect to remote server let mut server = TcpStream::connect(addr).await?; let mut upgraded = TokioIo::new(upgraded); // Proxying data let (from_client, from_server) = tokio::io::copy_bidirectional(&mut upgraded, &mut server).await?; // Print message when done println!( "client wrote {} bytes and received {} bytes", from_client, from_server ); Ok(()) } } #[traced_test] #[cfg_attr(not(feature = "paid-tests"), ignore)] #[test] async fn forward_proxy_protocol_tls() -> Result<(), BoxError> { let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await?; let addr = listener.local_addr()?; let sess = Session::builder().authtoken_from_env().connect().await?; let forwarder = sess .tls_endpoint() .proxy_proto(ProxyProto::V2) .termination(Bytes::default(), Bytes::default()) .listen_and_forward(format!("tls://{}", addr).parse()?) .await?; let tunnel_url: Url = forwarder.url().to_string().parse()?; tokio::spawn(async move { tokio::time::sleep(Duration::from_millis(500)).await; let tunnel_conn = TcpStream::connect(format!( "{}:{}", tunnel_url.host_str().unwrap(), tunnel_url.port().unwrap_or(443) )) .await?; let domain = pki_types::ServerName::try_from(tunnel_url.host_str().unwrap()) .map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e))? .to_owned(); let mut tls_conn = futures_rustls::TlsConnector::from( tls_client_config().map_err(|e| io::Error::from(e.kind()))?, ) .connect(domain, tunnel_conn.compat()) .await? 
.compat(); tls_conn.write_all(b"Hello, world!").await }); let (conn, _) = listener.accept().await?; let mut proxy_conn = crate::proxy_proto::Stream::incoming(conn); let proxy_header = proxy_conn.proxy_header().await?.unwrap().cloned().unwrap(); match proxy_header { ProxyHeader::Version2 { .. } => {} _ => unreachable!("we configured v2"), } // TODO: actually accept the tls connection from the server side Ok(()) } fn unwrap_infallible(result: Result) -> T { match result { Ok(value) => value, Err(err) => match err {}, } } ================================================ FILE: ngrok/src/proxy_proto.rs ================================================ use std::{ io, mem, pin::{ pin, Pin, }, task::{ ready, Context, Poll, }, }; use bytes::{ Buf, BytesMut, }; use proxy_protocol::{ ParseError, ProxyHeader, }; use tokio::io::{ AsyncRead, AsyncWrite, ReadBuf, }; use tracing::instrument; // 536 is the smallest possible TCP segment, which both v1 and v2 are guaranteed // to fit into. const MAX_HEADER_LEN: usize = 536; // v2 headers start with at least 16 bytes const MIN_HEADER_LEN: usize = 16; #[derive(Debug)] enum ReadState { Reading(Option, BytesMut), Error(proxy_protocol::ParseError, BytesMut), Header(Option, BytesMut), None, } impl ReadState { fn new() -> ReadState { ReadState::Reading(None, BytesMut::with_capacity(MAX_HEADER_LEN)) } fn header(&self) -> Result, &ParseError> { match self { ReadState::Error(err, _) | ReadState::Reading(Some(err), _) => Err(err), ReadState::None | ReadState::Reading(None, _) => Ok(None), ReadState::Header(hdr, _) => Ok(hdr.as_ref()), } } /// Read the header from the stream *once*. Once a header has been read, or /// it's been determined that no header is coming, this will be a no-op. 
#[instrument(level = "trace", skip(reader))]
fn poll_read_header_once(
    &mut self,
    cx: &mut Context,
    mut reader: Pin<&mut impl AsyncRead>,
) -> Poll<io::Result<()>> {
    loop {
        // Take ownership of the current state, leaving a cheap placeholder.
        let read_state = mem::replace(self, ReadState::None);
        let (last_err, mut hdr_buf) = match read_state {
            // End states
            ReadState::None | ReadState::Header(_, _) | ReadState::Error(_, _) => {
                *self = read_state;
                return Poll::Ready(Ok(()));
            }
            ReadState::Reading(err, hdr_buf) => (err, hdr_buf),
        };

        if hdr_buf.len() < MAX_HEADER_LEN {
            let mut tmp_buf = ReadBuf::uninit(hdr_buf.spare_capacity_mut());
            let read_res = reader.as_mut().poll_read(cx, &mut tmp_buf);
            // Regardless of error, make sure we track the read bytes
            let filled = tmp_buf.filled().len();
            if filled > 0 {
                let len = hdr_buf.len();
                // Safety: the tmp_buf is backed by the uninitialized
                // portion of hdr_buf. Advancing the len to len + filled is
                // guaranteed to only cover the bytes initialized by the
                // read.
                unsafe { hdr_buf.set_len(len + filled) }
            }
            match read_res {
                // If we hit the end of the stream due to either an EOF or
                // an error, set the state to a terminal one and return the
                // result.
                Poll::Ready(ref res) if res.is_err() || filled == 0 => {
                    *self = match last_err {
                        Some(err) => ReadState::Error(err, hdr_buf),
                        None => ReadState::Header(None, hdr_buf),
                    };
                    return read_res;
                }
                // Pending leaves the last error and buffer unchanged.
                Poll::Pending => {
                    *self = ReadState::Reading(last_err, hdr_buf);
                    return read_res;
                }
                _ => {}
            }
        }

        // Create a view into the header buffer so that failed parse
        // attempts don't consume it.
        let mut hdr_view = &*hdr_buf;

        // Don't try to parse unless we have a minimum number of bytes to
        // avoid spurious "NotProxyHeader" errors.
        // Also hack around a bug in the proxy_protocol crate that results
        // in panics when the input ends in \r without the \n.
if hdr_view.len() < MIN_HEADER_LEN || matches!(hdr_view.last(), Some(b'\r')) { *self = ReadState::Reading(last_err, hdr_buf); continue; } match proxy_protocol::parse(&mut hdr_view) { Ok(hdr) => { hdr_buf.advance(hdr_buf.len() - hdr_view.len()); *self = ReadState::Header(Some(hdr), hdr_buf); return Poll::Ready(Ok(())); } Err(ParseError::NotProxyHeader) => { *self = ReadState::Header(None, hdr_buf); return Poll::Ready(Ok(())); } // Keep track of the last error - it might not be fatal if we // simply haven't read enough Err(err) => { // If we've read too much, consider the error fatal. if hdr_buf.len() >= MAX_HEADER_LEN { *self = ReadState::Error(err, hdr_buf); } else { *self = ReadState::Reading(Some(err), hdr_buf); } continue; } } } } } #[derive(Debug)] enum WriteState { Writing(BytesMut), None, } impl WriteState { fn new(hdr: proxy_protocol::ProxyHeader) -> Result { proxy_protocol::encode(hdr).map(WriteState::Writing) } /// Write the header *once*. After its written to the stream, this will be a /// no-op. #[instrument(level = "trace", skip(writer))] fn poll_write_header_once( &mut self, cx: &mut Context, mut writer: Pin<&mut impl AsyncWrite>, ) -> Poll> { loop { let state = mem::replace(self, WriteState::None); match state { WriteState::None => return Poll::Ready(Ok(())), WriteState::Writing(mut buf) => { let write_res = writer.as_mut().poll_write(cx, &buf); match write_res { Poll::Pending | Poll::Ready(Err(_)) => { *self = WriteState::Writing(buf); ready!(write_res)?; unreachable!( "ready! 
will return for us on either Pending or Ready(Err)"
                            );
                        }
                        Poll::Ready(Ok(written)) => {
                            buf.advance(written);
                            if !buf.is_empty() {
                                // Partial write: keep the remainder queued.
                                *self = WriteState::Writing(buf);
                                continue;
                            } else {
                                return Ok(()).into();
                            }
                        }
                    }
                }
            }
        }
    }
}

/// A stream wrapper that reads or writes a proxy-protocol header before
/// passing bytes through to the inner stream.
#[derive(Debug)]
#[pin_project::pin_project]
pub struct Stream<S> {
    read_state: ReadState,
    write_state: WriteState,
    #[pin]
    inner: S,
}

impl<S> Stream<S> {
    /// Wrap an outbound stream, queueing `header` to be written before any
    /// other data.
    pub fn outgoing(stream: S, header: ProxyHeader) -> Result<Self, proxy_protocol::EncodeError> {
        Ok(Stream {
            inner: stream,
            write_state: WriteState::new(header)?,
            read_state: ReadState::None,
        })
    }

    /// Wrap an inbound stream, attempting to read a proxy-protocol header
    /// from its start.
    pub fn incoming(stream: S) -> Self {
        Stream {
            inner: stream,
            read_state: ReadState::new(),
            write_state: WriteState::None,
        }
    }

    /// Wrap a stream with proxy-protocol handling disabled in both
    /// directions.
    pub fn disabled(stream: S) -> Self {
        Stream {
            inner: stream,
            read_state: ReadState::None,
            write_state: WriteState::None,
        }
    }
}

impl<S> Stream<S>
where
    S: AsyncRead,
{
    /// Read (at most once) and return the proxy-protocol header, if present.
    #[instrument(level = "debug", skip(self))]
    pub async fn proxy_header(&mut self) -> io::Result<Result<Option<&ProxyHeader>, &ParseError>>
    where
        Self: Unpin,
    {
        let mut this = Pin::new(self);
        futures::future::poll_fn(|cx| {
            let this = this.as_mut().project();
            this.read_state.poll_read_header_once(cx, this.inner)
        })
        .await?;
        Ok(this.get_mut().read_state.header())
    }
}

impl<S> AsyncRead for Stream<S>
where
    S: AsyncRead,
{
    #[instrument(level = "trace", skip(self), fields(read_state = ?self.read_state))]
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        let mut this = self.project();
        // Consume the header first; afterwards any buffered remainder is
        // drained before delegating to the inner stream.
        ready!(this
            .read_state
            .poll_read_header_once(cx, this.inner.as_mut()))?;
        match this.read_state {
            ReadState::Error(_, remainder) | ReadState::Header(_, remainder) => {
                if !remainder.is_empty() {
                    let available = std::cmp::min(remainder.len(), buf.remaining());
                    buf.put_slice(&remainder.split_to(available));
                    // Make sure Ready is returned regardless of inner's state
                    return Poll::Ready(Ok(()));
                }
            }
            ReadState::None => {}
            _ => unreachable!(),
        }
        this.inner.poll_read(cx, buf)
    }
}

impl<S> AsyncWrite for Stream<S>
where
    S: AsyncWrite,
{
    #[instrument(level = "trace", skip(self), fields(write_state =
?self.write_state))]
    fn poll_write(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<Result<usize, io::Error>> {
        let mut this = self.project();
        // Flush any queued proxy-protocol header before user data.
        ready!(this
            .write_state
            .poll_write_header_once(cx, this.inner.as_mut()))?;
        this.inner.poll_write(cx, buf)
    }

    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        self.project().inner.poll_flush(cx)
    }

    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        self.project().inner.poll_shutdown(cx)
    }
}

#[cfg(feature = "hyper")]
mod hyper {
    use ::hyper::rt::{
        Read as HyperRead,
        Write as HyperWrite,
    };

    use super::*;

    // Adapters so a proxy-proto Stream can be used with hyper's I/O traits;
    // they delegate to the tokio AsyncRead/AsyncWrite impls above.
    impl<S> HyperWrite for Stream<S>
    where
        S: AsyncWrite,
    {
        #[instrument(level = "trace", skip(self), fields(write_state = ?self.write_state))]
        fn poll_write(
            self: Pin<&mut Self>,
            cx: &mut Context<'_>,
            buf: &[u8],
        ) -> Poll<Result<usize, io::Error>> {
            <Self as AsyncWrite>::poll_write(self, cx, buf)
        }

        fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
            <Self as AsyncWrite>::poll_flush(self, cx)
        }

        fn poll_shutdown(
            self: Pin<&mut Self>,
            cx: &mut Context<'_>,
        ) -> Poll<Result<(), io::Error>> {
            <Self as AsyncWrite>::poll_shutdown(self, cx)
        }
    }

    impl<S> HyperRead for Stream<S>
    where
        S: AsyncRead,
    {
        fn poll_read(
            self: Pin<&mut Self>,
            cx: &mut Context<'_>,
            mut buf: ::hyper::rt::ReadBufCursor<'_>,
        ) -> Poll<Result<(), io::Error>> {
            // Bridge hyper's uninitialized cursor to a tokio ReadBuf.
            // Safety: the cursor's unfilled region is valid for writes, and we
            // only advance by the number of bytes the read initialized.
            let mut tokio_buf = tokio::io::ReadBuf::uninit(unsafe { buf.as_mut() });
            let res = ready!(<Self as AsyncRead>::poll_read(self, cx, &mut tokio_buf));
            let filled = tokio_buf.filled().len();
            unsafe { buf.advance(filled) };
            Poll::Ready(res)
        }
    }
}

#[cfg(test)]
mod test {
    use std::{
        cmp,
        io,
        pin::Pin,
        task::{
            ready,
            Context,
            Poll,
        },
        time::Duration,
    };

    use bytes::{
        BufMut,
        BytesMut,
    };
    use proxy_protocol::{
        version2::{
            self,
            ProxyCommand,
        },
        ProxyHeader,
    };
    use tokio::io::{
        AsyncRead,
        AsyncReadExt,
        AsyncWriteExt,
        ReadBuf,
    };

    use super::Stream;

    /// Test reader that forwards at most a small, random number of bytes per
    /// poll, to exercise resilience against split inputs.
    #[pin_project::pin_project]
    struct ShortReader<S> {
        #[pin]
        inner: S,
        min: usize,
        max: usize,
    }

    impl<S> AsyncRead for ShortReader<S>
    where
        S: AsyncRead,
    {
        fn poll_read(
            self: Pin<&mut Self>,
            cx: &mut Context<'_>,
            buf: &mut ReadBuf<'_>,
        ) -> Poll<io::Result<()>> {
            let mut this = self.project();
            let max_bytes =
*this.min + cmp::max(1, rand::random::() % (*this.max - *this.min)); let mut tmp = vec![0; max_bytes]; let mut tmp_buf = ReadBuf::new(&mut tmp); let res = ready!(this.inner.as_mut().poll_read(cx, &mut tmp_buf)); buf.put_slice(tmp_buf.filled()); res?; Poll::Ready(Ok(())) } } impl ShortReader { fn new(inner: S, min: usize, max: usize) -> Self { ShortReader { inner, min, max } } } const INPUT: &str = "PROXY TCP4 192.168.0.1 192.168.0.11 56324 443\r\n"; const PARTIAL_INPUT: &str = "PROXY TCP4 192.168.0.1"; const FINAL_INPUT: &str = " 192.168.0.11 56324 443\r\n"; // Smoke test to ensure that the proxy protocol parser works as expected. // Not actually testing our code. #[test] fn test_proxy_protocol() { let mut buf = BytesMut::from(INPUT); assert!(proxy_protocol::parse(&mut buf).is_ok()); buf = BytesMut::from(PARTIAL_INPUT); assert!(proxy_protocol::parse(&mut &*buf).is_err()); buf.put_slice(FINAL_INPUT.as_bytes()); assert!(proxy_protocol::parse(&mut &*buf).is_ok()); } #[tokio::test] #[tracing_test::traced_test] async fn test_header_stream_v2() { let (left, mut right) = tokio::io::duplex(1024); let header = ProxyHeader::Version2 { command: ProxyCommand::Proxy, transport_protocol: version2::ProxyTransportProtocol::Stream, addresses: version2::ProxyAddresses::Ipv4 { source: "127.0.0.1:1".parse().unwrap(), destination: "127.0.0.2:2".parse().unwrap(), }, }; let input = proxy_protocol::encode(header).unwrap(); let mut proxy_stream = Stream::incoming(ShortReader::new(left, 2, 5)); // Chunk our writes to ensure that our reader is resilient across split inputs. tokio::spawn(async move { tokio::time::sleep(Duration::from_millis(50)).await; right.write_all(&input).await.expect("write header"); right .write_all(b"Hello, world!") .await .expect("write hello"); right.shutdown().await.expect("shutdown"); }); let hdr = proxy_stream .proxy_header() .await .expect("read header") .expect("decode header") .expect("header exists"); assert!(matches!(hdr, ProxyHeader::Version2 { .. 
})); let mut buf = String::new(); proxy_stream .read_to_string(&mut buf) .await .expect("read rest"); assert_eq!(buf, "Hello, world!"); // Get the header again - should be the same. let hdr = proxy_stream .proxy_header() .await .expect("read header") .expect("decode header") .expect("header exists"); assert!(matches!(hdr, ProxyHeader::Version2 { .. })); } #[tokio::test] #[tracing_test::traced_test] async fn test_header_stream() { let (left, mut right) = tokio::io::duplex(1024); let mut proxy_stream = Stream::incoming(ShortReader::new(left, 2, 5)); // Chunk our writes to ensure that our reader is resilient across split inputs. tokio::spawn(async move { tokio::time::sleep(Duration::from_millis(50)).await; right .write_all(INPUT.as_bytes()) .await .expect("write header"); right .write_all(b"Hello, world!") .await .expect("write hello"); right.shutdown().await.expect("shutdown"); }); let hdr = proxy_stream .proxy_header() .await .expect("read header") .expect("decode header") .expect("header exists"); assert!(matches!(hdr, ProxyHeader::Version1 { .. })); let mut buf = String::new(); proxy_stream .read_to_string(&mut buf) .await .expect("read rest"); assert_eq!(buf, "Hello, world!"); // Get the header again - should be the same. let hdr = proxy_stream .proxy_header() .await .expect("read header") .expect("decode header") .expect("header exists"); assert!(matches!(hdr, ProxyHeader::Version1 { .. 
})); } #[tokio::test] #[tracing_test::traced_test] async fn test_noheader() { let (left, mut right) = tokio::io::duplex(1024); let mut proxy_stream = Stream::incoming(left); right .write_all(b"Hello, world!") .await .expect("write stream"); right.shutdown().await.expect("shutdown"); drop(right); assert!(proxy_stream .proxy_header() .await .unwrap() .unwrap() .is_none()); let mut buf = String::new(); proxy_stream .read_to_string(&mut buf) .await .expect("read stream"); assert_eq!(buf, "Hello, world!"); } } ================================================ FILE: ngrok/src/session.rs ================================================ use std::{ collections::{ HashMap, VecDeque, }, env, io, sync::{ atomic::{ AtomicBool, Ordering, }, Arc, }, time::Duration, }; use arc_swap::ArcSwap; use async_trait::async_trait; use bytes::Bytes; use futures::{ prelude::*, FutureExt, }; use futures_rustls::rustls::{ self, pki_types, RootCertStore, }; use hyper_http_proxy::{ Intercept, Proxy, ProxyConnector, }; use hyper_util::client::legacy::connect::HttpConnector; use muxado::heartbeat::HeartbeatConfig; pub use muxado::heartbeat::HeartbeatHandler; use once_cell::sync::{ Lazy, OnceCell, }; use regex::Regex; use rustls_pemfile::Item; use thiserror::Error; use tokio::{ io::{ AsyncRead, AsyncWrite, }, runtime::Handle, sync::{ mpsc::{ channel, Sender, }, Mutex, RwLock, }, }; use tokio_retry::{ strategy::ExponentialBackoff, RetryIf, }; use tokio_util::compat::{ FuturesAsyncReadCompatExt, TokioAsyncReadCompatExt, }; use tower_service::Service; use tracing::{ debug, warn, }; use url::Url; pub use crate::internals::{ proto::{ CommandResp, Restart, Stop, StopTunnel, Update, }, raw_session::{ CommandHandler, RpcError, }, }; use crate::{ config::{ HttpTunnelBuilder, LabeledTunnelBuilder, ProxyProto, TcpTunnelBuilder, TlsTunnelBuilder, TunnelConfig, }, conn::ConnInner, internals::{ proto::{ AuthExtra, BindExtra, BindOpts, Error, HttpEndpoint, SecretString, TcpEndpoint, TlsEndpoint, }, raw_session::{ 
AcceptError as RawAcceptError, CommandHandlers, IncomingStreams, RawSession, RpcClient, StartSessionError, NOT_IMPLEMENTED, }, }, tunnel::{ AcceptError, TunnelInner, TunnelInnerInfo, }, }; pub(crate) const CERT_BYTES: &[u8] = include_bytes!("../assets/ngrok.ca.crt"); const CLIENT_TYPE: &str = "ngrok-rust"; const VERSION: &str = env!("CARGO_PKG_VERSION"); #[derive(Clone)] struct BoundTunnel { proto: String, opts: Option, extra: BindExtra, labels: HashMap, forwards_to: String, forwards_proto: String, verify_upstream_tls: bool, tx: Sender>, } type TunnelConns = HashMap; /// An ngrok session. /// /// Encapsulates an established session with the ngrok service. Sessions recover /// from network failures by automatically reconnecting. #[derive(Clone)] pub struct Session { // Note: this is implicitly used to detect when the session (and its // tunnels) have been dropped in order to shut down the accept loop. _dropref: awaitdrop::Ref, inner: Arc>, } struct SessionInner { runtime: Handle, client: Mutex, closed: AtomicBool, tunnels: RwLock, builder: SessionBuilder, } /// A trait alias for types that can provide the base ngrok transport, i.e. /// bidirectional byte streams. /// /// It is blanket-implemented for all types that satisfy its bounds. Most /// commonly, it will be a tls-wrapped tcp stream. pub trait IoStream: AsyncRead + AsyncWrite + Unpin + Send + 'static {} impl IoStream for T where T: AsyncRead + AsyncWrite + Unpin + Send + 'static {} /// Trait for establishing the connection to the ngrok server. #[async_trait] pub trait Connector: Sync + Send + 'static { /// The function used to establish the connection to the ngrok server. /// /// This is effectively `async |addr, tls_config, err| -> Result`. /// /// If it is being called due to a disconnect, the [AcceptError] argument will /// be populated. /// /// If it returns `Err(ConnectError::Canceled)`, reconnecting will be canceled /// and the session will be terminated. 
Note that this error will never be /// returned from the [default_connect] function. async fn connect( &self, host: String, port: u16, tls_config: Arc, err: Option, ) -> Result, ConnectError>; } #[async_trait] impl Connector for F where F: Fn(String, u16, Arc, Option) -> U + Send + Sync + 'static, U: Future, ConnectError>> + Send, { async fn connect( &self, host: String, port: u16, tls_config: Arc, err: Option, ) -> Result, ConnectError> { self(host, port, tls_config, err).await } } /// The default ngrok connector. /// /// Establishes a TCP connection to `addr`, and then performs a TLS handshake /// using the `tls_config`. /// /// Discards any errors during reconnect, allowing attempts to recur /// indefinitely. pub async fn default_connect( host: String, port: u16, tls_config: Arc, _: Option, ) -> Result, ConnectError> { let stream = tokio::net::TcpStream::connect(&(host.as_str(), port)) .await .map_err(ConnectError::Tcp)? .compat(); let domain = pki_types::ServerName::try_from(host) .expect("host should have been validated by SessionBuilder::server_addr"); let tls_conn = futures_rustls::TlsConnector::from(tls_config) .connect(domain, stream) .await .map_err(ConnectError::Tls)?; Ok(Box::new(tls_conn.compat()) as Box) } #[derive(Debug, Clone, Error)] #[error("unsupported proxy address: {0}")] /// An unsupported proxy address was provided. 
pub struct ProxyUnsupportedError(Url); fn connect_proxy(url: Url) -> Result, ProxyUnsupportedError> { Ok(match url.scheme() { "http" | "https" => Arc::new(connect_http_proxy(url)), "socks5" => { let host = url.host_str().unwrap_or_default(); let port = url.port().unwrap_or(1080); Arc::new(connect_socks_proxy(format!("{host}:{port}"))) } _ => return Err(ProxyUnsupportedError(url)), }) } fn connect_http_proxy(url: Url) -> impl Connector { move |host: String, port, tls_config, _| { let mut proxy = Proxy::new( Intercept::All, url.as_str().try_into().expect("urls should be valid uris"), ); proxy.force_connect(); let mut connector = HttpConnector::new(); connector.enforce_http(false); async move { let mut connector = ProxyConnector::from_proxy(connector, proxy) .map_err(|e| ConnectError::ProxyConnect(Box::new(e)))?; let server_uri = format!("http://{host}:{port}") .parse() .expect("host should have been validated by SessionBuilder::server_addr"); let conn = connector .call(server_uri) .await .map_err(|e| ConnectError::ProxyConnect(Box::new(e)))?; let tls_conn = futures_rustls::TlsConnector::from(tls_config) .connect( pki_types::ServerName::try_from(host) .expect("host should have been validated by SessionBuilder::server_addr"), hyper_util::rt::TokioIo::new(conn).compat(), ) .await .map_err(ConnectError::Tls)?; Ok(Box::new(tls_conn.compat()) as Box) } } } fn connect_socks_proxy(proxy_addr: String) -> impl Connector { move |server_host: String, server_port, tls_config, _| { let proxy_addr = proxy_addr.clone(); async move { let conn = tokio_socks::tcp::Socks5Stream::connect( proxy_addr.as_str(), format!("{server_host}:{server_port}"), ) .await .map_err(|e| ConnectError::ProxyConnect(Box::new(e)))? 
.compat(); let tls_conn = futures_rustls::TlsConnector::from(tls_config) .connect( pki_types::ServerName::try_from(server_host) .expect("host should have been validated by SessionBuilder::server_addr"), conn, ) .await .map_err(ConnectError::Tls)?; Ok(Box::new(tls_conn.compat()) as Box) } } } /// The builder for an ngrok [Session]. #[derive(Clone)] pub struct SessionBuilder { // Consuming libraries and applications can add a client type and version on // top of the "base" type and version declared by this library. versions: VecDeque<(String, String, Option)>, authtoken: Option, metadata: Option, heartbeat_interval: Option, heartbeat_tolerance: Option, heartbeat_handler: Option>, server_host: String, server_port: u16, ca_cert: Option, tls_config: Option, connector: Arc, handlers: CommandHandlers, cookie: Option, id: Option, } /// Errors arising at [SessionBuilder::connect] time. #[derive(Error, Debug)] #[non_exhaustive] pub enum ConnectError { /// An error occurred when establishing a TCP connection to the ngrok /// server. #[error("failed to establish tcp connection")] Tcp(#[source] io::Error), /// A TLS handshake error occurred. /// /// This is usually a certificate validation issue, or an attempt to connect /// to something that doesn't actually speak TLS. #[error("tls handshake error")] Tls(#[source] io::Error), /// An error occurred when starting the ngrok session. /// /// This might occur when there's a protocol mismatch interfering with the /// heartbeat routine. #[error("failed to start ngrok session")] Start(#[source] StartSessionError), /// An error occurred when attempting to authenticate. #[error("authentication failure")] Auth(#[source] RpcError), /// An error occurred when rebinding tunnels during a reconnect #[error("error rebinding tunnel after reconnect")] Rebind(#[source] RpcError), /// An error arising from a failure to connect through a proxy. 
#[error("failed to connect through proxy")] ProxyConnect(#[source] Box), /// The (re)connect function gave up. /// /// This will never be returned by the default connect function, and is /// instead used to cancel the reconnect loop. #[error("the connect function gave up")] Canceled, } impl Error for ConnectError { fn error_code(&self) -> Option<&str> { match self { ConnectError::Auth(resp) | ConnectError::Rebind(resp) => resp.error_code(), _ => None, } } fn msg(&self) -> String { match self { ConnectError::Auth(resp) | ConnectError::Rebind(resp) => resp.msg(), _ => format!("{self}"), } } } /// The builder specified an invalid heartbeat interval. /// /// This is most likely caused a [Duration] that's outside of the [i64::MAX] /// nanosecond range. #[derive(Copy, Clone, Debug, Error)] #[error("invalid heartbeat interval: {0}")] pub struct InvalidHeartbeatInterval(u128); /// The builder specified an invalid heartbeat tolerance. /// /// This is most likely caused a [Duration] that's outside of the [i64::MAX] /// nanosecond range. 
#[derive(Copy, Clone, Debug, Error)] #[error("invalid heartbeat tolerance: {0}")] pub struct InvalidHeartbeatTolerance(u128); /// The builder provided an invalid server address #[derive(Error, Debug, Clone)] #[error("invalid server address: {0}")] pub struct InvalidServerAddr(String); impl Default for SessionBuilder { fn default() -> Self { SessionBuilder { versions: [(CLIENT_TYPE.to_string(), VERSION.to_string(), None)] .into_iter() .collect(), authtoken: None, metadata: None, heartbeat_interval: None, heartbeat_tolerance: None, heartbeat_handler: None, server_host: "connect.ngrok-agent.com".into(), server_port: 443, ca_cert: None, tls_config: None, connector: Arc::new(default_connect), handlers: Default::default(), cookie: None, id: None, } } } fn sanitize_ua_string(s: impl AsRef) -> String { static UA_BANNED: OnceCell = OnceCell::new(); UA_BANNED .get_or_init(|| Regex::new("[^/!#$%&'*+-.^_`|~0-9a-zA-Z]").unwrap()) .replace_all(s.as_ref(), "#") .replace('/', "-") } impl SessionBuilder { /// Configures the session to authenticate with the provided authtoken. You /// can [find your existing authtoken] or [create a new one] in the ngrok /// dashboard. /// /// See the [authtoken parameter in the ngrok docs] for additional details. /// /// [find your existing authtoken]: https://dashboard.ngrok.com/get-started/your-authtoken /// [create a new one]: https://dashboard.ngrok.com/tunnels/authtokens /// [authtoken parameter in the ngrok docs]: https://ngrok.com/docs/ngrok-agent/config#authtoken pub fn authtoken(&mut self, authtoken: impl Into) -> &mut Self { self.authtoken = Some(authtoken.into().into()); self } /// Shortcut for calling [SessionBuilder::authtoken] with the value of the /// NGROK_AUTHTOKEN environment variable. pub fn authtoken_from_env(&mut self) -> &mut Self { self.authtoken = env::var("NGROK_AUTHTOKEN").ok().map(From::from); self } /// Configures how often the session will send heartbeat messages to the ngrok /// service to check session liveness. 
/// /// See the [heartbeat_interval parameter in the ngrok docs] for additional /// details. /// /// [heartbeat_interval parameter in the ngrok docs]: https://ngrok.com/docs/ngrok-agent/config#heartbeat_interval pub fn heartbeat_interval( &mut self, heartbeat_interval: Duration, ) -> Result<&mut Self, InvalidHeartbeatInterval> { let nanos = heartbeat_interval.as_nanos(); let nanos = i64::try_from(nanos).map_err(|_| InvalidHeartbeatInterval(nanos))?; self.heartbeat_interval = Some(nanos); Ok(self) } /// Configures the duration to wait for a response to a heartbeat before /// assuming the session connection is dead and attempting to reconnect. /// /// See the [heartbeat_tolerance parameter in the ngrok docs] for additional /// details. /// /// [heartbeat_tolerance parameter in the ngrok docs]: https://ngrok.com/docs/ngrok-agent/config#heartbeat_tolerance pub fn heartbeat_tolerance( &mut self, heartbeat_tolerance: Duration, ) -> Result<&mut Self, InvalidHeartbeatTolerance> { let nanos = heartbeat_tolerance.as_nanos(); let nanos = i64::try_from(nanos).map_err(|_| InvalidHeartbeatTolerance(nanos))?; self.heartbeat_tolerance = Some(nanos); Ok(self) } /// Configures the opaque, machine-readable metadata string for this session. /// Metadata is made available to you in the ngrok dashboard and the Agents API /// resource. It is a useful way to allow you to uniquely identify sessions. We /// suggest encoding the value in a structured format like JSON. /// /// See the [metdata parameter in the ngrok docs] for additional details. /// /// [metdata parameter in the ngrok docs]: https://ngrok.com/docs/ngrok-agent/config#metadata pub fn metadata(&mut self, metadata: impl Into) -> &mut Self { self.metadata = Some(metadata.into()); self } /// Configures the network address to dial to connect to the ngrok service. /// Use this option only if you are connecting to a custom agent ingress. /// /// See the [server_addr parameter in the ngrok docs] for additional details. 
/// /// [server_addr parameter in the ngrok docs]: https://ngrok.com/docs/ngrok-agent/config#server_addr pub fn server_addr(&mut self, addr: impl Into) -> Result<&mut Self, InvalidServerAddr> { let addr = addr.into(); let server_uri: Url = format!("http://{addr}") .parse() .map_err(|_| InvalidServerAddr(addr.clone()))?; self.server_host = server_uri .host_str() .map(String::from) .ok_or_else(|| InvalidServerAddr(addr.clone()))?; pki_types::ServerName::try_from(self.server_host.as_str()) .map_err(|_| InvalidServerAddr(addr.clone()))?; self.server_port = server_uri.port().unwrap_or(443); Ok(self) } /// Sets the file path to a default certificate in PEM format to validate ngrok Session TLS connections. /// Setting to "trusted" is the default, using the ngrok CA certificate. /// Setting to "host" will verify using the certificates on the host operating system. /// A client config set via tls_config after calling root_cas will override this value. /// /// Corresponds to the [root_cas parameter in the ngrok docs] /// /// [root_cas parameter in the ngrok docs]: https://ngrok.com/docs/ngrok-agent/config#root_cas pub fn root_cas(&mut self, root_cas: impl Into) -> Result<&mut Self, io::Error> { match root_cas.into().clone().as_str() { "trusted" => self.ca_cert = None, "host" => self.tls_config = Some(host_certs_tls_config().map_err(|e| e.kind())?), v => { std::fs::read(v).map(|root_cas| self.ca_cert = Some(Bytes::from(root_cas)))?; } } Ok(self) } /// Sets the default certificate in PEM format to validate ngrok Session TLS connections. /// A client config set via tls_config will override this value. 
///
/// Roughly corresponds to the "path to a certificate PEM file" option in the
/// [root_cas parameter in the ngrok docs]
///
/// [root_cas parameter in the ngrok docs]: https://ngrok.com/docs/ngrok-agent/config#root_cas
pub fn ca_cert(&mut self, ca_cert: Bytes) -> &mut Self {
    // Stash the raw PEM bytes; they are turned into a root store when the
    // session's TLS config is built.
    self.ca_cert.replace(ca_cert);
    self
}

/// Configures the TLS client used to connect to the ngrok service while
/// establishing the session. Use this option only if you are connecting through
/// a man-in-the-middle or deep packet inspection proxy. Passed to the
/// connect callback set with `SessionBuilder::connect`.
///
/// Roughly corresponds to the [root_cas parameter in the ngrok docs], but allows
/// for deeper TLS configuration.
///
/// [root_cas parameter in the ngrok docs]: https://ngrok.com/docs/ngrok-agent/config#root_cas
pub fn tls_config(&mut self, config: rustls::ClientConfig) -> &mut Self {
    self.tls_config.replace(config);
    self
}

/// Configures a function which is called to establish the connection to the
/// ngrok service. Use this option if you need to connect through an outbound
/// proxy. In the event of network disruptions, it will be called each time
/// the session reconnects.
pub fn connector(&mut self, connect: impl Connector) -> &mut Self {
    let connector = Arc::new(connect);
    self.connector = connector;
    self
}

/// Configures the session to connect to ngrok through an outbound
/// HTTP or SOCKS5 proxy. This parameter is ignored if you override the connector
/// with [SessionBuilder::connector].
///
/// See the [proxy url parameter in the ngrok docs] for additional details.
///
/// [proxy url parameter in the ngrok docs]: https://ngrok.com/docs/ngrok-agent/config#proxy_url
pub fn proxy_url(&mut self, url: Url) -> Result<&mut Self, ProxyUnsupportedError> {
    let connector = connect_proxy(url)?;
    self.connector = connector;
    Ok(self)
}

/// Configures a function which is called when the ngrok service requests that
/// this [Session] stops.
/// Your application may choose to interpret this callback
/// as a request to terminate the [Session] or the entire process.
///
/// Errors returned by this function will be visible to the ngrok dashboard or
/// API as the response to the Stop operation.
///
/// Do not block inside this callback. It will cause the Dashboard or API
/// stop operation to time out. Do not call [std::process::exit] inside this
/// callback, it will also cause the operation to time out.
pub fn handle_stop_command(&mut self, handler: impl CommandHandler) -> &mut Self {
    self.handlers.on_stop = Some(Arc::new(handler));
    self
}

/// Configures a function which is called when the ngrok service requests
/// that this [Session] restarts. Your application may choose to interpret
/// this callback as a request to restart the [Session] or the entire
/// process.
///
/// Errors returned by this function will be visible to the ngrok dashboard or
/// API as the response to the Restart operation.
///
/// Do not block inside this callback. It will cause the Dashboard or API
/// restart operation to time out. Do not call [std::process::exit] inside this
/// callback, it will also cause the operation to time out.
pub fn handle_restart_command(&mut self, handler: impl CommandHandler) -> &mut Self {
    self.handlers.on_restart = Some(Arc::new(handler));
    self
}

/// Configures a function which is called when the ngrok service requests
/// that this [Session] updates. Your application may choose to interpret
/// this callback as a request to update its configuration, itself, or to
/// invoke some other application-specific behavior.
///
/// Errors returned by this function will be visible to the ngrok dashboard or
/// API as the response to the Update operation.
///
/// Do not block inside this callback. It will cause the Dashboard or API
/// update operation to time out. Do not call [std::process::exit] inside this
/// callback, it will also cause the operation to time out.
pub fn handle_update_command(&mut self, handler: impl CommandHandler) -> &mut Self { self.handlers.on_update = Some(Arc::new(handler)); self } /// Call the provided handler whenever a heartbeat response is received. /// /// If the handler returns an error, the heartbeat task will exit, resulting /// in the session eventually dying as well. pub fn handle_heartbeat(&mut self, callback: impl HeartbeatHandler) -> &mut Self { self.heartbeat_handler = Some(Arc::new(callback)); self } /// Add client type and version information for a client application. /// /// This is a way for applications and library consumers of this crate /// identify themselves. /// /// This will add a new entry to the `User-Agent` field in the "most significant" /// (first) position. Comments must follow [RFC 7230] or a connection error may occur. /// /// [RFC 7230]: https://datatracker.ietf.org/doc/html/rfc7230#section-3.2.6 pub fn client_info( &mut self, client_type: impl Into, version: impl Into, comments: Option>, ) -> &mut Self { self.versions.push_front(( client_type.into(), version.into(), comments.map(|c| c.into()), )); self } /// Begins a new ngrok [Session] by connecting to the ngrok service. /// `connect` blocks until the session is successfully established or fails with /// an error. 
// NOTE(review): several generic argument lists in this chunk appear to have
// been stripped by extraction (e.g. `-> Result {`, `collect::>()`,
// `impl Into>`); confirm the missing parameters against the original file.
pub async fn connect(&self) -> Result {
    let (dropref, dropped) = awaitdrop::awaitdrop();
    let (inner, mut incoming) = self.connect_inner(None).await?;
    let rt = inner.runtime.clone();
    let inner = Arc::new(ArcSwap::new(inner.into()));
    let session = Session {
        _dropref: dropref,
        inner: inner.clone(),
    };
    // store the session for use with StopTunnel
    incoming.session = Some(session.clone());
    // Run the accept loop until every handle to the session is dropped.
    rt.spawn(future::select(
        accept_incoming(incoming, inner).boxed(),
        dropped.wait(),
    ));
    Ok(session)
}

/// Returns the TLS client config used to dial the ngrok service: the
/// caller-supplied config if one was set, otherwise one rooted at the
/// configured CA PEM (falling back to the bundled `CERT_BYTES`).
pub(crate) fn get_or_create_tls_config(&self) -> rustls::ClientConfig {
    // if the user has provided a custom TLS config, use that
    if let Some(tls_config) = &self.tls_config {
        return tls_config.clone();
    }
    // generate a default TLS config
    let mut root_store = rustls::RootCertStore::empty();
    let cert_pem = self.ca_cert.as_ref().map_or(CERT_BYTES, |it| it.as_ref());
    let certs = rustls_pemfile::read_all(&mut io::Cursor::new(cert_pem))
        .filter_map(|it| match it {
            Ok(Item::X509Certificate(bs)) => Some(bs),
            Err(e) => {
                warn!(error = ?e, "skipping certificate which failed to parse");
                None
            }
            Ok(_) => {
                warn!("skipping non-x509 certificate");
                None
            }
        })
        .collect::>();
    root_store.add_parsable_certificates(certs);
    rustls::ClientConfig::builder()
        .with_root_certificates(root_store)
        .with_no_client_auth()
}

/// Dials the ngrok service and authenticates, producing the session internals
/// plus the stream of incoming tunnel connections. `err` carries the error
/// (if any) that triggered a reconnect, for the connector callback to inspect.
async fn connect_inner(
    &self,
    err: impl Into>,
) -> Result<(SessionInner, IncomingStreams), ConnectError> {
    let conn = self
        .connector
        .connect(
            self.server_host.clone(),
            self.server_port,
            Arc::new(self.get_or_create_tls_config()),
            err.into(),
        )
        .await?;
    let mut heartbeat_config = HeartbeatConfig::default();
    if let Some(interval) = self.heartbeat_interval {
        heartbeat_config.interval = Duration::from_nanos(interval as u64);
    }
    if let Some(tolerance) = self.heartbeat_tolerance {
        heartbeat_config.tolerance = Duration::from_nanos(tolerance as u64);
    }
    heartbeat_config.handler = self.heartbeat_handler.clone();
    // convert these while we have ownership
    let heartbeat_interval = heartbeat_config.interval.as_nanos() as i64;
    let heartbeat_tolerance = heartbeat_config.tolerance.as_nanos() as i64;
    let mut raw = RawSession::start(conn, heartbeat_config, self.handlers.clone())
        .await
        .map_err(ConnectError::Start)?;
    // list of possibilities: https://doc.rust-lang.org/std/env/consts/constant.OS.html
    let os = match env::consts::OS {
        "macos" => "darwin",
        _ => env::consts::OS,
    };
    // Build the User-Agent from all registered client infos, most significant
    // (most recently pushed) first.
    let user_agent = self
        .versions
        .iter()
        .map(|(name, version, comments)| {
            format!(
                "{}/{}{}",
                sanitize_ua_string(name),
                sanitize_ua_string(version),
                comments
                    .as_ref()
                    .map_or(String::new(), |f| format!(" ({f})"))
            )
        })
        .collect::>()
        .join(" ");
    let client_type = self.versions[0].0.clone();
    let version = self.versions[0].1.clone();
    let resp = raw
        .auth(
            self.id.as_deref().unwrap_or_default(),
            AuthExtra {
                version,
                client_type,
                user_agent,
                auth_token: self.authtoken.clone().unwrap_or_default(),
                metadata: self.metadata.clone().unwrap_or_default(),
                os: os.into(),
                arch: std::env::consts::ARCH.into(),
                heartbeat_interval,
                heartbeat_tolerance,
                // Advertise which dashboard commands are supported: an empty
                // string marks the command supported, NOT_IMPLEMENTED marks it
                // unregistered.
                restart_unsupported_error: self
                    .handlers
                    .on_restart
                    .is_none()
                    .then_some(NOT_IMPLEMENTED.into())
                    .or(Some("".into())),
                stop_unsupported_error: self
                    .handlers
                    .on_stop
                    .is_none()
                    .then_some(NOT_IMPLEMENTED.into())
                    .or(Some("".into())),
                update_unsupported_error: self
                    .handlers
                    .on_update
                    .is_none()
                    .then_some(NOT_IMPLEMENTED.into())
                    .or(Some("".into())),
                cookie: self.cookie.clone().unwrap_or_default(),
                ..Default::default()
            },
        )
        .await
        .map_err(ConnectError::Auth)?;
    let (client, incoming) = raw.split();
    // Carry the server-issued cookie and client id forward so reconnects
    // resume the same logical session.
    let builder = SessionBuilder {
        cookie: resp.extra.cookie,
        id: resp.client_id.into(),
        ..self.clone()
    };
    Ok((
        SessionInner {
            runtime: Handle::current(),
            client: client.into(),
            tunnels: Default::default(),
            closed: Default::default(),
            builder,
        },
        incoming,
    ))
}
}

impl Session {
    /// Create a new [SessionBuilder] to configure a new ngrok session.
    pub fn builder() -> SessionBuilder {
        SessionBuilder::default()
    }

    /// Start building a tunnel for an HTTP endpoint.
    ///
    /// https://ngrok.com/docs/http/
    pub fn http_endpoint(&self) -> HttpTunnelBuilder {
        self.clone().into()
    }

    /// Start building a tunnel for a TCP endpoint.
    ///
    /// https://ngrok.com/docs/tcp/
    pub fn tcp_endpoint(&self) -> TcpTunnelBuilder {
        self.clone().into()
    }

    /// Start building a tunnel for a TLS endpoint.
    ///
    /// https://ngrok.com/docs/tls/
    pub fn tls_endpoint(&self) -> TlsTunnelBuilder {
        self.clone().into()
    }

    /// Start building a labeled tunnel.
    ///
    /// https://ngrok.com/docs/network-edge/edges/#tunnel-group
    pub fn labeled_tunnel(&self) -> LabeledTunnelBuilder {
        self.clone().into()
    }

    /// Get the unique ID of this session.
    pub fn id(&self) -> String {
        self.inner
            .load()
            .builder
            .id
            .as_ref()
            .expect("Session ID not set")
            .clone()
    }

    /// Start a new tunnel in this session.
    // NOTE(review): the generic parameter list appears stripped by extraction
    // (`tunnel_cfg: C` with a `where C: TunnelConfig` bound but no `<C>`);
    // confirm against the original file.
    pub(crate) async fn start_tunnel(&self, tunnel_cfg: C) -> Result
    where
        C: TunnelConfig,
    {
        let inner = self.inner.load();
        let mut client = inner.client.lock().await;
        let (tx, rx) = channel(64);
        let proto = tunnel_cfg.proto();
        let opts = tunnel_cfg.opts();
        let mut extra = tunnel_cfg.extra();
        let labels = tunnel_cfg.labels();
        let forwards_to = tunnel_cfg.forwards_to();
        let forwards_proto = tunnel_cfg.forwards_proto();
        let verify_upstream_tls = tunnel_cfg.verify_upstream_tls();
        // non-labeled tunnel
        let (tunnel, bound) = if tunnel_cfg.proto() != "" {
            let resp = client
                .listen(
                    &proto,
                    opts.clone().unwrap(), // this is crate-defined, and must exist if proto is non-empty
                    extra.clone(),
                    "",
                    &forwards_to,
                    &forwards_proto,
                )
                .await?;
            extra.token = resp.extra.token;
            let info = TunnelInnerInfo {
                id: resp.client_id,
                proto: resp.proto.clone(),
                url: resp.url,
                labels: HashMap::new(),
                forwards_to: tunnel_cfg.forwards_to(),
                metadata: extra.metadata.clone(),
            };
            (
                TunnelInner {
                    info,
                    session: self.clone(),
                    incoming: rx.into(),
                },
                BoundTunnel {
                    proto: resp.proto,
                    opts: resp.bind_opts.into(),
                    extra,
                    labels,
                    forwards_to,
                    forwards_proto,
                    verify_upstream_tls,
                    tx,
                },
            )
        } else {
            // labeled tunnel
            let resp = client
                .listen_label(
                    labels.clone(),
                    &extra.metadata,
                    &forwards_to,
                    &forwards_proto,
                )
                .await?;
            let info = TunnelInnerInfo {
                id: resp.id,
                proto: Default::default(),
                url: Default::default(),
                labels: tunnel_cfg.labels(),
                forwards_to: tunnel_cfg.forwards_to(),
                metadata: extra.metadata.clone(),
            };
            (
                TunnelInner {
                    info,
                    session: self.clone(),
                    incoming: rx.into(),
                },
                BoundTunnel {
                    extra,
                    proto: Default::default(),
                    opts: Default::default(),
                    forwards_to,
                    forwards_proto,
                    verify_upstream_tls,
                    labels,
                    tx,
                },
            )
        };
        // Register the bound tunnel so the accept loop can route incoming
        // connections to it by id.
        let mut tunnels = inner.tunnels.write().await;
        tunnels.insert(tunnel.info.id.clone(), bound);
        Ok(tunnel)
    }

    /// Close a tunnel with an error from the remote.
    /// Skips the call to unlisten, since the remote has already rejected it.
    pub(crate) async fn close_tunnel_with_error(&self, id: impl AsRef, err: AcceptError) {
        let id = id.as_ref();
        let inner = self.inner.load();
        if let Some(tun) = inner.tunnels.write().await.remove(id) {
            let _ = tun.tx.send(Err(err)).await;
        };
    }

    /// Close a tunnel with the given ID.
    pub async fn close_tunnel(&self, id: impl AsRef) -> Result<(), RpcError> {
        let id = id.as_ref();
        let inner = self.inner.load();
        inner.client.lock().await.unlisten(id).await?;
        inner.tunnels.write().await.remove(id);
        Ok(())
    }

    // Runtime handle on which the session's background tasks were spawned.
    pub(crate) fn runtime(&self) -> Handle {
        self.inner.load().runtime.clone()
    }

    /// Close the ngrok session.
    pub async fn close(&mut self) -> Result<(), RpcError> {
        let inner = self.inner.load();
        let res = inner.client.lock().await.close().await;
        // Mark closed so any in-flight reconnect attempt bails out.
        inner.closed.store(true, Ordering::SeqCst);
        res
    }
}

pub(crate) fn host_certs_tls_config() -> Result {
    // The root certificate store, lazily loaded once.
    static ROOT_STORE: Lazy> = Lazy::new(|| {
        let der_certs = rustls_native_certs::load_native_certs()?
            .into_iter()
            .collect::>();
        let mut root_store = RootCertStore::empty();
        root_store.add_parsable_certificates(der_certs);
        Ok(root_store)
    });
    let root_store = ROOT_STORE.as_ref()?;
    Ok(rustls::ClientConfig::builder()
        .with_root_certificates(root_store.clone())
        .with_no_client_auth())
}

// Accept a single incoming tunnel connection and route it to the bound tunnel
// it belongs to. Returns Err only for fatal transport errors.
async fn accept_one(
    incoming: &mut IncomingStreams,
    inner: &ArcSwap,
) -> Result<(), AcceptError> {
    let conn = match incoming.accept().await {
        Ok(conn) => conn,
        // Assume if we got a muxado error, the session is borked. Break and
        // propagate the error to all of the tunnels out in the wild.
        Err(RawAcceptError::Transport(error)) => return Err(error.into()),
        // The other errors are either a bad header or an unrecognized
        // stream type. They're non-fatal, but could signal a protocol
        // mismatch.
        Err(error) => {
            warn!(?error, "protocol error when accepting tunnel connection");
            return Ok(());
        }
    };
    let id = conn.header.id.clone();
    // Fall back to a sentinel address rather than dropping the connection on
    // an unparseable client address.
    let remote_addr = conn.header.client_addr.parse().unwrap_or_else(|error| {
        warn!(
            client_addr = conn.header.client_addr,
            %error,
            "invalid remote addr for tunnel connection",
        );
        "0.0.0.0:0".parse().unwrap()
    });
    let inner = inner.load();
    let guard = inner.tunnels.read().await;
    let res = if let Some(tun) = guard.get(&id) {
        let mut header = conn.header;
        let app_protocol = Some(tun.forwards_proto.to_string()).filter(|s| !s.is_empty());
        let verify_upstream_tls = tun.verify_upstream_tls;
        // Note: this is a bit of a hack. Normally, passthrough_tls is only
        // a thing on edge connections, but we're making sure it's set for
        // endpoint connections as well. In their case, we have to look at the
        // options used to bind the endpoint.
        if let Some(BindOpts::Tls(opts)) = &tun.opts {
            header.passthrough_tls = opts.tls_termination.is_none();
        }
        let proxy_proto = if let Some(
            BindOpts::Tls(TlsEndpoint { proxy_proto, .. })
            | BindOpts::Http(HttpEndpoint { proxy_proto, .. })
            | BindOpts::Tcp(TcpEndpoint { proxy_proto, .. }),
        ) = tun.opts
        {
            proxy_proto
        } else {
            ProxyProto::None
        };
        tun.tx
            .send(Ok(ConnInner {
                info: crate::conn::Info {
                    app_protocol,
                    verify_upstream_tls,
                    remote_addr,
                    header,
                    proxy_proto,
                },
                stream: conn.stream,
            }))
            .await
    } else {
        Ok(())
    };
    drop(guard);
    // A send failure means the tunnel's receiver is gone; forget the tunnel.
    if res.is_err() {
        RwLock::write(&inner.tunnels).await.remove(&id);
    }
    Ok(())
}

// Re-establish the session after a disconnect and re-bind every tunnel that
// was active on the old session.
async fn try_reconnect(
    inner: Arc>,
    err: impl Into>,
) -> Result {
    let old_inner = inner.load();
    // Don't fight an explicit Session::close.
    if old_inner.closed.load(Ordering::SeqCst) {
        return Err(ConnectError::Canceled);
    }
    let (new_inner, new_incoming) = old_inner.builder.connect_inner(err).await?;
    let mut client = new_inner.client.lock().await;
    let mut new_tunnels = new_inner.tunnels.write().await;
    let old_tunnels = old_inner.tunnels.read().await;
    for (id, tun) in old_tunnels.iter() {
        if !tun.proto.is_empty() {
            // Endpoint tunnel: rebind under its original id.
            let resp = client
                .listen(
                    &tun.proto,
                    tun.opts.clone().unwrap(),
                    tun.extra.clone(),
                    id,
                    &tun.forwards_to,
                    &tun.forwards_proto,
                )
                .await
                .map_err(ConnectError::Rebind)?;
            debug!(?resp, %id, %tun.proto, ?tun.opts, ?tun.extra, %tun.forwards_to, "rebound tunnel");
            new_tunnels.insert(id.clone(), tun.clone());
        } else {
            // Labeled tunnel: the server may assign a fresh id.
            let resp = client
                .listen_label(
                    tun.labels.clone(),
                    &tun.extra.metadata,
                    &tun.forwards_to,
                    &tun.forwards_proto,
                )
                .await
                .map_err(ConnectError::Rebind)?;
            if !resp.id.is_empty() {
                new_tunnels.insert(resp.id, tun.clone());
            } else {
                new_tunnels.insert(id.clone(), tun.clone());
            }
        }
    }
    drop(old_tunnels);
    drop(client);
    drop(new_tunnels);
    inner.store(new_inner.into());
    Ok(new_incoming)
}

// Drive the session's accept loop, reconnecting with exponential backoff on
// failure until reconnection is canceled or permanently fails.
async fn accept_incoming(mut incoming: IncomingStreams, inner: Arc>) {
    let error: AcceptError = loop {
        if let Err(error) = accept_one(&mut incoming, &inner).await {
            debug!(%error, "failed to accept stream, attempting reconnect");
            // This is gross, but should perform fine. Couple of notes:
            // * Mutex so that both the action and condition can share access to
            //   `error`. Realistically, the lock calls should be non-concurrent,
            //   but Rust can't prove that.
// * Not setting the error in the action because then a a reference // to a FnMut closure would escape via the returned Future, which is // a no-no. let error = parking_lot::Mutex::new(Some(error)); let reconnect = RetryIf::spawn( ExponentialBackoff::from_millis(50), || try_reconnect(inner.clone(), error.lock().clone()).map_err(Arc::new), |err: &Arc| { if let ConnectError::Canceled = **err { false } else { *error.lock() = Some(AcceptError::Reconnect(err.clone())); true } }, ); incoming = match reconnect.await { Ok(incoming) => incoming, Err(error) => { debug!(%error, "reconnect failed, giving up"); break AcceptError::Reconnect(error); } }; } }; for (_id, tun) in inner.load().tunnels.write().await.drain() { let _ = tun.tx.send(Err(error.clone())).await; } } #[cfg(test)] mod test { use super::*; #[test] fn test_sanitize_ua() { assert_eq!( sanitize_ua_string("library/official/rust"), "library-official-rust" ); assert_eq!( sanitize_ua_string("something@really☺weird"), "something#really#weird" ); } } ================================================ FILE: ngrok/src/tunnel.rs ================================================ use std::{ collections::HashMap, pin::Pin, sync::Arc, task::{ Context, Poll, }, }; use async_trait::async_trait; use futures::Stream; use muxado::Error as MuxadoError; use thiserror::Error; use tokio::sync::mpsc::Receiver; use crate::{ config::{ HttpTunnelBuilder, LabeledTunnelBuilder, TcpTunnelBuilder, TlsTunnelBuilder, }, conn::{ ConnInner, EdgeConn, EndpointConn, }, internals::raw_session::RpcError, session::ConnectError, Session, }; /// Errors arising when accepting a [Conn] from an ngrok tunnel. #[derive(Error, Debug, Clone)] #[non_exhaustive] pub enum AcceptError { /// An error occurred in the underlying transport protocol. #[error("transport error")] Transport(#[from] MuxadoError), /// An error arose during reconnect #[error("reconnect error")] Reconnect(#[from] Arc), /// The listener was closed. 
    #[error("listener closed: {message}{}", error_code.clone().map(|s| format!(", {s}")).unwrap_or_else(String::new))]
    ListenerClosed {
        /// The error message.
        message: String,
        /// The error code, if any.
        error_code: Option,
    },
}

// Identifying details for a tunnel, shared by the live listener and its
// info-only clones.
#[derive(Clone)]
pub(crate) struct TunnelInnerInfo {
    pub(crate) id: String,
    pub(crate) proto: String,
    pub(crate) url: String,
    pub(crate) labels: HashMap,
    pub(crate) forwards_to: String,
    pub(crate) metadata: String,
}

pub(crate) struct TunnelInner {
    pub(crate) info: TunnelInnerInfo,
    // None for info-only clones produced by `make_info`.
    pub(crate) incoming: Option>>,
    // Note: this session field is also used to detect tunnel liveness for the
    // purposes of shutting down the accept loop. If it's ever removed, an
    // awaitdrop::Ref field needs to be added that's derived from the one
    // belonging to the session.
    pub(crate) session: Session,
}

impl Drop for TunnelInner {
    fn drop(&mut self) {
        // Close the remote listener asynchronously; Drop can't await.
        let id = self.id().to_string();
        let sess = self.session.clone();
        let rt = sess.runtime();
        rt.spawn(async move { sess.close_tunnel(&id).await });
    }
}

// This codegen indirect is required to make the hyper "Accept" trait bound
// dependent on the hyper feature. You can't put a #[cfg] on a single bound, so
// we're putting the whole trait def in a macro. Gross, but gets the job done.
macro_rules! tunnel_trait {
    ($($hyper_bound:tt)*) => {
        /// An ngrok tunnel.
        ///
        /// ngrok [Tunnel]s act like TCP listeners and can be used as a
        /// [futures::stream::TryStream] of [Conn]ections from endpoints created on the ngrok
        /// service.
        pub trait Tunnel:
            Stream::Conn, AcceptError>>
            + TunnelInfo
            + TunnelCloser
            $($hyper_bound)*
            + Unpin
            + Send
            + 'static
        {
            /// The type of connection associated with this tunnel type.
            /// Agent-initiated http, tls, and tcp tunnels all produce
            /// `EndpointConn`s, while labeled tunnels produce `EdgeConn`s.
            type Conn: crate::Conn;
        }

        /// Information associated with an ngrok tunnel.
        pub trait TunnelInfo {
            /// Returns a tunnel's unique ID.
            fn id(&self) -> &str;
            /// Returns a human-readable string presented in the ngrok dashboard
            /// and the Tunnels API. Use the [HttpTunnelBuilder::forwards_to],
            /// [TcpTunnelBuilder::forwards_to], etc. to set this value
            /// explicitly.
            fn forwards_to(&self) -> &str;
            /// Returns the arbitrary metadata string for this tunnel.
            fn metadata(&self) -> &str;
        }

        /// An ngrok tunnel closer.
        #[async_trait]
        pub trait TunnelCloser {
            /// Close the tunnel.
            ///
            /// This is an RPC call that must be `.await`ed.
            /// It is equivalent to calling `Session::close_tunnel` with this
            /// tunnel's ID.
            ///
            /// If the tunnel is dropped, a task will be spawned to close it
            /// asynchronously.
            async fn close(&mut self) -> Result<(), RpcError>;
        }
    }
}

tunnel_trait!();

/// An ngrok tunnel backing a simple endpoint.
/// Most agent-configured tunnels fall into this category, with the exception of
/// labeled tunnels.
pub trait EndpointInfo {
    /// Returns the tunnel endpoint's URL.
    fn url(&self) -> &str;
    /// Returns the protocol of the tunnel's endpoint.
    fn proto(&self) -> &str;
}

/// An ngrok tunnel backing an edge.
/// Since labels may be dynamically defined via the dashboard or API, the url
/// and protocol for the tunnel is not knowable ahead of time.
pub trait EdgeInfo {
    /// Returns the labels that the tunnel was started with.
    fn labels(&self) -> &HashMap;
}

impl Stream for TunnelInner {
    type Item = Result;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> {
        self.incoming
            .as_mut()
            .expect("tunnel inner lacks a receiver")
            .poll_recv(cx)
    }
}

impl TunnelInner {
    /// Get this tunnel's ID as returned by the ngrok server.
    pub fn id(&self) -> &str {
        &self.info.id
    }

    /// Get the URL for this tunnel.
    /// Labeled tunnels will return an empty string.
    pub fn url(&self) -> &str {
        &self.info.url
    }

    /// Close the tunnel.
    /// This is an RPC call and needs to be `.await`ed.
    pub async fn close(&mut self) -> Result<(), RpcError> {
        self.session.close_tunnel(self.id()).await?;
        // Also close the local receiver so pending accepts terminate.
        if let Some(r) = self.incoming.as_mut() {
            r.close()
        }
        Ok(())
    }

    /// Get the protocol that this tunnel uses.
    pub fn proto(&self) -> &str {
        &self.info.proto
    }

    /// Get the labels this tunnel was started with.
    /// The returned [`HashMap`] will be empty for non-labeled tunnels.
    pub fn labels(&self) -> &HashMap {
        &self.info.labels
    }

    /// Get the address that this tunnel says it forwards to.
    pub fn forwards_to(&self) -> &str {
        &self.info.forwards_to
    }

    /// Get the user-supplied metadata for this tunnel.
    pub fn metadata(&self) -> &str {
        &self.info.metadata
    }

    /// Split the tunnel into two parts - the first contains the listener and
    /// all tunnel information, and the second contains *only* the information.
    pub(crate) fn make_info(&self) -> TunnelInner {
        TunnelInner {
            info: self.info.clone(),
            incoming: None,
            session: self.session.clone(),
        }
    }
}

// Generates a concrete tunnel wrapper type plus its Tunnel/TunnelInfo/
// TunnelCloser/Stream impls; the trailing `endpoint`/`edge` markers select
// which extra info trait (EndpointInfo/EdgeInfo) is implemented.
macro_rules! make_tunnel_type {
    ($(#[$outer:meta])* $wrapper:ident, $builder:tt, $conn:tt, $($m:tt),*) => {
        $(#[$outer])*
        pub struct $wrapper {
            pub(crate) inner: TunnelInner,
        }

        impl $wrapper {
            /// Split this tunnel type into two parts - both of which have all
            /// tunnel information, but only the former can be used as a
            /// listener. Attempts to accept connections on the latter will fail.
            pub(crate) fn make_info(&self) -> $wrapper {
                $wrapper {
                    inner: self.inner.make_info(),
                }
            }
        }

        impl Tunnel for $wrapper {
            type Conn = $conn;
        }

        impl TunnelInfo for $wrapper {
            fn id(&self) -> &str {
                self.inner.id()
            }

            fn forwards_to(&self) -> &str {
                self.inner.forwards_to()
            }

            fn metadata(&self) -> &str {
                self.inner.metadata()
            }
        }

        #[async_trait]
        impl TunnelCloser for $wrapper {
            async fn close(&mut self) -> Result<(), RpcError> {
                self.inner.close().await
            }
        }

        impl $wrapper {
            /// Create a builder for this tunnel type.
            pub fn builder(session: Session) -> $builder {
                $builder::from(session)
            }
        }

        $(
            make_tunnel_type!($m; $wrapper);
        )*

        impl Stream for $wrapper {
            type Item = Result<$conn, AcceptError>;

            fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> {
                Pin::new(&mut self.inner).poll_next(cx).map(|o| o.map(|r| r.map(|c| $conn { inner: c })))
            }
        }
    };
    (endpoint; $wrapper:ty) => {
        impl EndpointInfo for $wrapper {
            fn url(&self) -> &str {
                self.inner.url()
            }

            fn proto(&self) -> &str {
                self.inner.proto()
            }
        }
    };
    (edge; $wrapper:ty) => {
        impl EdgeInfo for $wrapper {
            fn labels(&self) -> &HashMap {
                self.inner.labels()
            }
        }
    };
}

make_tunnel_type! {
    /// An ngrok tunnel for an HTTP endpoint.
    HttpTunnel,
    HttpTunnelBuilder,
    EndpointConn,
    endpoint
}

make_tunnel_type! {
    /// An ngrok tunnel for a TCP endpoint.
    TcpTunnel,
    TcpTunnelBuilder,
    EndpointConn,
    endpoint
}

make_tunnel_type! {
    /// An ngrok tunnel for a TLS endpoint.
    TlsTunnel,
    TlsTunnelBuilder,
    EndpointConn,
    endpoint
}

make_tunnel_type! {
    /// A labeled ngrok tunnel.
    LabeledTunnel,
    LabeledTunnelBuilder,
    EdgeConn,
    edge
}

================================================ FILE: ngrok/src/tunnel_ext.rs ================================================

use std::{
    collections::HashMap,
    io,
    sync::Arc,
};
#[cfg(feature = "hyper")]
use std::{
    convert::Infallible,
    fmt,
};

use async_trait::async_trait;
use bitflags::bitflags;
use futures::stream::TryStreamExt;
use futures_rustls::rustls::{
    self,
    pki_types,
    ClientConfig,
};
#[cfg(feature = "hyper")]
use hyper::{
    server::conn::http1,
    service::service_fn,
    Response,
    StatusCode,
};
use once_cell::sync::Lazy;
use proxy_protocol::ProxyHeader;
use tokio::{
    io::copy_bidirectional,
    net::TcpStream,
    task::JoinHandle,
};
use tokio_util::compat::{
    FuturesAsyncReadCompatExt,
    TokioAsyncReadCompatExt,
};
#[cfg(feature = "hyper")]
use tracing::debug;
use tracing::{
    field,
    warn,
    Instrument,
    Span,
};
use url::Url;

use crate::{
    prelude::*,
    proxy_proto,
    session::IoStream,
    EdgeConn,
    EndpointConn,
};

// Blanket impl: any tunnel whose connection type knows how to forward itself
// gets TunnelExt for free.
// NOTE(review): the generic parameter list appears stripped by extraction
// (`impl TunnelExt for T` with no `<T>`); confirm against the original file.
#[allow(deprecated)]
#[async_trait]
impl TunnelExt for T
where
    T: Tunnel + Send,
    ::Conn: ConnExt,
{
    async fn forward(&mut self, url: Url) -> Result<(), io::Error> {
        forward_tunnel(self, url).await
    }
}

/// Extension methods auto-implemented for all tunnel types
#[async_trait]
#[deprecated = "superceded by the `listen_and_forward` builder method"]
pub trait TunnelExt: Tunnel + Send {
    /// Forward incoming tunnel connections to the provided url based on its
    /// scheme.
    /// This currently supports http, https, tls, and tcp on all platforms, unix
    /// sockets on unix platforms, and named pipes on Windows via the "pipe"
    /// scheme.
    ///
    /// Unix socket URLs can be formatted as `unix://path/to/socket` or
    /// `unix:path/to/socket` for relative paths or as `unix:///path/to/socket` or
    /// `unix:/path/to/socket` for absolute paths.
    ///
    /// Windows named pipe URLs can be formatted as `pipe:mypipename` or
    /// `pipe://host/mypipename`. If no host is provided, as with
    /// `pipe:///mypipename` or `pipe:/mypipename`, the leading slash will be
    /// preserved.
    async fn forward(&mut self, url: Url) -> Result<(), io::Error>;
}

// A connection that knows how to forward itself to an upstream URL on a
// spawned task.
pub(crate) trait ConnExt {
    fn forward_to(self, url: &Url) -> JoinHandle>;
}

// Accept connections from the tunnel until it closes, spawning a forwarding
// task per connection.
#[tracing::instrument(skip_all, fields(tunnel_id = tun.id(), url = %url))]
pub(crate) async fn forward_tunnel(tun: &mut T, url: Url) -> Result<(), io::Error>
where
    T: Tunnel + 'static + ?Sized,
    ::Conn: ConnExt,
{
    loop {
        let tunnel_conn = if let Some(conn) = tun
            .try_next()
            .await
            .map_err(|err| io::Error::new(io::ErrorKind::NotConnected, err))?
        {
            conn
        } else {
            // Stream exhausted: the tunnel was closed cleanly.
            return Ok(());
        };
        tunnel_conn.forward_to(&url);
    }
}

impl ConnExt for EdgeConn {
    fn forward_to(mut self, url: &Url) -> JoinHandle> {
        let url = url.clone();
        tokio::spawn(async move {
            let mut upstream = match connect(
                self.edge_type() == EdgeType::Tls && self.passthrough_tls(),
                self.inner.info.verify_upstream_tls,
                self.inner.info.app_protocol.clone(),
                None, // Edges don't support proxyproto (afaik)
                &url,
            )
            .await
            {
                Ok(conn) => conn,
                Err(error) => {
                    // For HTTPS edges we can at least answer with a gateway
                    // error page before giving up.
                    #[cfg(feature = "hyper")]
                    if self.edge_type() == EdgeType::Https {
                        serve_gateway_error(format!("{error}"), self);
                    }
                    warn!(%error, "error connecting to upstream");
                    return Err(error);
                }
            };
            copy_bidirectional(&mut self, &mut upstream).await?;
            Ok(())
        })
    }
}

impl ConnExt for EndpointConn {
    fn forward_to(self, url: &Url) -> JoinHandle> {
        let url = url.clone();
        tokio::spawn(async move {
            let proxy_proto = self.inner.info.proxy_proto;
            let proto_tls = self.proto() == "tls";
            #[cfg(feature = "hyper")]
            let proto_http = matches!(self.proto(), "http" | "https");
            let passthrough_tls = self.inner.info.passthrough_tls();
            let app_protocol = self.inner.info.app_protocol.clone();
            let verify_upstream_tls = self.inner.info.verify_upstream_tls;
            // If proxy protocol is enabled, parse the header off the front of
            // the stream so it can be re-emitted toward the upstream.
            let (mut stream, proxy_header) = match proxy_proto {
                ProxyProto::None => (crate::proxy_proto::Stream::disabled(self), None),
                _ => {
                    let mut stream = crate::proxy_proto::Stream::incoming(self);
let header = stream .proxy_header() .await? .map_err(|e| { io::Error::new( io::ErrorKind::InvalidData, format!("invalid proxy-protocol header: {}", e), ) })? .cloned(); (stream, header) } }; let mut upstream = match connect( proto_tls && passthrough_tls, verify_upstream_tls, app_protocol, proxy_header, &url, ) .await { Ok(conn) => conn, Err(error) => { #[cfg(feature = "hyper")] if proto_http { serve_gateway_error(format!("{error}"), stream); } warn!(%error, "error connecting to upstream"); return Err(error); } }; copy_bidirectional(&mut stream, &mut upstream).await?; Ok(()) }) } } bitflags! { struct TlsFlags: u8 { const FLAG_HTTP2 = 0b01; const FLAG_verify_upstream_tls = 0b10; const FLAG_MAX = Self::FLAG_HTTP2.bits() | Self::FLAG_verify_upstream_tls.bits(); } } static NO_CRYPTO_PROVIDER_ERROR: Lazy = Lazy::new(|| { io::Error::new( io::ErrorKind::NotFound, "no default CryptoProvider installed", ) }); fn tls_config( app_protocol: Option, verify_upstream_tls: bool, ) -> Result, &'static io::Error> { // A hashmap of tls client configs for different configurations. // There won't need to be a lot of variation among these, and we'll want to // reuse them as much as we can, which is why we initialize them all once // and then pull out the one we need. // Disabling the lint because this is a local static that doesn't escape the // enclosing context. It fine. #[allow(clippy::type_complexity)] static CONFIGS: Lazy>, &'static io::Error>> = Lazy::new(|| { std::ops::Range { start: 0, end: TlsFlags::FLAG_MAX.bits() + 1, } .map(|p| { let http2 = (p & TlsFlags::FLAG_HTTP2.bits()) != 0; let verify_upstream_tls = (p & TlsFlags::FLAG_verify_upstream_tls.bits()) != 0; let mut config = crate::session::host_certs_tls_config()?; if !verify_upstream_tls { let provider = rustls::crypto::CryptoProvider::get_default() .ok_or(&*NO_CRYPTO_PROVIDER_ERROR)? 
.as_ref() .clone(); config.dangerous().set_certificate_verifier(Arc::new( danger::NoCertificateVerification::new(provider), )); } if http2 { config .alpn_protocols .extend(["h2", "http/1.1"].iter().map(|s| s.as_bytes().to_vec())); } Ok((p, Arc::new(config))) }) .collect() }); let configs: &HashMap> = CONFIGS.as_ref().map_err(|e| *e)?; let mut key = 0; if Some("http2").eq(&app_protocol.as_deref()) { key |= TlsFlags::FLAG_HTTP2.bits(); } if verify_upstream_tls { key |= TlsFlags::FLAG_verify_upstream_tls.bits(); } Ok(configs .get(&key) .or_else(|| configs.get(&0)) .unwrap() .clone()) } // Establish the connection to forward the tunnel stream to. // Takes the tunnel and connection to make additional decisions on how to wrap // the forwarded connection, i.e. reordering tls termination and proxyproto. // Note: this additional wrapping logic currently unimplemented. async fn connect( tunnel_tls: bool, verify_upstream_tls: bool, app_protocol: Option, proxy_proto_header: Option, url: &Url, ) -> Result, io::Error> { let host = url.host_str().unwrap_or("localhost"); let mut backend_tls: bool = false; let mut conn: Box = match url.scheme() { "tcp" => { let port = url.port().ok_or_else(|| { io::Error::new( io::ErrorKind::InvalidInput, format!("missing port for tcp forwarding url {url}"), ) })?; let conn = connect_tcp(host, port).in_current_span().await?; Box::new(conn) } "http" => { let port = url.port().unwrap_or(80); let conn = connect_tcp(host, port).in_current_span().await?; Box::new(conn) } "https" | "tls" => { let port = url.port().unwrap_or(443); let conn = connect_tcp(host, port).in_current_span().await?; backend_tls = true; Box::new(conn) } #[cfg(not(target_os = "windows"))] "unix" => { use std::borrow::Cow; use tokio::net::UnixStream; let mut addr = Cow::Borrowed(url.path()); if let Some(host) = url.host_str() { // note: if host exists, there should always be a leading / in // the path, but we should consider it a relative path. 
addr = Cow::Owned(format!("{host}{addr}")); } Box::new(UnixStream::connect(&*addr).await?) } #[cfg(target_os = "windows")] "pipe" => { use std::time::Duration; use tokio::net::windows::named_pipe::ClientOptions; use windows_sys::Win32::Foundation::ERROR_PIPE_BUSY; let mut pipe_name = url.path(); if url.host_str().is_some() { pipe_name = pipe_name.strip_prefix('/').unwrap_or(pipe_name); } if pipe_name.is_empty() { return Err(io::Error::new( io::ErrorKind::InvalidInput, format!("missing pipe name in forwarding url {url}"), )); } let host = url .host_str() // Consider localhost to mean "." for the pipe name .map(|h| if h == "localhost" { "." } else { h }) .unwrap_or("."); // Finally, assemble the full name. let addr = format!("\\\\{host}\\pipe\\{pipe_name}"); // loop behavior copied from docs // https://docs.rs/tokio/latest/tokio/net/windows/named_pipe/struct.NamedPipeClient.html let local_conn = loop { match ClientOptions::new().open(&addr) { Ok(client) => break client, Err(error) if error.raw_os_error() == Some(ERROR_PIPE_BUSY as i32) => (), Err(error) => return Err(error), } tokio::time::sleep(Duration::from_millis(50)).await; }; Box::new(local_conn) } _ => { return Err(io::Error::new( io::ErrorKind::InvalidInput, format!("unrecognized scheme in forwarding url: {url}"), )) } }; // We have to write the proxy header _before_ tls termination if let Some(header) = proxy_proto_header { conn = Box::new( proxy_proto::Stream::outgoing(conn, header) .expect("re-serializing proxy header should always succeed"), ) }; if backend_tls && !tunnel_tls { let domain = pki_types::ServerName::try_from(host) .map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e))? .to_owned(); conn = Box::new( futures_rustls::TlsConnector::from( tls_config(app_protocol, verify_upstream_tls).map_err(|e| e.kind())?, ) .connect(domain, conn.compat()) .await? .compat(), ) } // TODO: header rewrites? 
Ok(conn) } async fn connect_tcp(host: &str, port: u16) -> Result { let conn = TcpStream::connect(&format!("{}:{}", host, port)).await?; if let Ok(addr) = conn.peer_addr() { Span::current().record("forward_addr", field::display(addr)); } Ok(conn) } #[cfg(feature = "hyper")] fn serve_gateway_error( err: impl fmt::Display + Send + 'static, conn: impl hyper::rt::Read + hyper::rt::Write + Unpin + Send + 'static, ) -> JoinHandle<()> { tokio::spawn( async move { let service = service_fn(move |_req| { debug!("serving bad gateway error"); let mut resp = Response::new(format!("failed to dial backend: {err}")); *resp.status_mut() = StatusCode::BAD_GATEWAY; futures::future::ok::<_, Infallible>(resp) }); let res = http1::Builder::new() .keep_alive(false) .serve_connection(conn, service) .await; debug!(?res, "connection closed"); } .in_current_span(), ) } // https://github.com/rustls/rustls/blob/main/examples/src/bin/tlsclient-mio.rs#L334 mod danger { use futures_rustls::rustls; use rustls::{ client::danger::HandshakeSignatureValid, crypto::{ verify_tls12_signature, verify_tls13_signature, CryptoProvider, }, DigitallySignedStruct, }; use super::pki_types::{ CertificateDer, ServerName, UnixTime, }; #[derive(Debug)] pub struct NoCertificateVerification(CryptoProvider); impl NoCertificateVerification { pub fn new(provider: CryptoProvider) -> Self { Self(provider) } } impl rustls::client::danger::ServerCertVerifier for NoCertificateVerification { fn verify_server_cert( &self, _end_entity: &CertificateDer<'_>, _intermediates: &[CertificateDer<'_>], _server_name: &ServerName<'_>, _ocsp: &[u8], _now: UnixTime, ) -> Result { Ok(rustls::client::danger::ServerCertVerified::assertion()) } fn verify_tls12_signature( &self, message: &[u8], cert: &CertificateDer<'_>, dss: &DigitallySignedStruct, ) -> Result { verify_tls12_signature( message, cert, dss, &self.0.signature_verification_algorithms, ) } fn verify_tls13_signature( &self, message: &[u8], cert: &CertificateDer<'_>, dss: 
&DigitallySignedStruct, ) -> Result { verify_tls13_signature( message, cert, dss, &self.0.signature_verification_algorithms, ) } fn supported_verify_schemes(&self) -> Vec { self.0.signature_verification_algorithms.supported_schemes() } } } ================================================ FILE: rustfmt.toml ================================================ imports_layout = "Vertical" imports_granularity = "Crate" group_imports = "StdExternalCrate" edition = "2021"