[
  {
    "path": ".envrc",
    "content": "use flake\n"
  },
  {
    "path": ".github/workflows/ci.yml",
    "content": "on:\n  push:\n    branches: [main]\n  pull_request:\n  workflow_call:\n    secrets:\n      NGROK_AUTHTOKEN:\n        required: true\n\nname: Continuous integration\n\njobs:\n  udeps:\n    name: Udeps\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v4\n      - uses: jrobsonchase/direnv-action@v0.7\n      - uses: ./.github/workflows/rust-cache\n      - uses: actions-rs/cargo@v1\n        with:\n          command: udeps\n          args: --workspace --all-targets --all-features\n\n  fmt:\n    name: Rustfmt\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v4\n      - uses: jrobsonchase/direnv-action@v0.7\n      - uses: actions-rs/cargo@v1\n        with:\n          command: fmt\n          args: --all -- --check\n\n  clippy:\n    name: Clippy\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v4\n      - uses: jrobsonchase/direnv-action@v0.7\n      - uses: ./.github/workflows/rust-cache\n      - uses: actions-rs/cargo@v1\n        with:\n          command: clippy\n          args: --all-targets --all-features --workspace -- -D warnings\n\n  test-nix:\n    name: Test Nix\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v4\n      - uses: jrobsonchase/direnv-action@v0.7\n      - uses: ./.github/workflows/rust-cache\n      - uses: actions-rs/cargo@v1\n        env:\n          NGROK_AUTHTOKEN: ${{ secrets.NGROK_AUTHTOKEN }}\n        with:\n          command: test\n          args: --workspace --all-targets\n\n  test-stable:\n    name: Test Stable\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v4\n      - uses: actions-rs/toolchain@v1\n        with:\n          profile: minimal\n          toolchain: stable\n          override: true\n      # We don't actually have sccache installed here (yet), but it still\n      # benefits from the cargo cache.\n      - uses: ./.github/workflows/rust-cache\n      - uses: actions-rs/cargo@v1\n        env:\n          NGROK_AUTHTOKEN: ${{ secrets.NGROK_AUTHTOKEN }}\n        with:\n          command: test\n          args: --features=paid-tests,long-tests --workspace --all-targets\n\n  test-win:\n    name: Test Windows Stable\n    runs-on: windows-latest\n    steps:\n      - uses: actions/checkout@v4\n      - uses: actions-rs/toolchain@v1\n        with:\n          profile: minimal\n          toolchain: stable\n          override: true\n      # We don't actually have sccache installed here (yet), but it still\n      # benefits from the cargo cache.\n      - uses: ./.github/workflows/rust-cache\n      - uses: actions-rs/cargo@v1\n        env:\n          NGROK_AUTHTOKEN: ${{ secrets.NGROK_AUTHTOKEN }}\n        with:\n          command: test\n          args: --workspace --all-targets\n\n  semver:\n    name: Semver Check\n    runs-on: ubuntu-latest\n    strategy:\n      matrix:\n        crate: [muxado, ngrok]\n    steps:\n      - uses: actions/checkout@v4\n      - uses: jrobsonchase/direnv-action@v0.7\n      - uses: ./.github/workflows/rust-cache\n      - uses: actions-rs/cargo@v1\n        name: semver checks\n        with:\n          command: semver-checks\n          args: check-release -p ${{ matrix.crate }}\n\n\n"
  },
  {
    "path": ".github/workflows/docs.yml",
    "content": "on:\n  push:\n    branches: [main]\n\nname: Publish Docs\n\njobs:\n  build:\n    name: Build Rustdocs\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v4\n      - uses: actions-rs/toolchain@v1\n        with:\n          profile: minimal\n          toolchain: stable\n          override: true\n\n      - name: update apt\n        run: sudo apt-get update\n      - name: install protoc\n        run: sudo apt-get -o Acquire::Retries=3 install -y protobuf-compiler\n      - uses: actions-rs/cargo@v1\n        with:\n          command: doc\n          args: --no-deps\n\n      - name: Archive docs\n        shell: sh\n        run: |\n          echo \"<meta http-equiv=\\\"refresh\\\" content=\\\"0; url=ngrok\\\">\" > target/doc/index.html\n          chmod -c -R +r target/doc | while read line; do\n            echo \"::warning title=Changed permissions on a file::$line\"\n          done\n\n      - name: Upload static files as artifact\n        uses: actions/upload-pages-artifact@v3\n        with:\n          path: target/doc\n\n    # Deployment job\n  deploy:\n    # Grant GITHUB_TOKEN the permissions required to make a Pages deployment\n    permissions:\n      pages: write      # to deploy to Pages\n      id-token: write   # to verify the deployment originates from an appropriate source\n\n    environment:\n      name: github-pages\n      url: ${{ steps.deployment.outputs.page_url }}\n\n    runs-on: ubuntu-latest\n    needs: build\n    steps:\n      - name: Deploy to GitHub Pages\n        id: deployment\n        uses: actions/deploy-pages@v4\n"
  },
  {
    "path": ".github/workflows/publish.yml",
    "content": "on:\n  workflow_dispatch:\nname: Publish All\n\njobs:\n  ci:\n    name: Run CI\n    uses: ./.github/workflows/ci.yml\n    secrets:\n      NGROK_AUTHTOKEN: ${{ secrets.NGROK_AUTHTOKEN }}\n\n  # Publishing jobs - these run sequentially as before\n  publish-muxado:\n    name: Publish muxado\n    uses: ./.github/workflows/release.yml\n    needs: [ci]\n    permissions:\n      contents: write\n    with:\n      crate: muxado\n    secrets:\n      CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}\n\n  publish-ngrok:\n    name: Publish ngrok\n    uses: ./.github/workflows/release.yml\n    needs: [publish-muxado]\n    if: needs.publish-muxado.result == 'success' || needs.publish-muxado.result == 'skipped'\n    permissions:\n      contents: write\n    with:\n      crate: ngrok\n    secrets:\n      CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}\n\n  publish-cargo-doc-ngrok:\n    name: Publish cargo-doc-ngrok\n    uses: ./.github/workflows/release.yml\n    needs: [publish-ngrok]\n    if: needs.publish-ngrok.result == 'success' || needs.publish-ngrok.result == 'skipped'\n    permissions:\n      contents: write\n    with:\n      crate: cargo-doc-ngrok\n    secrets:\n      CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}\n"
  },
  {
    "path": ".github/workflows/release.yml",
    "content": "on:\n  workflow_dispatch:\n    inputs:\n      crate:\n        description: 'Crate to release'\n        required: true\n        default: 'ngrok'\n  workflow_call:\n    inputs:\n      crate:\n        description: 'Crate to release'\n        required: true\n        type: string\n    secrets:\n      CARGO_REGISTRY_TOKEN:\n        required: true\n\nname: Release\n\njobs:\n  cargo-publish:\n    name: Publish and Tag\n    runs-on: ubuntu-latest\n    permissions:\n      contents: write\n    continue-on-error: true\n    steps:\n      - uses: actions/checkout@v4\n      - uses: jrobsonchase/direnv-action@v0.7\n      - name: cargo publish\n        run: |\n          version=\"$(extract-crate-version ${{inputs.crate}})\"\n          crate=\"${{inputs.crate}}\"\n          tag=\"${crate}-v${version}\"\n          echo \"Checking if crate $crate version $version exists on crates.io\"\n          result=$(cargo search $crate --limit 1 | grep \"$version\" || true)\n          if [ -n \"$result\" ]; then\n            echo \"Crate $crate version $version already exists on crates.io, skipping publish.\"\n            exit 0\n          fi\n          echo \"Crate version $version not found on crates.io, proceeding with publish.\"\n          cargo publish -p $crate --token ${{ secrets.CARGO_REGISTRY_TOKEN }}\n      - name: tag release\n        run: |\n          version=\"$(extract-crate-version ${{inputs.crate}})\"\n          git config user.name \"GitHub Action\"\n          git config user.email noreply@ngrok.com\n          tag=\"${{inputs.crate}}-v${version}\"\n          echo \"Version ${version}, tag ${tag}\"\n          echo \"Fetching all tags in the repository\"\n          git fetch --tags\n          if git rev-parse \"refs/tags/$tag\" >/dev/null 2>&1; then\n            echo \"Tag $tag already exists, skipping tag creation.\"\n          else\n            echo \"Tag $tag does not exist, pushing tag.\"\n            git tag -a -m \"Version ${version}\" $tag\n            git push --tags\n          fi"
  },
  {
    "path": ".github/workflows/rust-cache/action.yml",
    "content": "name: 'rust cache setup'\ndescription: 'Set up cargo and sccache caches'\ninputs: {}\noutputs: {}\nruns:\n  using: \"composite\"\n  steps:\n    - name: configure sccache\n      uses: actions/github-script@v6\n      with:\n        script: |\n          core.exportVariable('ACTIONS_CACHE_URL', process.env.ACTIONS_CACHE_URL || '');\n          core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env.ACTIONS_RUNTIME_TOKEN || '');\n          core.exportVariable('SCCACHE_GHA_CACHE_TO', 'sccache-${{runner.os}}-${{github.ref_name}}');\n          core.exportVariable('SCCACHE_GHA_CACHE_FROM', 'sccache-${{runner.os}}-main,sccache-${{runner.os}}-');\n    - name: cargo registry cache\n      uses: actions/cache@v3\n      with:\n        key: cargo-${{ runner.os }}-${{ hashFiles('**/Cargo.toml') }}-${{ github.sha }}\n        restore-keys: |\n          cargo-${{ runner.os }}-${{ hashFiles('**/Cargo.toml') }}-\n          cargo-${{ runner.os }}-\n        path: |\n          ~/.cargo/registry\n          ~/.cargo/git"
  },
  {
    "path": ".gitignore",
    "content": ".env\n/target\n.direnv\n/.vscode\n*.swp\n"
  },
  {
    "path": "CODE_OF_CONDUCT.md",
    "content": "# ngrok Code of Conduct\n\n## Our Pledge\n\nIn the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to make participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation.\n\n## Our Standards\n\nExamples of behavior that contributes to creating a positive environment include:\n\n- Using welcoming and inclusive language\n- Being respectful of differing viewpoints and experiences\n- Gracefully accepting constructive criticism\n- Focusing on what is best for the community\n- Showing empathy towards other community members\n\nExamples of unacceptable behavior by participants include:\n\n- The use of sexualized language or imagery and unwelcome sexual attention or advances\n- Trolling, insulting/derogatory comments, and personal or political attacks\n- Public or private harassment\n- Publishing others' private information, such as a physical or electronic address, without explicit permission\n- Other conduct which could reasonably be considered inappropriate in a professional setting\n\n## Our Responsibilities\n\nThe ngrok documentation team is responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.\n\nThe ngrok documentation team has the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.\n\n## Scope\n\nThis Code of Conduct applies both within project spaces and in public spaces when an individual is representing the ngrok docs project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.\n\n## Enforcement\n\nInstances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at [support@ngrok.com](mailto:support@ngrok.com). All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.\n\nProject maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.\n\n## Attribution\n\nThis Code of Conduct is adapted from the Contributor Covenant, version 2.1, available at [https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].\n\nFor answers to common questions about this code of conduct, see https://www.contributor-covenant.org/faq\n"
  },
  {
    "path": "CONTRIBUTING.md",
    "content": "# Contributing to ngrok-rust\n\nThank you for deciding to contribute to ngrok-rust!\n\n## Reporting a bug\n\nTo report a bug, please [open a new issue](https://github.com/ngrok/ngrok-rust/issues/new) with clear reproduction steps. We will triage and investigate these issues at a regular interval.\n\n## Contributing code\n\nBugfixes and small improvements are always appreciated!\n\nFor any larger changes or features, please [open a new issue](https://github.com/ngrok/ngrok-rust/issues/new) first to discuss whether the change makes sense. When in doubt, it's always okay to open an issue first.\n"
  },
  {
    "path": "Cargo.toml",
    "content": "[workspace]\nresolver = \"2\"\nmembers = [\n\t\"muxado\",\n\t\"ngrok\",\n\t\"cargo-doc-ngrok\",\n]\n\n[profile.release]\ndebug = 1\n"
  },
  {
    "path": "LICENSE-APACHE",
    "content": "                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. (Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "LICENSE-MIT",
    "content": "Copyright 2022 ngrok, Inc.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE."
  },
  {
    "path": "cargo-doc-ngrok/Cargo.toml",
    "content": "[package]\nname = \"cargo-doc-ngrok\"\nversion = \"0.2.2\"\nedition = \"2021\"\nlicense = \"MIT OR Apache-2.0\"\ndescription = \"A cargo subcommand to build and serve documentation via ngrok\"\nrepository = \"https://github.com/ngrok/ngrok-rust\"\n\n[dependencies]\nawaitdrop = \"0.1.2\"\naxum = \"0.7.4\"\nbstr = \"1.4.0\"\ncargo_metadata = \"0.15.2\"\nclap = { version = \"4.0.29\", features = [\"derive\"] }\nfutures = \"0.3.25\"\nhttp = \"1.0.0\"\nhyper = { version = \"1.1.0\", features = [\"server\"] }\nhyper-staticfile = \"0.10.0\"\nhyper-util = { version = \"0.1.3\", features = [\"server\", \"tokio\", \"server-auto\", \"http1\"] }\nngrok = { path = \"../ngrok\", version = \"0.18\", features = [\"hyper\", \"axum\"] }\ntokio = { version = \"1.23.0\", features = [\"full\"] }\nwatchexec = \"2.3.0\"\n# watchexec-signals 1.0.1 causes a compilation error.\n# this will likely be ironed out as they release watchexec 3.0.0 components.\n# https://github.com/watchexec/watchexec/issues/701\nwatchexec-signals = \"=1.0.0\"\n"
  },
  {
    "path": "cargo-doc-ngrok/src/main.rs",
    "content": "use std::{\n    io,\n    path::PathBuf,\n    process::Stdio,\n    sync::Arc,\n};\n\nuse axum::BoxError;\nuse clap::{\n    Args,\n    Parser,\n    Subcommand,\n};\nuse futures::TryStreamExt;\nuse hyper::service::service_fn;\nuse hyper_util::{\n    rt::TokioExecutor,\n    server,\n};\nuse ngrok::prelude::*;\nuse watchexec::{\n    action::{\n        Action,\n        Outcome,\n    },\n    command::Command,\n    config::{\n        InitConfig,\n        RuntimeConfig,\n    },\n    error::CriticalError,\n    handler::PrintDebug,\n    signal::source::MainSignal,\n    Watchexec,\n};\n\n#[derive(Parser, Debug)]\nstruct Cargo {\n    #[command(subcommand)]\n    cmd: Cmd,\n}\n\n#[derive(Debug, Subcommand)]\nenum Cmd {\n    DocNgrok(DocNgrok),\n}\n\n#[derive(Debug, Args)]\nstruct DocNgrok {\n    #[arg(short)]\n    package: Option<String>,\n\n    #[arg(long, short)]\n    domain: Option<String>,\n\n    #[arg(long, short)]\n    watch: bool,\n\n    #[arg(last = true)]\n    doc_args: Vec<String>,\n}\n\n#[tokio::main]\nasync fn main() -> Result<(), BoxError> {\n    let Cmd::DocNgrok(args) = Cargo::parse().cmd;\n\n    std::process::Command::new(\"cargo\")\n        .arg(\"doc\")\n        .args(args.doc_args.iter())\n        .stderr(Stdio::inherit())\n        .stdout(Stdio::inherit())\n        .spawn()?\n        .wait()?;\n\n    let meta = cargo_metadata::MetadataCommand::new().exec()?;\n\n    let default_package = args\n        .package\n        .or(meta.root_package().map(|p| p.name.clone()))\n        .ok_or(\"No default package found. You must provide one with -p\")?;\n    let root_dir = meta.workspace_root;\n    let target_dir = meta.target_directory;\n    let doc_dir = target_dir.join(\"doc\");\n\n    let sess = ngrok::Session::builder()\n        .authtoken_from_env()\n        .connect()\n        .await?;\n\n    let mut listen_cfg = sess.http_endpoint();\n    if let Some(domain) = args.domain {\n        listen_cfg.domain(domain);\n    }\n\n    let mut listener = listen_cfg.listen().await?;\n\n    let service = service_fn(move |req| {\n        let stat = hyper_staticfile::Static::new(&doc_dir);\n        stat.serve(req)\n    });\n\n    println!(\n        \"serving docs on: {}/{}/\",\n        listener.url(),\n        default_package.replace('-', \"_\")\n    );\n\n    let server = async move {\n        let (dropref, waiter) = awaitdrop::awaitdrop();\n\n        // Continuously accept new connections.\n        while let Some(conn) = listener.try_next().await? {\n            let service = service.clone();\n            let dropref = dropref.clone();\n            // Spawn a task to handle the connection. That way we can multiple connections\n            // concurrently.\n            tokio::spawn(async move {\n                if let Err(err) = server::conn::auto::Builder::new(TokioExecutor::new())\n                    .serve_connection(conn, service)\n                    .await\n                {\n                    eprintln!(\"failed to serve connection: {err:#}\");\n                }\n                drop(dropref);\n            });\n        }\n\n        // Wait until all children have finished, not just the listener.\n        drop(dropref);\n        waiter.await;\n\n        Ok::<(), BoxError>(())\n    };\n\n    if args.watch {\n        let we = make_watcher(args.doc_args, root_dir, target_dir)?;\n\n        we.main().await??;\n    } else {\n        server.await?;\n    }\n\n    Ok(())\n}\n\nfn make_watcher(\n    args: Vec<String>,\n    root_dir: impl Into<PathBuf>,\n    target_dir: impl Into<PathBuf>,\n) -> Result<Arc<Watchexec>, Box<CriticalError>> {\n    let target_dir = target_dir.into();\n    let root_dir = root_dir.into();\n    let mut init = InitConfig::default();\n    init.on_error(PrintDebug(std::io::stderr()));\n\n    let mut runtime = RuntimeConfig::default();\n    runtime.pathset([root_dir]);\n    runtime.command(Command::Exec {\n        prog: \"cargo\".into(),\n        args: [String::from(\"doc\")].into_iter().chain(args).collect(),\n    });\n    runtime.on_action({\n        move |action: Action| {\n            let target_dir = target_dir.clone();\n            async move {\n                let sigs = action\n                    .events\n                    .iter()\n                    .flat_map(|event| event.signals())\n                    .collect::<Vec<_>>();\n                if sigs.iter().any(|sig| sig == &MainSignal::Interrupt) {\n                    action.outcome(Outcome::Exit);\n                } else if action\n                    .events\n                    .iter()\n                    .any(|e| e.paths().any(|(p, _)| !p.starts_with(&target_dir)))\n                {\n                    action.outcome(Outcome::if_running(\n                        Outcome::both(Outcome::Stop, Outcome::Start),\n                        Outcome::Start,\n                    ));\n                }\n\n                Result::<_, io::Error>::Ok(())\n            }\n        }\n    });\n    Watchexec::new(init, runtime).map_err(Box::new)\n}\n"
  },
  {
    "path": "flake.nix",
    "content": "{\n  description = \"ngrok agent library in Rust\";\n\n  inputs = {\n    nixpkgs.url = \"github:nixos/nixpkgs/nixpkgs-unstable\";\n\n    # Note: fenix packages are cached via cachix:\n    #       cachix use nix-community\n    fenix-flake = {\n      url = \"github:nix-community/fenix\";\n      inputs.nixpkgs.follows = \"nixpkgs\";\n    };\n\n    flake-utils = {\n      url = \"github:numtide/flake-utils\";\n      inputs.nixpkgs.follows = \"nixpkgs\";\n    };\n  };\n\n  outputs = { self, nixpkgs, fenix-flake, flake-utils }:\n    flake-utils.lib.eachDefaultSystem (system:\n      let\n        pkgs = import nixpkgs {\n          inherit system;\n          overlays = [\n            fenix-flake.overlays.default\n          ];\n        };\n        toolchain = pkgs.fenix.complete.withComponents [\n          \"cargo\"\n          \"clippy\"\n          \"rust-src\"\n          \"rustc\"\n          \"rustfmt\"\n          \"rust-analyzer\"\n        ];\n        fix-n-fmt = pkgs.writeShellScriptBin \"fix-n-fmt\" ''\n          set -euf -o pipefail\n          ${toolchain}/bin/cargo clippy --fix --allow-staged --allow-dirty --all-targets --all-features\n          ${toolchain}/bin/cargo fmt\n        '';\n        pre-commit = pkgs.writeShellScript \"pre-commit\" ''\n          cargo clippy --workspace --all-targets --all-features -- -D warnings\n          result=$?\n\n          if [[ ''${result} -ne 0 ]] ; then\n              cat <<\\EOF\n          There are some linting issues, try `fix-n-fmt` to fix.\n          EOF\n              exit 1\n          fi\n\n          # Use a dedicated sub-target-dir for udeps. 
For some reason, it fights with clippy over the cache.\n          CARGO_TARGET_DIR=$(git rev-parse --show-toplevel)/target/udeps cargo udeps --workspace --all-targets --all-features\n          result=$?\n\n          if [[ ''${result} -ne 0 ]] ; then\n              cat <<\\EOF\n          There are some unused dependencies.\n          EOF\n              exit 1\n          fi\n\n          diff=$(cargo fmt -- --check)\n          result=$?\n\n          if [[ ''${result} -ne 0 ]] ; then\n              cat <<\\EOF\n          There are some code style issues, run `fix-n-fmt` first.\n          EOF\n              exit 1\n          fi\n\n          exit 0\n        '';\n        setup-hooks = pkgs.writeShellScriptBin \"setup-hooks\" ''\n          repo_root=$(git rev-parse --git-dir)\n\n          ${toString (map (h: ''\n            ln -sf ${h} ''${repo_root}/hooks/${h.name}\n          '') [\n            pre-commit\n          ])}\n        '';\n        # Make sure that cargo semver-checks uses the stable toolchain rather\n        # than the nightly one that we normally develop with.\n        semver-checks = with pkgs; symlinkJoin {\n          name = \"cargo-semver-checks\";\n          paths = [ cargo-semver-checks ];\n          buildInputs = [ makeWrapper ];\n          postBuild = ''\n            wrapProgram $out/bin/cargo-semver-checks \\\n              --prefix PATH : ${rustc}/bin \\\n              --prefix PATH : ${cargo}/bin\n          '';\n        };\n        extract-version = with pkgs; writeShellScriptBin \"extract-crate-version\" ''\n          ${cargo}/bin/cargo metadata --format-version 1 --no-deps | \\\n            ${jq}/bin/jq -r \".packages[] | select(.name == \\\"$1\\\") | .version\"\n        '';\n      in\n      {\n        devShell = pkgs.mkShell {\n          CHALK_OVERFLOW_DEPTH = 3000;\n          CHALK_SOLVER_MAX_SIZE = 1500;\n          OPENSSL_LIB_DIR = \"${pkgs.openssl.out}/lib\";\n          OPENSSL_INCLUDE_DIR = \"${pkgs.openssl.dev}/include\";\n          
RUSTC_WRAPPER=\"${pkgs.sccache}/bin/sccache\";\n          buildInputs = with pkgs; [\n            toolchain\n            fix-n-fmt\n            setup-hooks\n            cargo-udeps\n            semver-checks\n            extract-version\n          ] ++ lib.optionals stdenv.isDarwin [\n            # nix darwin stdenv has broken libiconv: https://github.com/NixOS/nixpkgs/issues/158331\n            libiconv\n            pkgs.darwin.apple_sdk.frameworks.CoreServices\n            pkgs.darwin.apple_sdk.frameworks.Security\n          ];\n        };\n      });\n}\n"
  },
  {
    "path": "ngrok/CHANGELOG.md",
    "content": "## 0.18.0\n- Add support for CEL filtering when listing resources.\n- Add support for service users\n- Add support for `vault_name` on Secrets\n- Make `pooling_enabled` on Endpoints optional\n\n## 0.17.0\n\n### Breaking Changes\n- **Binding is now optional**: Tests no longer hardcode `binding(\"public\")`. The ngrok service will use its default binding configuration when not explicitly specified.\n- **Binding validation**: The `binding()` method now validates input values and panics on invalid values or multiple calls.\n\n### Added\n- Added `Binding` enum with three variants: `Public`, `Internal`, and `Kubernetes`\n- Added validation for binding values - only \"public\", \"internal\", and \"kubernetes\" are accepted (case-insensitive)\n- Added `binding()` method documentation with examples for both string and typed enum usage\n- Added panic behavior when `binding()` is called more than once (only one binding allowed)\n\n### Changed\n- `binding()` method now accepts both strings and the `Binding` enum via `Into<String>`\n- Removed hardcoded \"public\" binding from all tests - bindings are now truly optional\n\n## 0.15.0\n- Removes `hyper-proxy` and `ring` dependencies\n\n## 0.14.0\n- Adds `pooling_enabled` option, allowing the endpoint to pool with other endpoints with the same host/port/binding\n\n## 0.13.1\n\n- Preserve the `ERR_NGROK` prefix for error codes.\n\n## 0.13.0\n\n- Add the `NgrokError` trait\n- Add the `ErrResp` type\n- Change the `RpcError::Response` variant to the `ErrResp` type (from `String`)\n- Implement `NgrokError` for `ErrResp`, `RpcError`, and `ConnectError`\n\n## 0.12.4\n\n- Add `Win32_Foundation` feature\n- Update nix for rust `1.72`\n\n## 0.12.3\n\n- Add `session.id()`\n\n## 0.12.2\n\n- Updated readme and changelog\n\n## 0.12.1\n\n- Add source error on reconnect\n- Rename repository to ngrok-rust\n\n## 0.12.0\n\n- Add `client_info` to SessionBuilder\n- Update UserAgent generation\n- Make `circuit_breaker` test more 
reliable\n\n## 0.11.3\n\n- Update stream forwarding logic\n- Add `ca_cert` option to SessionBuilder\n- Unpin `bstr`\n\n## 0.11.2\n\n- Send UserAgent when authenticating\n- Update readme documentation\n\n## 0.11.0\n\n- Include a session close method\n- Mark errors as non-exhaustive\n\n## 0.10.2\n\n- Update default forwards-to\n- Expose OAuth Client ID/Secret setters\n- Muxado: close method on the opener\n\n## 0.10.1\n\n- Add windows pipe support\n- Require tokio rt\n\n## 0.10.0\n\n- Some api-breaking consistency fixes for the session builder.\n- Update the connector to be more in-line with the other handlers and to support\n  disconnect/reconnect error reporting.\n- Add support for custom heartbeat handlers.\n\n## 0.9.0\n\n- Update docs to match ngrok-go\n- Update the tls termination configuration methods to match those in ngrok-go\n- Remove the `_string` suffix from the cidr restriction methods\n\n## 0.8.1\n\n- Fix cancellation bugs causing leaked muxado/ngrok sessions.\n\n## 0.8.0\n\n- Some breaking changes to builder method naming for consistency.\n- Add dashboard command handlers\n\n## 0.7.0\n\n- Initial crates.io release.\n\n## Pre-0.7.0\n\n- There was originally a crate on crates.io named 'ngrok' that wrapped the agent\n  binary. It can be found [here](https://github.com/nkconnor/ngrok).\n\n  Thanks @nkconnor!\n"
  },
  {
    "path": "ngrok/Cargo.toml",
    "content": "[package]\nname = \"ngrok\"\nversion = \"0.18.0\"\nedition = \"2021\"\nlicense = \"MIT OR Apache-2.0\"\ndescription = \"The ngrok agent SDK\"\nrepository = \"https://github.com/ngrok/ngrok-rust\"\n\n[dependencies]\narc-swap = \"1.5.2\"\nasync-trait = \"0.1.59\"\nawaitdrop = \"0.1.1\"\naxum = { version = \"0.7.4\", features = [\"tokio\"], optional = true }\naxum-core = \"0.4.3\"\n\nbase64 = \"0.21.7\"\nbitflags = \"2.4.2\"\nbytes = \"1.10.1\"\n\nfutures = \"0.3.25\"\nfutures-rustls = { version = \"0.26.0\", default-features = false, features = [\"tls12\", \"logging\"] }\nfutures-util = \"0.3.30\"\nhostname = \"0.3.1\"\nhyper = { version = \"^1.1.0\", optional = true }\nhyper-http-proxy = \"1.1.0\"\nhyper-util = { version = \"0.1.3\", features = [\"tokio\"] }\nonce_cell = \"1.17.1\"\nmuxado = { path = \"../muxado\", version = \"0.5\" }\npin-project = \"1.1.3\"\nparking_lot = \"0.12.1\"\nproxy-protocol = \"0.5.0\"\nregex = \"1.7.3\"\nrustls-native-certs = \"0.7.0\"\nrustls-pemfile = \"2.0.0\"\nserde = { version = \"1.0.149\", features = [\"derive\"] }\nserde_json = \"1.0.89\"\nthiserror = \"2\"\ntokio = { version = \"1.23.0\", features = [\n\t\"io-util\",\n\t\"net\",\n\t\"sync\",\n\t\"time\",\n\t\"rt\",\n] }\ntokio-retry = \"0.3.0\"\ntokio-socks = \"0.5.1\"\ntokio-util = { version = \"0.7.4\", features = [\"compat\"] }\ntower-service = { version = \"0.3.3\"}\ntracing = \"0.1.37\"\nurl = \"2.4.0\"\n\n[target.'cfg(windows)'.dependencies]\nwindows-sys = { version = \"0.45.0\", features = [\"Win32_Foundation\"] }\n\n[dev-dependencies]\nanyhow = \"1.0.66\"\naxum = { version = \"0.7.4\", features = [\"tokio\"] }\nflate2 = \"1.0.25\"\nhttp-body-util = \"0.1.3\"\nhyper = { version = \"1.1.0\", features = [ \"client\" ] }\nhyper-util = { version = \"0.1.3\", features = [\n\t\"tokio\",\n\t\"server\",\n\t\"http1\",\n\t\"http2\",\n]}\npaste = \"1.0.11\"\nrand = \"0.8.5\"\nreqwest = \"0.12\"\ntokio = { version = \"1.23.0\", features = [\"full\"] 
}\ntokio-tungstenite = { version = \"0.26.2\", features = [\n\t\"rustls\",\n\t\"rustls-tls-webpki-roots\",\n] }\ntower = { version = \"0.5\", features = [\"util\"] }\ntracing-subscriber = { version = \"0.3.16\", features = [\"env-filter\"] }\ntracing-test = \"0.2.3\"\n\n[[example]]\nname = \"tls\"\nrequired-features = [\"axum\"]\n\n[[example]]\nname = \"axum\"\nrequired-features = [\"axum\"]\n\n[[example]]\nname = \"labeled\"\nrequired-features = [\"axum\"]\n\n[[example]]\nname = \"mingrok\"\nrequired-features = [\"hyper\"]\n\n[features]\ndefault = [\"aws-lc-rs\"]\nhyper = [\"hyper/server\", \"hyper/http1\",  \"dep:hyper\"]\naxum = [\"dep:axum\", \"hyper\"]\nonline-tests = [\"axum\", \"hyper\"]\nlong-tests = [\"online-tests\"]\nauthenticated-tests = [\"online-tests\"]\npaid-tests = [\"authenticated-tests\"]\naws-lc-rs = [\"futures-rustls/aws-lc-rs\"]\nring = [\"futures-rustls/ring\"]\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n"
  },
  {
    "path": "ngrok/README.md",
    "content": "# ngrok-rust\n\n[![Crates.io][crates-badge]][crates-url]\n[![docs.rs][docs-badge]][docs-url]\n[![MIT licensed][mit-badge]][mit-url]\n[![Apache-2.0 licensed][apache-badge]][apache-url]\n[![Continuous integration][ci-badge]][ci-url]\n\n[crates-badge]: https://img.shields.io/crates/v/ngrok.svg\n[crates-url]: https://crates.io/crates/ngrok\n[docs-badge]: https://img.shields.io/docsrs/ngrok.svg\n[docs-url]: https://docs.rs/ngrok\n[ci-badge]: https://github.com/ngrok/ngrok-rust/actions/workflows/ci.yml/badge.svg\n[ci-url]: https://github.com/ngrok/ngrok-rust/actions/workflows/ci.yml\n[mit-badge]: https://img.shields.io/badge/license-MIT-blue.svg\n[mit-url]: https://github.com/ngrok/ngrok-rust/blob/main/LICENSE-MIT\n[apache-badge]: https://img.shields.io/badge/license-Apache_2.0-blue.svg\n[apache-url]: https://github.com/ngrok/ngrok-rust/blob/main/LICENSE-APACHE\n\n[API Docs (main)](https://ngrok.github.io/ngrok-rust/ngrok)\n\n[ngrok](https://ngrok.com) is a simplified API-first ingress-as-a-service that adds connectivity, \nsecurity, and observability to your apps.\n\nngrok-rust, our native and idiomatic crate for adding a public internet address\nwith secure ingress traffic directly into your Rust apps 🦀. If you’ve used ngrok in\nthe past, you can think of ngrok-rust as the ngrok agent packaged as a Rust crate.\n\nngrok-rust lets developers serve Rust services on the internet in a single statement\nwithout setting up low-level network primitives like IPs, NAT, certificates,\nload balancers, and even ports! Applications using ngrok-rust listen on ngrok’s global\ningress network for TCP and HTTP traffic. 
ngrok-rust listeners are usable with\n[hyper Servers](https://docs.rs/hyper/latest/hyper/server/index.html), and connections\nimplement [tokio’s AsyncRead and AsyncWrite traits](https://docs.rs/tokio/latest/tokio/io/index.html).\nThis makes it easy to add ngrok-rust into any application that’s built on hyper, such\nas the popular [axum](https://docs.rs/axum/latest/axum/) HTTP framework.\n\nSee [`/ngrok/examples/`][examples] for example usage, or the tests in\n[`/ngrok/src/online_tests.rs`][online-tests].\n\n[examples]: https://github.com/ngrok/ngrok-rust/blob/main/ngrok/examples\n[online-tests]: https://github.com/ngrok/ngrok-rust/blob/main/ngrok/src/online_tests.rs\n\nFor working with the [ngrok API](https://ngrok.com/docs/api/), check out the\n[ngrok Rust API Client Library](https://github.com/ngrok/ngrok-api-rs).\n\n## Installation\n\nAdd `ngrok` to the `[dependencies]` section of your `Cargo.toml` with `cargo add`:\n\n```bash\n$ cargo add ngrok\n```\n\n## Quickstart\n\nCreate a simple HTTP server using `ngrok` and `axum`:\n\n`Cargo.toml`:\n\n```toml\n[package]\nname = \"ngrok-rust-demo\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n[dependencies]\nngrok = {version = \"0.14.0\"}\ntokio = { version = \"1\", features = [\n    \"full\"\n] }\naxum = { version = \"0.7.4\", features = [\"tokio\"] }\nasync-trait = \"0.1.59\"\nhyper = {version = \"1\", features = [\"full\"]}\nhyper-util = { version = \"0.1\", features = [\n\t\"full\"\n] }\nurl = \"2.5.4\"\n```\n\n`src/main.rs`:\n\n```rust\n#![deny(warnings)]\nuse axum::{routing::get, Router};\nuse ngrok::config::{ForwarderBuilder, TunnelBuilder};\nuse std::net::SocketAddr;\nuse url::Url;\n\n#[tokio::main]\nasync fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {\n    // Create Axum app\n    let app = Router::new().route(\"/\", get(|| async { \"Hello from Axum!\" }));\n\n    // Spawn Axum server\n    let addr = SocketAddr::from(([127, 0, 0, 1], 3000));\n    tokio::spawn(async move {\n        
axum::serve(tokio::net::TcpListener::bind(addr).await.unwrap(), app)\n            .await\n            .unwrap();\n    });\n\n    // Set up ngrok tunnel\n    let sess1 = ngrok::Session::builder()\n        .authtoken_from_env()\n        .connect()\n        .await?;\n    let sess2 = ngrok::Session::builder()\n        .authtoken_from_env()\n        .connect()\n        .await?;\n\n    let _listener = sess1\n        .http_endpoint()\n        .domain(\"/* your domain*/\")\n        .pooling_enabled(true)\n        .listen_and_forward(Url::parse(\"http://localhost:3000\").unwrap())\n        .await?;\n    let _listener2 = sess2\n        .http_endpoint()\n        .domain(\"/* your domain */\")\n        .pooling_enabled(true)\n        .listen_and_forward(Url::parse(\"http://localhost:8000\").unwrap())\n        .await?;\n\n    // Wait indefinitely\n    tokio::signal::ctrl_c().await?;\n    Ok(())\n}\n\n```\n\n# Changelog\n\nChanges to `ngrok-rust` are tracked under [CHANGELOG.md](https://github.com/ngrok/ngrok-rust/blob/main/ngrok/CHANGELOG.md).\n\n# Join the ngrok Community\n\n- Check out [our official docs](https://docs.ngrok.com)\n- Read about updates on [our blog](https://ngrok.com/blog)\n- Open an [issue](https://github.com/ngrok/ngrok-rust/issues) or [pull request](https://github.com/ngrok/ngrok-rust/pulls)\n- Join our [Slack community](https://ngrok.com/slack)\n- Follow us on [X / Twitter (@ngrokHQ)](https://twitter.com/ngrokhq)\n- Subscribe to our [Youtube channel (@ngrokHQ)](https://www.youtube.com/@ngrokhq)\n\n# License\n\nThis project is licensed under either of\n\n- Apache License, Version 2.0, ([LICENSE-APACHE][apache-url] or\n  <http://www.apache.org/licenses/LICENSE-2.0>)\n- MIT license ([LICENSE-MIT][mit-url] or\n  <http://opensource.org/licenses/MIT>)\n\nat your option.\n\n### Contribution\n\nUnless you explicitly state otherwise, any contribution intentionally submitted\nfor inclusion in ngrok by you, as defined in the Apache-2.0 license, shall be\ndual licensed 
as above, without any additional terms or conditions.\n"
  },
  {
    "path": "ngrok/assets/ngrok.ca.crt",
    "content": "-----BEGIN CERTIFICATE-----\nMIID4TCCAsmgAwIBAgIUZqF2AkB17pISojTndgc2U5BDt74wDQYJKoZIhvcNAQEL\nBQAwbzEQMA4GA1UEAwwHUm9vdCBDQTENMAsGA1UECwwEcHJvZDESMBAGA1UECgwJ\nbmdyb2sgSW5jMRYwFAYDVQQHDA1TYW4gRnJhbmNpc2NvMRMwEQYDVQQIDApDYWxp\nZm9ybmlhMQswCQYDVQQGEwJVUzAgFw0yMjA4MzExNTE3MjFaGA80NzYwMDcyODE1\nMTcyMVowbzEQMA4GA1UEAwwHUm9vdCBDQTENMAsGA1UECwwEcHJvZDESMBAGA1UE\nCgwJbmdyb2sgSW5jMRYwFAYDVQQHDA1TYW4gRnJhbmNpc2NvMRMwEQYDVQQIDApD\nYWxpZm9ybmlhMQswCQYDVQQGEwJVUzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC\nAQoCggEBAMPkZpOguChG8QXfp1eCu21wipptiWO9U6F2DRf5ln8XXAAokZyfo4IZ\n795G+KdkEbq4KxSXHehhKQFDwlFnzIkZsDu6PHabXsutAmNLmoRQzsETTdh3gMEJ\nJiCW+mtqmbWPH22GXnUXxe5R6dWbkXqrITy6nFpZWdFbKmo9/1VoyWdIgcXujq2D\naNCWm2BoQ9seCebc5+6gF2syXzvoKVZ4qg6O1anCl1K0ZH/2mDXu/22O2U4Tr/j7\n6Da1Y7TWZYDU2dIz+tyfTOMrlaxXyxxmXewzOpYjBiHisfPpz7AtrTlAzaEVVhRk\nc86vC2h42zqH8Jv0fjJdfMkVXe3eegECAwEAAaNzMHEwHQYDVR0OBBYEFNxeUxPI\nM8G7cX0DhFc81pLD4W+HMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEG\nMC8GA1UdHwQoMCYwJKAioCCGHmh0dHA6Ly9jcmwubmdyb2suY29tL25ncm9rLmNy\nbDANBgkqhkiG9w0BAQsFAAOCAQEAChXl+eYIQbn0OOHLuCBvXxDKHqccJLPaxJR1\nLeWj8HjWbyLXnS405YNn84NFirpYzemeYSex+os92kjjLhBXEOIEpAE9JebDk7N5\nX4xSOkS7vrOepX4JFNhqVdxut7pqEmuj1Xf7KhHtFquFM5fhLJHnWEJGWOTRbRVp\nKWqZI/HzaltSbgiikf3S2qu6oZHph/BpueCqLKwvJziPQGE+cYdYQzRKPJZbuorj\n+CnYUXd7kHC3RZzs6egVIvUYy+bGgv1CeeAm9EccL2RmPkSzThOo6oXBLR50Zlke\n1x7y/5om6zp9vGTW4PWVAW/VWw1x4zxtSQ7NrP1Ldh7Xmnb7sw==\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "ngrok/assets/policy-inbound.json",
    "content": "{\n  \"inbound\": [\n    {\n      \"name\": \"test_in\",\n      \"expressions\": [\n        \"req.Method == 'PUT'\"\n      ],\n      \"actions\": [\n        {\n          \"type\": \"deny\"\n        }\n      ]\n    }\n  ]\n}"
  },
  {
    "path": "ngrok/assets/policy.json",
    "content": "{\n  \"inbound\": [\n    {\n      \"name\": \"test_in\",\n      \"expressions\": [\n        \"req.Method == 'PUT'\"\n      ],\n      \"actions\": [\n        {\n          \"type\": \"deny\"\n        }\n      ]\n    }\n  ],\n  \"outbound\": [\n    {\n      \"name\": \"test_out\",\n      \"expressions\": [\n        \"res.StatusCode == '200'\"\n      ],\n      \"actions\": [\n        {\n          \"type\": \"custom-response\",\n          \"config\": {\n            \"status_code\": 201\n          }\n        }\n      ]\n    }\n  ]\n}"
  },
  {
    "path": "ngrok/examples/axum.rs",
    "content": "use std::{\n    convert::Infallible,\n    net::SocketAddr,\n};\n\nuse axum::{\n    extract::ConnectInfo,\n    routing::get,\n    Router,\n};\nuse axum_core::BoxError;\nuse futures::stream::TryStreamExt;\nuse hyper::{\n    body::Incoming,\n    Request,\n};\nuse hyper_util::{\n    rt::TokioExecutor,\n    server,\n};\nuse ngrok::prelude::*;\nuse tower::{\n    util::ServiceExt,\n    Service,\n};\n\n#[tokio::main]\nasync fn main() -> Result<(), BoxError> {\n    // build our application with a single route\n    let app = Router::new().route(\n        \"/\",\n        get(\n            |ConnectInfo(remote_addr): ConnectInfo<SocketAddr>| async move {\n                format!(\"Hello, {remote_addr:?}!\\r\\n\")\n            },\n        ),\n    );\n\n    let mut listener = ngrok::Session::builder()\n        .authtoken_from_env()\n        .connect()\n        .await?\n        .http_endpoint()\n        // .allow_cidr(\"0.0.0.0/0\")\n        // .basic_auth(\"ngrok\", \"online1line\")\n        // .circuit_breaker(0.5)\n        // .compression()\n        // .deny_cidr(\"10.1.1.1/32\")\n        // .verify_upstream_tls(false)\n        // .domain(\"<somedomain>.ngrok.io\")\n        // .forwards_to(\"example rust\")\n        // .mutual_tlsca(CA_CERT.into())\n        // .oauth(\n        //     OauthOptions::new(\"google\")\n        //         .allow_email(\"<user>@<domain>\")\n        //         .allow_domain(\"<domain>\")\n        //         .scope(\"<scope>\"),\n        // )\n        // .oidc(\n        //     OidcOptions::new(\"<url>\", \"<id>\", \"<secret>\")\n        //         .allow_email(\"<user>@<domain>\")\n        //         .allow_domain(\"<domain>\")\n        //         .scope(\"<scope>\"),\n        // )\n        // .traffic_policy(POLICY_JSON)\n        // .pooling_enabled(false)\n        // .proxy_proto(ProxyProto::None)\n        // .remove_request_header(\"X-Req-Nope\")\n        // .remove_response_header(\"X-Res-Nope\")\n        // 
.request_header(\"X-Req-Yup\", \"true\")\n        // .response_header(\"X-Res-Yup\", \"true\")\n        // .scheme(ngrok::Scheme::HTTPS)\n        // .websocket_tcp_conversion()\n        // .webhook_verification(\"twilio\", \"asdf\"),\n        .metadata(\"example tunnel metadata from rust\")\n        .listen()\n        .await?;\n\n    println!(\"Listener started on URL: {:?}\", listener.url());\n\n    let mut make_service = app.into_make_service_with_connect_info::<SocketAddr>();\n\n    let server = async move {\n        while let Some(conn) = listener.try_next().await? {\n            let remote_addr = conn.remote_addr();\n            let tower_service = unwrap_infallible(make_service.call(remote_addr).await);\n\n            tokio::spawn(async move {\n                let hyper_service =\n                    hyper::service::service_fn(move |request: Request<Incoming>| {\n                        tower_service.clone().oneshot(request)\n                    });\n\n                if let Err(err) = server::conn::auto::Builder::new(TokioExecutor::new())\n                    .serve_connection_with_upgrades(conn, hyper_service)\n                    .await\n                {\n                    eprintln!(\"failed to serve connection: {err:#}\");\n                }\n            });\n        }\n        Ok::<(), BoxError>(())\n    };\n\n    server.await?;\n\n    Ok(())\n}\n\n#[allow(dead_code)]\nconst POLICY_JSON: &str = r###\"{\n    \"inbound\":[\n        {\n            \"name\":\"deny_put\",\n            \"expressions\":[\"req.Method == 'PUT'\"],\n            \"actions\":[{\"Type\":\"deny\"}]\n        }],\n    \"outbound\":[\n        {\n            \"name\":\"change success response\",\n            \"expressions\":[\"res.StatusCode == '200'\"],\n            \"actions\":[{\n                \"type\":\"custom-response\",\n                \"config\":{\n                    \"status_code\":201, \n                    \"content\": \"Custom 200 response.\", \n                    
\"headers\": {\n                        \"content_type\": \"text/html\"\n                    }\n                }\n            }]\n        }]\n}\"###;\n\n#[allow(dead_code)]\nconst POLICY_YAML: &str = r###\"\n---\ninbound:\n    - name: \"deny_put\"\n      expressions:\n      - \"req.Method == 'PUT'\"\n      actions:\n      - type: \"deny\"\noutbound:\n    - name: \"change success response\"\n      expressions:\n      - \"res.StatusCode == '200'\"\n      actions:\n      - type: \"custom-response\"\n        config:\n          status_code: 201\n          content: \"Custom 200 response.\"\n          headers:\n            content_type: \"text/html\"\n\"###;\n\n#[allow(dead_code)]\nfn create_policy() -> Result<Policy, InvalidPolicy> {\n    Ok(Policy::new()\n        .add_inbound(\n            Rule::new(\"deny_put\")\n                .add_expression(\"req.Method == 'PUT'\")\n                .add_action(Action::new(\"deny\", None)?),\n        )\n        .add_outbound(\n            Rule::new(\"200_response\")\n                .add_expression(\"res.StatusCode == '200'\")\n                .add_action(Action::new(\n                    \"custom-response\",\n                    Some(\n                        r###\"{\n                    \"status_code\": 200,\n                    \"content_type\": \"text/html\",\n                    \"content\": \"Custom 200 response.\"\n                }\"###,\n                    ),\n                )?),\n        )\n        .to_owned())\n}\n\n// const CA_CERT: &[u8] = include_bytes!(\"ca.crt\");\n\nfn unwrap_infallible<T>(result: Result<T, Infallible>) -> T {\n    match result {\n        Ok(value) => value,\n        Err(err) => match err {},\n    }\n}\n"
  },
  {
    "path": "ngrok/examples/connect.rs",
    "content": "use futures::TryStreamExt;\nuse ngrok::prelude::*;\nuse tokio::io::{\n    self,\n    AsyncBufReadExt,\n    AsyncWriteExt,\n    BufReader,\n};\nuse tracing::info;\nuse tracing_subscriber::fmt::format::FmtSpan;\n\n#[tokio::main]\nasync fn main() -> anyhow::Result<()> {\n    tracing_subscriber::fmt()\n        .pretty()\n        .with_span_events(FmtSpan::ENTER)\n        .with_env_filter(std::env::var(\"RUST_LOG\").unwrap_or_default())\n        .init();\n\n    let sess = ngrok::Session::builder()\n        .authtoken_from_env()\n        .metadata(\"Online in One Line\")\n        // .root_cas(\"trusted\")?\n        .connect()\n        .await?;\n\n    let tunnel = sess\n        .tcp_endpoint()\n        // .allow_cidr(\"0.0.0.0/0\")\n        // .deny_cidr(\"10.1.1.1/32\")\n        // .verify_upstream_tls(false)\n        // .pooling_enabled(false)\n        // .forwards_to(\"example rust\"),\n        // .proxy_proto(ProxyProto::None)\n        // .remote_addr(\"<n>.tcp.ngrok.io:<p>\")\n        .metadata(\"example tunnel metadata from rust\")\n        .listen()\n        .await?;\n\n    handle_tunnel(tunnel, sess);\n\n    futures::future::pending().await\n}\n\nfn handle_tunnel(mut tunnel: impl EndpointInfo + Tunnel, sess: ngrok::Session) {\n    info!(\"bound new tunnel: {}\", tunnel.url());\n    tokio::spawn(async move {\n        loop {\n            let stream = if let Some(stream) = tunnel.try_next().await? 
{\n                stream\n            } else {\n                info!(\"tunnel closed!\");\n                break;\n            };\n\n            let sess = sess.clone();\n            let id: String = tunnel.id().into();\n\n            tokio::spawn(async move {\n                info!(\"accepted connection: {:?}\", stream.remote_addr());\n                let (rx, mut tx) = io::split(stream);\n\n                let mut lines = BufReader::new(rx);\n\n                loop {\n                    let mut buf = String::new();\n                    let len = lines.read_line(&mut buf).await?;\n                    if len == 0 {\n                        break;\n                    }\n\n                    if buf.contains(\"bye!\") {\n                        info!(\"unbind requested\");\n                        tx.write_all(\"later!\".as_bytes()).await?;\n                        sess.close_tunnel(id).await?;\n                        return Ok(());\n                    } else if buf.contains(\"another!\") {\n                        info!(\"another requested\");\n                        let new_tunnel = sess.tcp_endpoint().listen().await?;\n                        tx.write_all(new_tunnel.url().as_bytes()).await?;\n                        handle_tunnel(new_tunnel, sess.clone());\n                    } else {\n                        info!(\"read line: {}\", buf);\n                        tx.write_all(buf.as_bytes()).await?;\n                        info!(\"echoed line\");\n                    }\n                    tx.flush().await?;\n                    info!(\"flushed\");\n                }\n\n                Result::<(), anyhow::Error>::Ok(())\n            });\n        }\n        anyhow::Result::<()>::Ok(())\n    });\n}\n"
  },
  {
    "path": "ngrok/examples/domain.crt",
    "content": "-----BEGIN CERTIFICATE-----\nMIIC+jCCAeICCQDobWtly6PonjANBgkqhkiG9w0BAQsFADA/MQswCQYDVQQGEwJV\nUzERMA8GA1UECgwITm90IFJlYWwxHTAbBgNVBAMMFHJ1c3Qtc2RrLmV4YW1wbGUu\nY29tMB4XDTIyMTIwMjE4MzMxM1oXDTMyMTEyOTE4MzMxM1owPzELMAkGA1UEBhMC\nVVMxETAPBgNVBAoMCE5vdCBSZWFsMR0wGwYDVQQDDBRydXN0LXNkay5leGFtcGxl\nLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKhsx8tZWzaqaz9i\ngnyU9O/dCEX8qgCvU2yoeJBfGhwCnlFNQBUdBGlV+Cjf19ozagYlY6Cunu214AUR\nCDHTZsgTmMhtHkJ3kWD0wgDu+uyUuW6akP1+o39lebDc6CbDV7j1ySBoPMROp5dB\npX+ltpH42CmJM6ciwfTD1uuW5LXJvb9d4HISZp2RWyHqb3a6pI7E+XLqXg/Yy9MY\neqQESZMrYCjC+Sn4blGhcQhjTVU2rM5ChoDtZuL8OJQ0UYmchlch8CNc5Lvj9hAT\nBiafEAscGrdIAZkK50kjpcIOWPPSfjCRqz8elSQqoKFq/uQnHBF5NwmsEqE0sXhw\n4UdngRMCAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAKieeE6gzuxHGjVT2NKL5BFjL\nXKxdQhI/Tt7ClKu39Ay62fXDRznTBpGRfyWsJ5r3wmsHFogw46a2HYZHyuTMfyPY\nlKhE/9EPMf/faqhIa33nMBASNzuGB5yfcPaod4KJX6DBKZtIpgkm2+S6BivpuSEo\nDJ0lNtlR80mcVPma9KR57A0oh/UIsHXxL0qIKdaxyZYOZ1Zhtm+hzZcZA4wHkqzN\nolNk3SOfhC5vVFudg+5KtxPBZ/efS9sqDUstH8hmE1JnxCF9OBlHdKI4yUMnsEf7\naOy11K5g7Oc3m7EB1twEQkufBAJeYzMOCji17GyJHDojNuOLkrmoLgcgDym5LQ==\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "ngrok/examples/domain.key",
    "content": "-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEAqGzHy1lbNqprP2KCfJT0790IRfyqAK9TbKh4kF8aHAKeUU1A\nFR0EaVX4KN/X2jNqBiVjoK6e7bXgBREIMdNmyBOYyG0eQneRYPTCAO767JS5bpqQ\n/X6jf2V5sNzoJsNXuPXJIGg8xE6nl0Glf6W2kfjYKYkzpyLB9MPW65bktcm9v13g\nchJmnZFbIepvdrqkjsT5cupeD9jL0xh6pARJkytgKML5KfhuUaFxCGNNVTaszkKG\ngO1m4vw4lDRRiZyGVyHwI1zku+P2EBMGJp8QCxwat0gBmQrnSSOlwg5Y89J+MJGr\nPx6VJCqgoWr+5CccEXk3CawSoTSxeHDhR2eBEwIDAQABAoIBADKQLc8brWmU8gue\nbGQwZ/RW3DP+rZ71A8ucLE3Tb0g3dQYddf6groFdINpMkUXdp5few7Eqm2Xr8ywy\nN86Vk8a/M2AAelQkB04fTNrw4/4AjEbrOloQGc+WTFlPiJaSkJRjnZUQFiYtIt0j\nBSd0PYJHPcYCfbJQmf/8h1pE+7ajNJlvEWrJ8UjDCjUuPPxq1aCOIA48aN8awDaO\n2R0AeSBws6+6UgyBgy2juat0t8PvS+AiLv4rK3RGMD+x96KoPEoVVgOQLr5YTqRP\nQ+HYrs5cSXx9Jb2cmuJzvPUJmE3HKhoshWrK7fz5Z8wVAqTGhX6dbuHoqMJnAdla\nFFSBEokCgYEA0BAsrDrnSkls1uC54iqzrxPMvITj4UnBR+PK504NrtP2brlcVIDP\ne0dTKPTqjIC0vpDIg2fhPPvKkeoyuL6huiUWL/DdYVphUlwTf2Mu6PUm3o4M1MWN\nS7q09cqUp4HWCUbzN3MIJ8sOPY17Lq+fxi1Wf4mNh+8IIXcJQ4HgUiUCgYEAzzqx\nL7ck6pBUTtpUFYFTCUQDOYdzPE72zOzHK/LpoWJEssQ479srKlmSnRPRZbPZGMGE\nEXvhWROonux96rRrZjiBI4B5G4rzeY0Rs24kClEh+7s5Zw4xmfSJu5oSdLqiy+O+\nIKMVhOm9qq+8+y9LwKyajwR27srLdHSijJoXNNcCgYEAgtc5EJH2MwwbisFFg8mw\nt0+vN3omR91203uXdH/sMN4Qoa6lNmrOj0raK+5gtTyW7SPlRGWGCjCZQctSXEVd\nNM7vtfQ1c2w/uWg3xqsbq9nGuLwBq6gT4+SkudDMTM5kR+87Mcp//W4/JUwcg85j\nnl+Sfp+Exk/1//14cOByrZUCgYEAjrr7HUVEfPbJysHf1iwL2D7rBa3AdhJhNIYF\nLMUTm59Gd+Zk3PeUxIeLTvs+Z5E2/zESWMR9UtASfNugYo6/xlk2wRAU2h6bUeYT\nAgXjduOox2yNvehty389emRFP/boeAw1gN8yzCf+BdkjDdLmlx+LGORXUmOFPIG1\nD6h2QWMCgYA0WysR3XMcRH/8GDAgNVry5JvKoxlVXTPqVScTjMRj3VAzPYPCV+ql\nlNN6yh/TuJwdvNs+uhKd1Wu4cDIb9GqxkBbUTKoKBrVL1YB93IC7QIR5wVjhJF/i\nlrFW1ogr3535UzHzyDD1oXvcnWV/JnTdadHf2oA3Em8n2oTQvXQAog==\n-----END RSA PRIVATE KEY-----\n"
  },
  {
    "path": "ngrok/examples/labeled.rs",
    "content": "use std::{\n    convert::Infallible,\n    error::Error,\n    net::SocketAddr,\n};\n\nuse axum::{\n    extract::ConnectInfo,\n    routing::get,\n    BoxError,\n    Router,\n};\nuse futures::TryStreamExt;\nuse hyper::{\n    body::Incoming,\n    Request,\n};\nuse hyper_util::{\n    rt::TokioExecutor,\n    server,\n};\nuse ngrok::prelude::*;\nuse tower::{\n    util::ServiceExt,\n    Service,\n};\n\n#[tokio::main]\nasync fn main() -> Result<(), Box<dyn Error + Send + Sync>> {\n    // build our application with a single route\n    let app = Router::new().route(\n        \"/\",\n        get(\n            |ConnectInfo(remote_addr): ConnectInfo<SocketAddr>| async move {\n                format!(\"Hello, {remote_addr:?}!\\r\\n\")\n            },\n        ),\n    );\n\n    let sess = ngrok::Session::builder()\n        .authtoken_from_env()\n        .connect()\n        .await?;\n\n    let mut listener = sess\n        .labeled_tunnel()\n        // .app_protocol(\"http2\")\n        // .verify_upstream_tls(false)\n        .label(\"edge\", \"edghts_<edge_id>\")\n        .metadata(\"example tunnel metadata from rust\")\n        .listen()\n        .await?;\n\n    println!(\"Labeled listener started!\");\n\n    let mut make_service = app.into_make_service_with_connect_info::<SocketAddr>();\n\n    let server = async move {\n        while let Some(conn) = listener.try_next().await? 
{\n            let remote_addr = conn.remote_addr();\n            let tower_service = unwrap_infallible(make_service.call(remote_addr).await);\n\n            tokio::spawn(async move {\n                let hyper_service =\n                    hyper::service::service_fn(move |request: Request<Incoming>| {\n                        tower_service.clone().oneshot(request)\n                    });\n\n                if let Err(err) = server::conn::auto::Builder::new(TokioExecutor::new())\n                    .serve_connection_with_upgrades(conn, hyper_service)\n                    .await\n                {\n                    eprintln!(\"failed to serve connection: {err:#}\");\n                }\n            });\n        }\n        Ok::<(), BoxError>(())\n    };\n\n    server.await?;\n    Ok(())\n}\n\nfn unwrap_infallible<T>(result: Result<T, Infallible>) -> T {\n    match result {\n        Ok(value) => value,\n        Err(err) => match err {},\n    }\n}\n"
  },
  {
    "path": "ngrok/examples/mingrok.rs",
    "content": "use std::sync::{\n    Arc,\n    Mutex,\n};\n\nuse anyhow::Error;\nuse futures::{\n    prelude::*,\n    select,\n};\nuse ngrok::prelude::*;\nuse tokio::sync::oneshot;\nuse tracing::info;\nuse url::Url;\n\n#[tokio::main]\nasync fn main() -> Result<(), Error> {\n    tracing_subscriber::fmt()\n        .pretty()\n        .with_env_filter(std::env::var(\"RUST_LOG\").unwrap_or_else(|_| \"info\".into()))\n        .init();\n\n    let forwards_to = std::env::args()\n        .nth(1)\n        .ok_or_else(|| anyhow::anyhow!(\"missing forwarding address\"))\n        .and_then(|s| Ok(Url::parse(&s)?))?;\n\n    loop {\n        let (stop_tx, stop_rx) = oneshot::channel();\n        let stop_tx = Arc::new(Mutex::new(Some(stop_tx)));\n\n        let (restart_tx, restart_rx) = oneshot::channel();\n        let restart_tx = Arc::new(Mutex::new(Some(restart_tx)));\n\n        let mut fwd = ngrok::Session::builder()\n            .authtoken_from_env()\n            .handle_stop_command(move |req| {\n                let stop_tx = stop_tx.clone();\n                async move {\n                    info!(?req, \"received stop command\");\n                    let _ = stop_tx.lock().unwrap().take().unwrap().send(());\n                    Ok(())\n                }\n            })\n            .handle_restart_command(move |req| {\n                let restart_tx = restart_tx.clone();\n                async move {\n                    info!(?req, \"received restart command\");\n                    let _ = restart_tx.lock().unwrap().take().unwrap().send(());\n                    Ok(())\n                }\n            })\n            .handle_update_command(|req| async move {\n                info!(?req, \"received update command\");\n                Err(\"unable to update\".into())\n            })\n            .connect()\n            .await?\n            .http_endpoint()\n            .listen_and_forward(forwards_to.clone())\n            .await?;\n\n        info!(url = fwd.url(), 
%forwards_to, \"started forwarder\");\n\n        let mut fut = fwd.join().fuse();\n        let mut stop_rx = stop_rx.fuse();\n        let mut restart_rx = restart_rx.fuse();\n\n        select! {\n            res = fut => info!(\"{:?}\", res?),\n            _ = stop_rx => return Ok(()),\n            _ = restart_rx => {\n                drop(fut);\n                let _ = fwd.close().await;\n                continue\n            },\n        }\n    }\n}\n"
  },
  {
    "path": "ngrok/examples/tls.rs",
    "content": "use std::{\n    convert::Infallible,\n    error::Error,\n    net::SocketAddr,\n};\n\nuse axum::{\n    extract::ConnectInfo,\n    routing::get,\n    BoxError,\n    Router,\n};\nuse futures::TryStreamExt;\nuse hyper::{\n    body::Incoming,\n    Request,\n};\nuse hyper_util::{\n    rt::TokioExecutor,\n    server,\n};\nuse ngrok::prelude::*;\nuse tower::{\n    util::ServiceExt,\n    Service,\n};\n\nconst CERT: &[u8] = include_bytes!(\"domain.crt\");\nconst KEY: &[u8] = include_bytes!(\"domain.key\");\n// const CA_CERT: &[u8] = include_bytes!(\"ca.crt\");\n\n#[tokio::main]\nasync fn main() -> Result<(), Box<dyn Error + Send + Sync>> {\n    // build our application with a single route\n    let app = Router::new().route(\n        \"/\",\n        get(\n            |ConnectInfo(remote_addr): ConnectInfo<SocketAddr>| async move {\n                format!(\"Hello, {remote_addr:?}!\\r\\n\")\n            },\n        ),\n    );\n\n    let sess = ngrok::Session::builder()\n        .authtoken_from_env()\n        .connect()\n        .await?;\n\n    let mut listener = sess\n        .tls_endpoint()\n        // .allow_cidr(\"0.0.0.0/0\")\n        // .deny_cidr(\"10.1.1.1/32\")\n        // .verify_upstream_tls(false)\n        // .domain(\"<somedomain>.ngrok.io\")\n        // .forwards_to(\"example rust\"),\n        // .mutual_tlsca(CA_CERT.into())\n        // .proxy_proto(ProxyProto::None)\n        .termination(CERT.into(), KEY.into())\n        .metadata(\"example tunnel metadata from rust\")\n        .listen()\n        .await?;\n\n    let mut make_service = app.into_make_service_with_connect_info::<SocketAddr>();\n\n    let server = async move {\n        while let Some(conn) = listener.try_next().await? 
{\n            let remote_addr = conn.remote_addr();\n            let tower_service = unwrap_infallible(make_service.call(remote_addr).await);\n\n            tokio::spawn(async move {\n                let hyper_service =\n                    hyper::service::service_fn(move |request: Request<Incoming>| {\n                        tower_service.clone().oneshot(request)\n                    });\n\n                if let Err(err) = server::conn::auto::Builder::new(TokioExecutor::new())\n                    .serve_connection_with_upgrades(conn, hyper_service)\n                    .await\n                {\n                    eprintln!(\"failed to serve connection: {err:#}\");\n                }\n            });\n        }\n        Ok::<(), BoxError>(())\n    };\n\n    server.await?;\n\n    Ok(())\n}\n\nfn unwrap_infallible<T>(result: Result<T, Infallible>) -> T {\n    match result {\n        Ok(value) => value,\n        Err(err) => match err {},\n    }\n}\n"
  },
  {
    "path": "ngrok/src/config/common.rs",
    "content": "use std::{\n    collections::HashMap,\n    env,\n    process,\n};\n\nuse async_trait::async_trait;\nuse once_cell::sync::OnceCell;\nuse url::Url;\n\npub use crate::internals::proto::ProxyProto;\nuse crate::{\n    config::policies::Policy,\n    forwarder::Forwarder,\n    internals::proto::{\n        BindExtra,\n        BindOpts,\n        IpRestriction,\n        MutualTls,\n    },\n    session::RpcError,\n    Session,\n    Tunnel,\n};\n\n/// Represents the ingress configuration for an ngrok endpoint.\n///\n/// Bindings determine where and how your endpoint is exposed.\n#[derive(Debug, Clone, Copy, PartialEq, Eq)]\npub enum Binding {\n    /// Publicly accessible endpoint (default for most configurations).\n    Public,\n    /// Internal-only endpoint, not accessible from the public internet.\n    Internal,\n    /// Kubernetes cluster binding for service mesh integration.\n    Kubernetes,\n}\n\nimpl Binding {\n    /// Returns the string representation of this binding.\n    pub fn as_str(&self) -> &'static str {\n        match self {\n            Binding::Public => \"public\",\n            Binding::Internal => \"internal\",\n            Binding::Kubernetes => \"kubernetes\",\n        }\n    }\n\n    /// Validates if a string is a recognized binding value.\n    pub(crate) fn validate(s: &str) -> Result<(), String> {\n        match s.to_lowercase().as_str() {\n            \"public\" | \"internal\" | \"kubernetes\" => Ok(()),\n            _ => Err(format!(\n                \"Invalid binding value '{}'. 
Expected 'public', 'internal', or 'kubernetes'\",\n                s\n            )),\n        }\n    }\n}\n\nimpl From<Binding> for String {\n    fn from(binding: Binding) -> String {\n        binding.as_str().to_string()\n    }\n}\n\nimpl std::str::FromStr for Binding {\n    type Err = String;\n\n    fn from_str(s: &str) -> Result<Self, Self::Err> {\n        match s.to_lowercase().as_str() {\n            \"public\" => Ok(Binding::Public),\n            \"internal\" => Ok(Binding::Internal),\n            \"kubernetes\" => Ok(Binding::Kubernetes),\n            _ => Err(format!(\n                \"Invalid binding value '{}'. Expected 'public', 'internal', or 'kubernetes'\",\n                s\n            )),\n        }\n    }\n}\n\nimpl std::fmt::Display for Binding {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        write!(f, \"{}\", self.as_str())\n    }\n}\n\npub(crate) fn default_forwards_to() -> &'static str {\n    static FORWARDS_TO: OnceCell<String> = OnceCell::new();\n\n    FORWARDS_TO\n        .get_or_init(|| {\n            let hostname = hostname::get()\n                .unwrap_or(\"<unknown>\".into())\n                .to_string_lossy()\n                .into_owned();\n            let exe = env::current_exe()\n                .unwrap_or(\"<unknown>\".into())\n                .to_string_lossy()\n                .into_owned();\n            let pid = process::id();\n            format!(\"app://{hostname}/{exe}?pid={pid}\")\n        })\n        .as_str()\n}\n\n/// Trait representing things that can be built into an ngrok tunnel.\n#[async_trait]\npub trait TunnelBuilder: From<Session> {\n    /// The ngrok tunnel type that this builder produces.\n    type Tunnel: Tunnel;\n\n    /// Begin listening for new connections on this tunnel.\n    async fn listen(&self) -> Result<Self::Tunnel, RpcError>;\n}\n\n/// Trait representing things that can be built into an ngrok tunnel and then\n/// forwarded to a provided 
URL.\n#[async_trait]\npub trait ForwarderBuilder: TunnelBuilder {\n    /// Start listening for new connections on this tunnel and forward all\n    /// connections to the provided URL.\n    ///\n    /// This will also set the `forwards_to` metadata for the tunnel.\n    async fn listen_and_forward(&self, to_url: Url) -> Result<Forwarder<Self::Tunnel>, RpcError>;\n}\n\nmacro_rules! impl_builder {\n    ($(#[$m:meta])* $name:ident, $opts:ty, $tun:ident, $edgepoint:tt) => {\n        $(#[$m])*\n        #[derive(Clone)]\n        pub struct $name {\n            options: $opts,\n            // Note: This is only optional for testing purposes.\n            session: Option<Session>,\n        }\n\n        mod __builder_impl {\n            use $crate::forwarder::Forwarder;\n            use $crate::config::common::ForwarderBuilder;\n            use $crate::config::common::TunnelBuilder;\n            use $crate::session::RpcError;\n            use async_trait::async_trait;\n            use url::Url;\n\n            use super::*;\n\n            impl From<Session> for $name {\n                fn from(session: Session) -> Self {\n                    $name {\n                        options: Default::default(),\n                        session: session.into(),\n                    }\n                }\n            }\n\n            #[async_trait]\n            impl TunnelBuilder for $name {\n                type Tunnel = $tun;\n\n                async fn listen(&self) -> Result<$tun, RpcError> {\n                    Ok($tun {\n                        inner: self\n                            .session\n                            .as_ref()\n                            .unwrap()\n                            .start_tunnel(&self.options)\n                            .await?,\n                    })\n                }\n            }\n\n            #[async_trait]\n            impl ForwarderBuilder for $name {\n                async fn listen_and_forward(&self, to_url: Url) -> 
Result<Forwarder<$tun>, RpcError> {\n                    let mut cfg = self.clone();\n                    cfg.for_forwarding_to(&to_url).await;\n                    let tunnel = cfg.listen().await?;\n                    let info = tunnel.make_info();\n                    $crate::forwarder::forward(tunnel, info, to_url)\n                }\n            }\n        }\n    };\n}\n\n/// Tunnel configuration trait, implemented by our top-level config objects.\npub(crate) trait TunnelConfig {\n    /// The \"forwards to\" metadata.\n    ///\n    /// Only for display/informational purposes.\n    fn forwards_to(&self) -> String;\n    /// The L7 protocol the upstream service expects\n    fn forwards_proto(&self) -> String;\n    /// Whether to disable certificate verification for this tunnel.\n    fn verify_upstream_tls(&self) -> bool;\n    /// Internal-only, extra data sent when binding a tunnel.\n    fn extra(&self) -> BindExtra;\n    /// The protocol for this tunnel.\n    fn proto(&self) -> String;\n    /// The middleware and other configuration options for this tunnel.\n    fn opts(&self) -> Option<BindOpts>;\n    /// The labels for this tunnel.\n    fn labels(&self) -> HashMap<String, String>;\n}\n\n// delegate references\nimpl<T> TunnelConfig for &T\nwhere\n    T: TunnelConfig,\n{\n    fn forwards_to(&self) -> String {\n        (**self).forwards_to()\n    }\n\n    fn forwards_proto(&self) -> String {\n        (**self).forwards_proto()\n    }\n    fn verify_upstream_tls(&self) -> bool {\n        (**self).verify_upstream_tls()\n    }\n    fn extra(&self) -> BindExtra {\n        (**self).extra()\n    }\n    fn proto(&self) -> String {\n        (**self).proto()\n    }\n    fn opts(&self) -> Option<BindOpts> {\n        (**self).opts()\n    }\n    fn labels(&self) -> HashMap<String, String> {\n        (**self).labels()\n    }\n}\n\n/// Restrictions placed on the origin of incoming connections to the edge.\n#[derive(Clone, Default)]\npub(crate) struct CidrRestrictions {\n    /// 
Rejects connections that do not match the given CIDRs\n    pub(crate) allowed: Vec<String>,\n    /// Rejects connections that match the given CIDRs and allows all other CIDRs.\n    pub(crate) denied: Vec<String>,\n}\n\nimpl CidrRestrictions {\n    pub(crate) fn allow(&mut self, cidr: impl Into<String>) {\n        self.allowed.push(cidr.into());\n    }\n    pub(crate) fn deny(&mut self, cidr: impl Into<String>) {\n        self.denied.push(cidr.into());\n    }\n}\n\n// Common\n#[derive(Default, Clone)]\npub(crate) struct CommonOpts {\n    // Restrictions placed on the origin of incoming connections to the edge.\n    pub(crate) cidr_restrictions: CidrRestrictions,\n    // The version of PROXY protocol to use with this tunnel, zero if not\n    // using.\n    pub(crate) proxy_proto: ProxyProto,\n    // Tunnel-specific opaque metadata. Viewable via the API.\n    pub(crate) metadata: Option<String>,\n    // Tunnel backend metadata. Viewable via the dashboard and API, but has no\n    // bearing on tunnel behavior.\n    pub(crate) forwards_to: Option<String>,\n    // Tunnel L7 app protocol\n    pub(crate) forwards_proto: Option<String>,\n    // Whether to disable certificate verification for this tunnel.\n    verify_upstream_tls: Option<bool>,\n    // DEPRECATED: use traffic_policy instead.\n    pub(crate) policy: Option<Policy>,\n    // Policy that defines rules that should be applied to incoming or outgoing\n    // connections to the edge.\n    pub(crate) traffic_policy: Option<String>,\n    // Allows the endpoint to pool with other endpoints with the same host/port/binding\n    pub(crate) pooling_enabled: Option<bool>,\n}\n\nimpl CommonOpts {\n    // Get the proto version of cidr restrictions\n    pub(crate) fn ip_restriction(&self) -> Option<IpRestriction> {\n        (!self.cidr_restrictions.allowed.is_empty() || !self.cidr_restrictions.denied.is_empty())\n            .then_some(self.cidr_restrictions.clone().into())\n    }\n\n    pub(crate) fn for_forwarding_to(&mut 
self, to_url: &Url) -> &mut Self {\n        self.forwards_to = Some(to_url.as_str().into());\n        self\n    }\n\n    pub(crate) fn set_verify_upstream_tls(&mut self, verify_upstream_tls: bool) {\n        self.verify_upstream_tls = Some(verify_upstream_tls)\n    }\n\n    pub(crate) fn verify_upstream_tls(&self) -> bool {\n        self.verify_upstream_tls.unwrap_or(true)\n    }\n}\n\n// transform into the wire protocol format\nimpl From<CidrRestrictions> for IpRestriction {\n    fn from(cr: CidrRestrictions) -> Self {\n        IpRestriction {\n            allow_cidrs: cr.allowed,\n            deny_cidrs: cr.denied,\n        }\n    }\n}\n\nimpl From<&[bytes::Bytes]> for MutualTls {\n    fn from(b: &[bytes::Bytes]) -> Self {\n        let mut aggregated = Vec::new();\n        b.iter().for_each(|c| aggregated.extend(c));\n        MutualTls {\n            mutual_tls_ca: aggregated,\n        }\n    }\n}\n"
  },
  {
    "path": "ngrok/src/config/headers.rs",
    "content": "use std::collections::HashMap;\n\nuse crate::internals::proto::Headers as HeaderProto;\n\n/// HTTP Headers to modify at the ngrok edge.\n#[derive(Clone, Default)]\npub(crate) struct Headers {\n    /// Headers to add to requests or responses at the ngrok edge.\n    added: HashMap<String, String>,\n    /// Header names to remove from requests or responses at the ngrok edge.\n    removed: Vec<String>,\n}\n\nimpl Headers {\n    pub(crate) fn add(&mut self, name: impl Into<String>, value: impl Into<String>) {\n        self.added.insert(name.into().to_lowercase(), value.into());\n    }\n    pub(crate) fn remove(&mut self, name: impl Into<String>) {\n        self.removed.push(name.into().to_lowercase());\n    }\n    pub(crate) fn has_entries(&self) -> bool {\n        !self.added.is_empty() || !self.removed.is_empty()\n    }\n}\n\n// transform into the wire protocol format\nimpl From<Headers> for HeaderProto {\n    fn from(headers: Headers) -> Self {\n        HeaderProto {\n            add: headers\n                .added\n                .iter()\n                .map(|a| format!(\"{}:{}\", a.0, a.1))\n                .collect(),\n            remove: headers.removed,\n            add_parsed: HashMap::new(), // unused in this context\n        }\n    }\n}\n"
  },
  {
    "path": "ngrok/src/config/http.rs",
    "content": "use std::{\n    borrow::Borrow,\n    collections::HashMap,\n    convert::From,\n    str::FromStr,\n};\n\nuse bytes::Bytes;\nuse thiserror::Error;\nuse url::Url;\n\nuse super::{\n    common::ProxyProto,\n    Policy,\n};\n// These are used for doc comment links.\n#[allow(unused_imports)]\nuse crate::config::{\n    ForwarderBuilder,\n    TunnelBuilder,\n};\nuse crate::{\n    config::{\n        common::{\n            default_forwards_to,\n            Binding,\n            CommonOpts,\n            TunnelConfig,\n        },\n        headers::Headers,\n        oauth::OauthOptions,\n        oidc::OidcOptions,\n        webhook_verification::WebhookVerification,\n    },\n    internals::proto::{\n        BasicAuth,\n        BasicAuthCredential,\n        BindExtra,\n        BindOpts,\n        CircuitBreaker,\n        Compression,\n        HttpEndpoint,\n        UserAgentFilter,\n        WebsocketTcpConverter,\n    },\n    tunnel::HttpTunnel,\n    Session,\n};\n\n/// Error representing invalid string for Scheme\n#[derive(Debug, Clone, Error)]\n#[error(\"invalid scheme string: {}\", .0)]\npub struct InvalidSchemeString(String);\n\n/// The URL scheme for this HTTP endpoint.\n///\n/// [Scheme::HTTPS] will enable TLS termination at the ngrok edge.\n#[derive(Clone, Default, Eq, PartialEq)]\npub enum Scheme {\n    /// The `http` URL scheme.\n    HTTP,\n    /// The `https` URL scheme.\n    #[default]\n    HTTPS,\n}\n\nimpl FromStr for Scheme {\n    type Err = InvalidSchemeString;\n    fn from_str(s: &str) -> Result<Self, Self::Err> {\n        use Scheme::*;\n        Ok(match s.to_uppercase().as_str() {\n            \"HTTP\" => HTTP,\n            \"HTTPS\" => HTTPS,\n            _ => return Err(InvalidSchemeString(s.into())),\n        })\n    }\n}\n\n/// Restrictions placed on the origin of incoming connections to the edge.\n#[derive(Clone, Default)]\npub(crate) struct UaFilter {\n    /// Rejects connections that do not match the given regular expression\n    pub(crate) 
allow: Vec<String>,\n    /// Rejects connections that match the given regular expression and allows\n    /// all other regular expressions.\n    pub(crate) deny: Vec<String>,\n}\n\nimpl UaFilter {\n    pub(crate) fn allow(&mut self, allow: impl Into<String>) {\n        self.allow.push(allow.into());\n    }\n    pub(crate) fn deny(&mut self, deny: impl Into<String>) {\n        self.deny.push(deny.into());\n    }\n}\n\nimpl From<UaFilter> for UserAgentFilter {\n    fn from(ua: UaFilter) -> Self {\n        UserAgentFilter {\n            allow: ua.allow,\n            deny: ua.deny,\n        }\n    }\n}\n\n/// The options for a HTTP edge.\n#[derive(Default, Clone)]\nstruct HttpOptions {\n    pub(crate) common_opts: CommonOpts,\n    pub(crate) scheme: Scheme,\n    pub(crate) domain: Option<String>,\n    pub(crate) mutual_tlsca: Vec<bytes::Bytes>,\n    pub(crate) compression: bool,\n    pub(crate) websocket_tcp_conversion: bool,\n    pub(crate) circuit_breaker: f64,\n    pub(crate) request_headers: Headers,\n    pub(crate) response_headers: Headers,\n    pub(crate) rewrite_host: bool,\n    pub(crate) basic_auth: Vec<(String, String)>,\n    pub(crate) oauth: Option<OauthOptions>,\n    pub(crate) oidc: Option<OidcOptions>,\n    pub(crate) webhook_verification: Option<WebhookVerification>,\n    // Filtering placed on the origin of incoming connections to the edge.\n    pub(crate) user_agent_filter: UaFilter,\n    pub(crate) bindings: Vec<String>,\n}\n\nimpl HttpOptions {\n    fn user_agent_filter(&self) -> Option<UserAgentFilter> {\n        (!self.user_agent_filter.allow.is_empty() || !self.user_agent_filter.deny.is_empty())\n            .then_some(self.user_agent_filter.clone().into())\n    }\n}\n\nimpl TunnelConfig for HttpOptions {\n    fn forwards_to(&self) -> String {\n        self.common_opts\n            .forwards_to\n            .clone()\n            .unwrap_or(default_forwards_to().into())\n    }\n\n    fn forwards_proto(&self) -> String {\n        
self.common_opts.forwards_proto.clone().unwrap_or_default()\n    }\n\n    fn verify_upstream_tls(&self) -> bool {\n        self.common_opts.verify_upstream_tls()\n    }\n\n    fn extra(&self) -> BindExtra {\n        BindExtra {\n            token: Default::default(),\n            ip_policy_ref: Default::default(),\n            metadata: self.common_opts.metadata.clone().unwrap_or_default(),\n            bindings: self.bindings.clone(),\n            pooling_enabled: self.common_opts.pooling_enabled.unwrap_or(false),\n        }\n    }\n    fn proto(&self) -> String {\n        if self.scheme == Scheme::HTTP {\n            return \"http\".into();\n        }\n        \"https\".into()\n    }\n    fn opts(&self) -> Option<BindOpts> {\n        let http_endpoint = HttpEndpoint {\n            proxy_proto: self.common_opts.proxy_proto,\n            domain: self.domain.clone().unwrap_or_default(),\n            hostname: String::new(),\n            compression: self.compression.then_some(Compression {}),\n            circuit_breaker: (self.circuit_breaker != 0f64).then_some(CircuitBreaker {\n                error_threshold: self.circuit_breaker,\n            }),\n            ip_restriction: self.common_opts.ip_restriction(),\n            basic_auth: (!self.basic_auth.is_empty()).then_some(self.basic_auth.as_slice().into()),\n            oauth: self.oauth.clone().map(From::from),\n            oidc: self.oidc.clone().map(From::from),\n            webhook_verification: self.webhook_verification.clone().map(From::from),\n            mutual_tls_ca: (!self.mutual_tlsca.is_empty())\n                .then_some(self.mutual_tlsca.as_slice().into()),\n            request_headers: self\n                .request_headers\n                .has_entries()\n                .then_some(self.request_headers.clone().into()),\n            response_headers: self\n                .response_headers\n                .has_entries()\n                .then_some(self.response_headers.clone().into()),\n       
     websocket_tcp_converter: self\n                .websocket_tcp_conversion\n                .then_some(WebsocketTcpConverter {}),\n            user_agent_filter: self.user_agent_filter(),\n            traffic_policy: if self.common_opts.traffic_policy.is_some() {\n                self.common_opts.traffic_policy.clone().map(From::from)\n            } else if self.common_opts.policy.is_some() {\n                self.common_opts.policy.clone().map(From::from)\n            } else {\n                None\n            },\n            ..Default::default()\n        };\n\n        Some(BindOpts::Http(http_endpoint))\n    }\n    fn labels(&self) -> HashMap<String, String> {\n        HashMap::new()\n    }\n}\n\n// transform into the wire protocol format\nimpl From<&[(String, String)]> for BasicAuth {\n    fn from(v: &[(String, String)]) -> Self {\n        BasicAuth {\n            credentials: v.iter().cloned().map(From::from).collect(),\n        }\n    }\n}\n\n// transform into the wire protocol format\nimpl From<(String, String)> for BasicAuthCredential {\n    fn from(b: (String, String)) -> Self {\n        BasicAuthCredential {\n            username: b.0,\n            cleartext_password: b.1,\n            hashed_password: vec![], // unused in this context\n        }\n    }\n}\n\nimpl_builder! 
{\n    /// A builder for a tunnel backing an HTTP endpoint.\n    ///\n    /// https://ngrok.com/docs/http/\n    HttpTunnelBuilder, HttpOptions, HttpTunnel, endpoint\n}\n\nimpl HttpTunnelBuilder {\n    /// Add the provided CIDR to the allowlist.\n    ///\n    /// https://ngrok.com/docs/http/ip-restrictions/\n    pub fn allow_cidr(&mut self, cidr: impl Into<String>) -> &mut Self {\n        self.options.common_opts.cidr_restrictions.allow(cidr);\n        self\n    }\n    /// Add the provided CIDR to the denylist.\n    ///\n    /// https://ngrok.com/docs/http/ip-restrictions/\n    pub fn deny_cidr(&mut self, cidr: impl Into<String>) -> &mut Self {\n        self.options.common_opts.cidr_restrictions.deny(cidr);\n        self\n    }\n    /// Sets the PROXY protocol version for connections over this tunnel.\n    pub fn proxy_proto(&mut self, proxy_proto: ProxyProto) -> &mut Self {\n        self.options.common_opts.proxy_proto = proxy_proto;\n        self\n    }\n    /// Sets the opaque metadata string for this tunnel.\n    ///\n    /// https://ngrok.com/docs/api/resources/tunnels/#tunnel-fields\n    pub fn metadata(&mut self, metadata: impl Into<String>) -> &mut Self {\n        self.options.common_opts.metadata = Some(metadata.into());\n        self\n    }\n\n    /// Sets the ingress configuration for this endpoint.\n    ///\n    /// Valid binding values are:\n    /// - `\"public\"` - Publicly accessible endpoint\n    /// - `\"internal\"` - Internal-only endpoint\n    /// - `\"kubernetes\"` - Kubernetes cluster binding\n    ///\n    /// If not specified, the ngrok service will use its default binding configuration.\n    ///\n    /// # Panics\n    ///\n    /// Panics if called more than once or if an invalid binding value is provided.\n    ///\n    /// # Examples\n    ///\n    /// ```no_run\n    /// # use ngrok::Session;\n    /// # use ngrok::config::TunnelBuilder;\n    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {\n    /// let session = 
Session::builder().authtoken_from_env().connect().await?;\n    ///\n    /// // Using string\n    /// let tunnel = session.http_endpoint().binding(\"internal\").listen().await?;\n    ///\n    /// // Using typed enum\n    /// use ngrok::config::Binding;\n    /// let tunnel = session.http_endpoint().binding(Binding::Public).listen().await?;\n    /// # Ok(())\n    /// # }\n    /// ```\n    pub fn binding(&mut self, binding: impl Into<String>) -> &mut Self {\n        if !self.options.bindings.is_empty() {\n            panic!(\"binding() can only be called once\");\n        }\n        let binding_str = binding.into();\n        if let Err(e) = Binding::validate(&binding_str) {\n            panic!(\"{}\", e);\n        }\n        self.options.bindings.push(binding_str);\n        self\n    }\n    /// Sets the ForwardsTo string for this tunnel. This can be viewed via the\n    /// API or dashboard.\n    ///\n    /// This overrides the default process info if using\n    /// [TunnelBuilder::listen], and is in turn overridden by the url provided\n    /// to [ForwarderBuilder::listen_and_forward].\n    ///\n    /// https://ngrok.com/docs/api/resources/tunnels/#tunnel-fields\n    pub fn forwards_to(&mut self, forwards_to: impl Into<String>) -> &mut Self {\n        self.options.common_opts.forwards_to = Some(forwards_to.into());\n        self\n    }\n\n    /// Sets the L7 protocol for this tunnel.\n    pub fn app_protocol(&mut self, app_protocol: impl Into<String>) -> &mut Self {\n        self.options.common_opts.forwards_proto = Some(app_protocol.into());\n        self\n    }\n\n    /// Disables backend TLS certificate verification for forwards from this tunnel.\n    pub fn verify_upstream_tls(&mut self, verify_upstream_tls: bool) -> &mut Self {\n        self.options\n            .common_opts\n            .set_verify_upstream_tls(verify_upstream_tls);\n        self\n    }\n\n    /// Sets the scheme for this edge.\n    pub fn scheme(&mut self, scheme: Scheme) -> &mut Self {\n        
self.options.scheme = scheme;\n        self\n    }\n\n    /// Sets the domain to request for this edge.\n    ///\n    /// https://ngrok.com/docs/network-edge/domains-and-tcp-addresses/#domains\n    pub fn domain(&mut self, domain: impl Into<String>) -> &mut Self {\n        self.options.domain = Some(domain.into());\n        self\n    }\n    /// Adds a certificate in PEM format to use for mutual TLS authentication.\n    ///\n    /// These will be used to authenticate client certificates for requests at\n    /// the ngrok edge.\n    ///\n    /// https://ngrok.com/docs/http/mutual-tls/\n    pub fn mutual_tlsca(&mut self, mutual_tlsca: Bytes) -> &mut Self {\n        self.options.mutual_tlsca.push(mutual_tlsca);\n        self\n    }\n    /// Enables gzip compression.\n    ///\n    /// https://ngrok.com/docs/http/compression/\n    pub fn compression(&mut self) -> &mut Self {\n        self.options.compression = true;\n        self\n    }\n    /// Enables the websocket-to-tcp converter.\n    ///\n    /// https://ngrok.com/docs/http/websocket-tcp-converter/\n    pub fn websocket_tcp_conversion(&mut self) -> &mut Self {\n        self.options.websocket_tcp_conversion = true;\n        self\n    }\n    /// Sets the 5XX response ratio at which the ngrok edge will stop sending\n    /// requests to this tunnel.\n    ///\n    /// https://ngrok.com/docs/http/circuit-breaker/\n    pub fn circuit_breaker(&mut self, circuit_breaker: f64) -> &mut Self {\n        self.options.circuit_breaker = circuit_breaker;\n        self\n    }\n\n    /// Automatically rewrite the host header to the one in the provided URL\n    /// when calling [ForwarderBuilder::listen_and_forward]. Does nothing if\n    /// using [TunnelBuilder::listen]. 
Defaults to `false`.\n    ///\n    /// If you need to set the host header to a specific value, use\n    /// `cfg.request_header(\"host\", \"some.host.com\")` instead.\n    pub fn host_header_rewrite(&mut self, rewrite: bool) -> &mut Self {\n        self.options.rewrite_host = rewrite;\n        self\n    }\n\n    /// Adds a header to all requests to this edge.\n    ///\n    /// https://ngrok.com/docs/http/request-headers/\n    pub fn request_header(\n        &mut self,\n        name: impl Into<String>,\n        value: impl Into<String>,\n    ) -> &mut Self {\n        self.options.request_headers.add(name, value);\n        self\n    }\n    /// Adds a header to all responses coming from this edge.\n    ///\n    /// https://ngrok.com/docs/http/response-headers/\n    pub fn response_header(\n        &mut self,\n        name: impl Into<String>,\n        value: impl Into<String>,\n    ) -> &mut Self {\n        self.options.response_headers.add(name, value);\n        self\n    }\n    /// Removes a header from requests to this edge.\n    ///\n    /// https://ngrok.com/docs/http/request-headers/\n    pub fn remove_request_header(&mut self, name: impl Into<String>) -> &mut Self {\n        self.options.request_headers.remove(name);\n        self\n    }\n    /// Removes a header from responses from this edge.\n    ///\n    /// https://ngrok.com/docs/http/response-headers/\n    pub fn remove_response_header(&mut self, name: impl Into<String>) -> &mut Self {\n        self.options.response_headers.remove(name);\n        self\n    }\n\n    /// Adds the provided credentials to the list of basic authentication\n    /// credentials.\n    ///\n    /// https://ngrok.com/docs/http/basic-auth/\n    pub fn basic_auth(\n        &mut self,\n        username: impl Into<String>,\n        password: impl Into<String>,\n    ) -> &mut Self {\n        self.options\n            .basic_auth\n            .push((username.into(), password.into()));\n        self\n    }\n\n    /// Set the OAuth 
configuraton for this edge.\n    ///\n    /// https://ngrok.com/docs/http/oauth/\n    pub fn oauth(&mut self, oauth: impl Borrow<OauthOptions>) -> &mut Self {\n        self.options.oauth = Some(oauth.borrow().to_owned());\n        self\n    }\n\n    /// Set the OIDC configuration for this edge.\n    ///\n    /// https://ngrok.com/docs/http/openid-connect/\n    pub fn oidc(&mut self, oidc: impl Borrow<OidcOptions>) -> &mut Self {\n        self.options.oidc = Some(oidc.borrow().to_owned());\n        self\n    }\n\n    /// Configures webhook verification for this edge.\n    ///\n    /// https://ngrok.com/docs/http/webhook-verification/\n    pub fn webhook_verification(\n        &mut self,\n        provider: impl Into<String>,\n        secret: impl Into<String>,\n    ) -> &mut Self {\n        self.options.webhook_verification = Some(WebhookVerification {\n            provider: provider.into(),\n            secret: secret.into().into(),\n        });\n        self\n    }\n\n    /// Add the provided regex to the allowlist.\n    ///\n    /// https://ngrok.com/docs/http/user-agent-filter/\n    pub fn allow_user_agent(&mut self, regex: impl Into<String>) -> &mut Self {\n        self.options.user_agent_filter.allow(regex);\n        self\n    }\n    /// Add the provided regex to the denylist.\n    ///\n    /// https://ngrok.com/docs/http/user-agent-filter/\n    pub fn deny_user_agent(&mut self, regex: impl Into<String>) -> &mut Self {\n        self.options.user_agent_filter.deny(regex);\n        self\n    }\n\n    /// DEPRECATED: use traffic_policy instead.\n    pub fn policy<S>(&mut self, s: S) -> Result<&mut Self, S::Error>\n    where\n        S: TryInto<Policy>,\n    {\n        self.options.common_opts.policy = Some(s.try_into()?);\n        Ok(self)\n    }\n\n    /// Set policy for this edge.\n    pub fn traffic_policy(&mut self, policy_str: impl Into<String>) -> &mut Self {\n        self.options.common_opts.traffic_policy = Some(policy_str.into());\n        self\n    }\n\n 
   pub(crate) async fn for_forwarding_to(&mut self, to_url: &Url) -> &mut Self {\n        self.options.common_opts.for_forwarding_to(to_url);\n        if let Some(host) = to_url.host_str().filter(|_| self.options.rewrite_host) {\n            self.request_header(\"host\", host);\n        }\n        self\n    }\n\n    /// Allows the endpoint to pool with other endpoints with the same host/port/binding\n    pub fn pooling_enabled(&mut self, pooling_enabled: impl Into<bool>) -> &mut Self {\n        self.options.common_opts.pooling_enabled = Some(pooling_enabled.into());\n        self\n    }\n}\n\n#[cfg(test)]\nmod test {\n    use super::*;\n    use crate::config::policies::test::POLICY_JSON;\n    const METADATA: &str = \"testmeta\";\n    const TEST_FORWARD: &str = \"testforward\";\n    const TEST_FORWARD_PROTO: &str = \"http2\";\n    const ALLOW_CIDR: &str = \"0.0.0.0/0\";\n    const DENY_CIDR: &str = \"10.1.1.1/32\";\n    const CA_CERT: &[u8] = \"test ca cert\".as_bytes();\n    const CA_CERT2: &[u8] = \"test ca cert2\".as_bytes();\n    const DOMAIN: &str = \"test domain\";\n    const ALLOW_AGENT: &str = r\"bar/(\\d)+\";\n    const DENY_AGENT: &str = r\"foo/(\\d)+\";\n\n    #[test]\n    fn test_interface_to_proto() {\n        // pass to a function accepting the trait to avoid\n        // \"creates a temporary which is freed while still in use\"\n        tunnel_test(\n            &HttpTunnelBuilder {\n                session: None,\n                options: Default::default(),\n            }\n            .allow_user_agent(ALLOW_AGENT)\n            .deny_user_agent(DENY_AGENT)\n            .allow_cidr(ALLOW_CIDR)\n            .deny_cidr(DENY_CIDR)\n            .proxy_proto(ProxyProto::V2)\n            .metadata(METADATA)\n            .scheme(Scheme::from_str(\"hTtPs\").unwrap())\n            .domain(DOMAIN)\n            .mutual_tlsca(CA_CERT.into())\n            .mutual_tlsca(CA_CERT2.into())\n            .compression()\n            .websocket_tcp_conversion()\n          
  .circuit_breaker(0.5)\n            .request_header(\"X-Req-Yup\", \"true\")\n            .response_header(\"X-Res-Yup\", \"true\")\n            .remove_request_header(\"X-Req-Nope\")\n            .remove_response_header(\"X-Res-Nope\")\n            .oauth(OauthOptions::new(\"google\"))\n            .oauth(\n                OauthOptions::new(\"google\")\n                    .allow_email(\"<user>@<domain>\")\n                    .allow_domain(\"<domain>\")\n                    .scope(\"<scope>\"),\n            )\n            .oidc(OidcOptions::new(\"<url>\", \"<id>\", \"<secret>\"))\n            .oidc(\n                OidcOptions::new(\"<url>\", \"<id>\", \"<secret>\")\n                    .allow_email(\"<user>@<domain>\")\n                    .allow_domain(\"<domain>\")\n                    .scope(\"<scope>\"),\n            )\n            .webhook_verification(\"twilio\", \"asdf\")\n            .basic_auth(\"ngrok\", \"online1line\")\n            .forwards_to(TEST_FORWARD)\n            .app_protocol(\"http2\")\n            .policy(POLICY_JSON)\n            .unwrap()\n            .options,\n        );\n    }\n\n    fn tunnel_test<C>(tunnel_cfg: C)\n    where\n        C: TunnelConfig,\n    {\n        assert_eq!(TEST_FORWARD, tunnel_cfg.forwards_to());\n        assert_eq!(TEST_FORWARD_PROTO, tunnel_cfg.forwards_proto());\n        let extra = tunnel_cfg.extra();\n        assert_eq!(String::default(), *extra.token);\n        assert_eq!(METADATA, extra.metadata);\n        assert_eq!(Vec::<String>::new(), extra.bindings);\n        assert_eq!(String::default(), extra.ip_policy_ref);\n\n        assert_eq!(\"https\", tunnel_cfg.proto());\n\n        let opts = tunnel_cfg.opts().unwrap();\n        assert!(matches!(opts, BindOpts::Http { .. 
}));\n        if let BindOpts::Http(endpoint) = opts {\n            assert_eq!(DOMAIN, endpoint.domain);\n            assert_eq!(String::default(), endpoint.subdomain);\n            assert!(matches!(endpoint.proxy_proto, ProxyProto::V2));\n\n            let ip_restriction = endpoint.ip_restriction.unwrap();\n            assert_eq!(Vec::from([ALLOW_CIDR]), ip_restriction.allow_cidrs);\n            assert_eq!(Vec::from([DENY_CIDR]), ip_restriction.deny_cidrs);\n\n            let mutual_tls = endpoint.mutual_tls_ca.unwrap();\n            let mut agg = CA_CERT.to_vec();\n            agg.extend(CA_CERT2.to_vec());\n            assert_eq!(agg, mutual_tls.mutual_tls_ca);\n\n            assert!(endpoint.compression.is_some());\n            assert!(endpoint.websocket_tcp_converter.is_some());\n            assert_eq!(0.5f64, endpoint.circuit_breaker.unwrap().error_threshold);\n\n            let request_headers = endpoint.request_headers.unwrap();\n            assert_eq!([\"x-req-yup:true\"].to_vec(), request_headers.add);\n            assert_eq!([\"x-req-nope\"].to_vec(), request_headers.remove);\n\n            let response_headers = endpoint.response_headers.unwrap();\n            assert_eq!([\"x-res-yup:true\"].to_vec(), response_headers.add);\n            assert_eq!([\"x-res-nope\"].to_vec(), response_headers.remove);\n\n            let webhook = endpoint.webhook_verification.unwrap();\n            assert_eq!(\"twilio\", webhook.provider);\n            assert_eq!(\"asdf\", *webhook.secret);\n            assert!(webhook.sealed_secret.is_empty());\n\n            let creds = endpoint.basic_auth.unwrap().credentials;\n            assert_eq!(1, creds.len());\n            assert_eq!(\"ngrok\", creds[0].username);\n            assert_eq!(\"online1line\", creds[0].cleartext_password);\n            assert!(creds[0].hashed_password.is_empty());\n\n            let oauth = endpoint.oauth.unwrap();\n            assert_eq!(\"google\", oauth.provider);\n            
assert_eq!([\"<user>@<domain>\"].to_vec(), oauth.allow_emails);\n            assert_eq!([\"<domain>\"].to_vec(), oauth.allow_domains);\n            assert_eq!([\"<scope>\"].to_vec(), oauth.scopes);\n            assert_eq!(String::default(), oauth.client_id);\n            assert_eq!(String::default(), *oauth.client_secret);\n            assert!(oauth.sealed_client_secret.is_empty());\n\n            let oidc = endpoint.oidc.unwrap();\n            assert_eq!(\"<url>\", oidc.issuer_url);\n            assert_eq!([\"<user>@<domain>\"].to_vec(), oidc.allow_emails);\n            assert_eq!([\"<domain>\"].to_vec(), oidc.allow_domains);\n            assert_eq!([\"<scope>\"].to_vec(), oidc.scopes);\n            assert_eq!(\"<id>\", oidc.client_id);\n            assert_eq!(\"<secret>\", *oidc.client_secret);\n            assert!(oidc.sealed_client_secret.is_empty());\n\n            let user_agent_filter = endpoint.user_agent_filter.unwrap();\n            assert_eq!(Vec::from([ALLOW_AGENT]), user_agent_filter.allow);\n            assert_eq!(Vec::from([DENY_AGENT]), user_agent_filter.deny);\n        }\n\n        assert_eq!(HashMap::new(), tunnel_cfg.labels());\n    }\n\n    #[test]\n    fn test_binding_valid_values() {\n        let mut builder = HttpTunnelBuilder {\n            session: None,\n            options: Default::default(),\n        };\n\n        // Test \"public\"\n        builder.binding(\"public\");\n        assert_eq!(vec![\"public\"], builder.options.bindings);\n\n        // Test \"internal\"\n        let mut builder = HttpTunnelBuilder {\n            session: None,\n            options: Default::default(),\n        };\n        builder.binding(\"internal\");\n        assert_eq!(vec![\"internal\"], builder.options.bindings);\n\n        // Test \"kubernetes\"\n        let mut builder = HttpTunnelBuilder {\n            session: None,\n            options: Default::default(),\n        };\n        builder.binding(\"kubernetes\");\n        
assert_eq!(vec![\"kubernetes\"], builder.options.bindings);\n\n        // Test with Binding enum\n        let mut builder = HttpTunnelBuilder {\n            session: None,\n            options: Default::default(),\n        };\n        builder.binding(Binding::Internal);\n        assert_eq!(vec![\"internal\"], builder.options.bindings);\n    }\n\n    #[test]\n    #[should_panic(expected = \"Invalid binding value\")]\n    fn test_binding_invalid_value() {\n        let mut builder = HttpTunnelBuilder {\n            session: None,\n            options: Default::default(),\n        };\n        builder.binding(\"invalid\");\n    }\n\n    #[test]\n    #[should_panic(expected = \"binding() can only be called once\")]\n    fn test_binding_called_twice() {\n        let mut builder = HttpTunnelBuilder {\n            session: None,\n            options: Default::default(),\n        };\n        builder.binding(\"public\");\n        builder.binding(\"internal\");\n    }\n\n    #[test]\n    fn test_binding_with_domain() {\n        let mut builder = HttpTunnelBuilder {\n            session: None,\n            options: Default::default(),\n        };\n        builder.binding(\"internal\").domain(\"foo.internal\");\n\n        // Check that both binding and domain are set\n        assert_eq!(vec![\"internal\"], builder.options.bindings);\n        assert_eq!(Some(\"foo.internal\".to_string()), builder.options.domain);\n\n        // Check that they're properly included in extra() and opts()\n        let extra = builder.options.extra();\n        assert_eq!(vec![\"internal\"], extra.bindings);\n\n        let opts = builder.options.opts().unwrap();\n        if let BindOpts::Http(endpoint) = opts {\n            assert_eq!(\"foo.internal\", endpoint.domain);\n        } else {\n            panic!(\"Expected Http endpoint\");\n        }\n    }\n}\n"
  },
  {
    "path": "ngrok/src/config/labeled.rs",
    "content": "use std::collections::HashMap;\n\nuse url::Url;\n\n// These are used for doc comment links.\n#[allow(unused_imports)]\nuse crate::config::{\n    ForwarderBuilder,\n    TunnelBuilder,\n};\nuse crate::{\n    config::common::{\n        default_forwards_to,\n        CommonOpts,\n        TunnelConfig,\n    },\n    internals::proto::{\n        BindExtra,\n        BindOpts,\n    },\n    tunnel::LabeledTunnel,\n    Session,\n};\n\n/// Options for labeled tunnels.\n#[derive(Default, Clone)]\nstruct LabeledOptions {\n    pub(crate) common_opts: CommonOpts,\n    pub(crate) labels: HashMap<String, String>,\n}\n\nimpl TunnelConfig for LabeledOptions {\n    fn forwards_to(&self) -> String {\n        self.common_opts\n            .forwards_to\n            .clone()\n            .unwrap_or(default_forwards_to().into())\n    }\n\n    fn forwards_proto(&self) -> String {\n        self.common_opts.forwards_proto.clone().unwrap_or_default()\n    }\n\n    fn verify_upstream_tls(&self) -> bool {\n        self.common_opts.verify_upstream_tls()\n    }\n\n    fn extra(&self) -> BindExtra {\n        BindExtra {\n            token: Default::default(),\n            ip_policy_ref: Default::default(),\n            metadata: self.common_opts.metadata.clone().unwrap_or_default(),\n            bindings: Vec::new(),\n            pooling_enabled: self.common_opts.pooling_enabled.unwrap_or(false),\n        }\n    }\n    fn proto(&self) -> String {\n        \"\".into()\n    }\n    fn opts(&self) -> Option<BindOpts> {\n        None\n    }\n    fn labels(&self) -> HashMap<String, String> {\n        self.labels.clone()\n    }\n}\n\nimpl_builder! 
{\n    /// A builder for a labeled tunnel.\n    LabeledTunnelBuilder, LabeledOptions, LabeledTunnel, edge\n}\n\nimpl LabeledTunnelBuilder {\n    /// Sets the opaque metadata string for this tunnel.\n    /// Viewable via the API.\n    ///\n    /// https://ngrok.com/docs/api/resources/tunnels/#tunnel-fields\n    pub fn metadata(&mut self, metadata: impl Into<String>) -> &mut Self {\n        self.options.common_opts.metadata = Some(metadata.into());\n        self\n    }\n\n    /// Add a label, value pair for this tunnel.\n    ///\n    /// https://ngrok.com/docs/network-edge/edges/#tunnel-group\n    pub fn label(&mut self, label: impl Into<String>, value: impl Into<String>) -> &mut Self {\n        self.options.labels.insert(label.into(), value.into());\n        self\n    }\n\n    /// Sets the ForwardsTo string for this tunnel. This can be viewed via the\n    /// API or dashboard.\n    ///\n    /// This overrides the default process info if using\n    /// [TunnelBuilder::listen], and is in turn overridden by the url provided\n    /// to [ForwarderBuilder::listen_and_forward].\n    ///\n    /// https://ngrok.com/docs/api/resources/tunnels/#tunnel-fields\n    pub fn forwards_to(&mut self, forwards_to: impl Into<String>) -> &mut Self {\n        self.options.common_opts.forwards_to = forwards_to.into().into();\n        self\n    }\n\n    /// Sets the L7 protocol string for this tunnel.\n    pub fn app_protocol(&mut self, app_protocol: impl Into<String>) -> &mut Self {\n        self.options.common_opts.forwards_proto = Some(app_protocol.into());\n        self\n    }\n\n    /// Disables backend TLS certificate verification for forwards from this tunnel.\n    pub fn verify_upstream_tls(&mut self, verify_upstream_tls: bool) -> &mut Self {\n        self.options\n            .common_opts\n            .set_verify_upstream_tls(verify_upstream_tls);\n        self\n    }\n\n    pub(crate) async fn for_forwarding_to(&mut self, to_url: &Url) -> &mut Self {\n        
self.options.common_opts.for_forwarding_to(to_url);\n        self\n    }\n}\n\n#[cfg(test)]\nmod test {\n    use super::*;\n\n    const METADATA: &str = \"testmeta\";\n    const LABEL_KEY: &str = \"edge\";\n    const LABEL_VAL: &str = \"edghts_2IC6RJ6CQnuh7waciWyaGKc50Nt\";\n\n    #[test]\n    fn test_interface_to_proto() {\n        // pass to a function accepting the trait to avoid\n        // \"creates a temporary which is freed while still in use\"\n        tunnel_test(\n            &LabeledTunnelBuilder {\n                session: None,\n                options: Default::default(),\n            }\n            .metadata(METADATA)\n            .label(LABEL_KEY, LABEL_VAL)\n            .options,\n        );\n    }\n\n    fn tunnel_test<C>(tunnel_cfg: &C)\n    where\n        C: TunnelConfig,\n    {\n        assert_eq!(default_forwards_to(), tunnel_cfg.forwards_to());\n\n        let extra = tunnel_cfg.extra();\n        assert_eq!(String::default(), *extra.token);\n        assert_eq!(METADATA, extra.metadata);\n        assert_eq!(String::default(), extra.ip_policy_ref);\n\n        assert_eq!(\"\", tunnel_cfg.proto());\n\n        assert!(tunnel_cfg.opts().is_none());\n\n        let mut labels: HashMap<String, String> = HashMap::new();\n        labels.insert(LABEL_KEY.into(), LABEL_VAL.into());\n        assert_eq!(labels, tunnel_cfg.labels());\n    }\n}\n"
  },
  {
    "path": "ngrok/src/config/oauth.rs",
    "content": "use crate::internals::proto::{\n    Oauth,\n    SecretString,\n};\n\n/// Oauth Options configuration\n///\n/// https://ngrok.com/docs/http/oauth/\n#[derive(Clone, Default)]\npub struct OauthOptions {\n    /// The OAuth provider to use\n    provider: String,\n\n    /// The client ID, if a custom one is being used\n    client_id: String,\n    /// The client secret, if a custom one is being used\n    client_secret: SecretString,\n\n    /// Email addresses of users to authorize.\n    allow_emails: Vec<String>,\n    /// Email domains of users to authorize.\n    allow_domains: Vec<String>,\n    /// OAuth scopes to request from the provider.\n    scopes: Vec<String>,\n}\n\nimpl OauthOptions {\n    /// Create a new [OauthOptions] for the given provider.\n    pub fn new(provider: impl Into<String>) -> Self {\n        OauthOptions {\n            provider: provider.into(),\n            ..Default::default()\n        }\n    }\n\n    /// Provide an OAuth client ID for custom apps.\n    pub fn client_id(&mut self, id: impl Into<String>) -> &mut Self {\n        self.client_id = id.into();\n        self\n    }\n\n    /// Provide an OAuth client secret for custom apps.\n    pub fn client_secret(&mut self, secret: impl Into<String>) -> &mut Self {\n        self.client_secret = SecretString::from(secret.into());\n        self\n    }\n\n    /// Append an email address to the list of allowed emails.\n    pub fn allow_email(&mut self, email: impl Into<String>) -> &mut Self {\n        self.allow_emails.push(email.into());\n        self\n    }\n    /// Append an email domain to the list of allowed domains.\n    pub fn allow_domain(&mut self, domain: impl Into<String>) -> &mut Self {\n        self.allow_domains.push(domain.into());\n        self\n    }\n    /// Append a scope to the list of scopes to request.\n    pub fn scope(&mut self, scope: impl Into<String>) -> &mut Self {\n        self.scopes.push(scope.into());\n        self\n    }\n}\n\n// transform into the wire 
protocol format\nimpl From<OauthOptions> for Oauth {\n    fn from(o: OauthOptions) -> Self {\n        Oauth {\n            provider: o.provider,\n            client_id: o.client_id,\n            client_secret: o.client_secret,\n            sealed_client_secret: Default::default(), // unused in this context\n            allow_emails: o.allow_emails,\n            allow_domains: o.allow_domains,\n            scopes: o.scopes,\n        }\n    }\n}\n"
  },
  {
    "path": "ngrok/src/config/oidc.rs",
    "content": "use crate::internals::proto::{\n    Oidc,\n    SecretString,\n};\n\n/// Oidc Options configuration\n///\n/// https://ngrok.com/docs/http/openid-connect/\n#[derive(Clone, Default)]\npub struct OidcOptions {\n    issuer_url: String,\n    client_id: String,\n    client_secret: SecretString,\n    allow_emails: Vec<String>,\n    allow_domains: Vec<String>,\n    scopes: Vec<String>,\n}\n\nimpl OidcOptions {\n    /// Create a new [OidcOptions] with the given issuer and client information.\n    pub fn new(\n        issuer_url: impl Into<String>,\n        client_id: impl Into<String>,\n        client_secret: impl Into<String>,\n    ) -> Self {\n        OidcOptions {\n            issuer_url: issuer_url.into(),\n            client_id: client_id.into(),\n            client_secret: client_secret.into().into(),\n            ..Default::default()\n        }\n    }\n\n    /// Allow the oidc user with the given email to access the tunnel.\n    pub fn allow_email(&mut self, email: impl Into<String>) -> &mut Self {\n        self.allow_emails.push(email.into());\n        self\n    }\n    /// Allow the oidc user with the given email domain to access the tunnel.\n    pub fn allow_domain(&mut self, domain: impl Into<String>) -> &mut Self {\n        self.allow_domains.push(domain.into());\n        self\n    }\n    /// Request the given scope from the oidc provider.\n    pub fn scope(&mut self, scope: impl Into<String>) -> &mut Self {\n        self.scopes.push(scope.into());\n        self\n    }\n}\n\n// transform into the wire protocol format\nimpl From<OidcOptions> for Oidc {\n    fn from(o: OidcOptions) -> Self {\n        Oidc {\n            issuer_url: o.issuer_url,\n            client_id: o.client_id,\n            client_secret: o.client_secret,\n            sealed_client_secret: Default::default(), // unused in this context\n            allow_emails: o.allow_emails,\n            allow_domains: o.allow_domains,\n            scopes: o.scopes,\n        }\n    }\n}\n"
  },
  {
    "path": "ngrok/src/config/policies.rs",
    "content": "use std::{\n    fs::read_to_string,\n    io,\n};\n\nuse serde::{\n    Deserialize,\n    Serialize,\n};\nuse thiserror::Error;\n\nuse crate::internals::proto;\n\n/// A policy that defines rules that should be applied to incoming or outgoing\n/// connections to the edge.\n#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq)]\n#[serde(default)]\npub struct Policy {\n    inbound: Vec<Rule>,\n    outbound: Vec<Rule>,\n}\n\n/// A policy rule that should be applied\n#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq)]\n#[serde(default)]\npub struct Rule {\n    name: String,\n    expressions: Vec<String>,\n    actions: Vec<Action>,\n}\n\n/// An action that should be taken if the rule matches\n#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq)]\n#[serde(default)]\npub struct Action {\n    #[serde(rename = \"type\")]\n    type_: String,\n    config: Option<serde_json::Value>,\n}\n\n/// Errors in creating or serializing Policies\n#[derive(Debug, Error)]\npub enum InvalidPolicy {\n    /// Error representing an invalid string for a Policy\n    #[error(\"failure to parse or generate policy\")]\n    SerializationError(#[from] serde_json::Error),\n    /// An error loading a Policy from a file\n    #[error(\"failure to read policy file '{}'\", .1)]\n    FileReadError(#[source] io::Error, String),\n}\n\nimpl Policy {\n    /// Create a new empty [Policy] struct\n    pub fn new() -> Self {\n        Policy {\n            ..Default::default()\n        }\n    }\n\n    /// Create a new [Policy] from a json string\n    fn from_json(json: impl AsRef<str>) -> Result<Self, InvalidPolicy> {\n        serde_json::from_str(json.as_ref()).map_err(InvalidPolicy::SerializationError)\n    }\n\n    /// Create a new [Policy] from a json file\n    pub fn from_file(json_file_path: impl AsRef<str>) -> Result<Self, InvalidPolicy> {\n        Policy::from_json(\n            read_to_string(json_file_path.as_ref()).map_err(|e| {\n                
InvalidPolicy::FileReadError(e, json_file_path.as_ref().to_string())\n            })?,\n        )\n    }\n\n    /// Convert [Policy] to json string\n    pub fn to_json(&self) -> Result<String, InvalidPolicy> {\n        serde_json::to_string(&self).map_err(InvalidPolicy::SerializationError)\n    }\n\n    /// Add an inbound policy\n    pub fn add_inbound(&mut self, rule: impl Into<Rule>) -> &mut Self {\n        self.inbound.push(rule.into());\n        self\n    }\n\n    /// Add an outbound policy\n    pub fn add_outbound(&mut self, rule: impl Into<Rule>) -> &mut Self {\n        self.outbound.push(rule.into());\n        self\n    }\n}\n\nimpl TryFrom<&Policy> for Policy {\n    type Error = InvalidPolicy;\n\n    fn try_from(other: &Policy) -> Result<Policy, Self::Error> {\n        Ok(other.clone())\n    }\n}\n\nimpl TryFrom<Result<Policy, InvalidPolicy>> for Policy {\n    type Error = InvalidPolicy;\n\n    fn try_from(other: Result<Policy, InvalidPolicy>) -> Result<Policy, Self::Error> {\n        other\n    }\n}\n\nimpl TryFrom<&str> for Policy {\n    type Error = InvalidPolicy;\n\n    fn try_from(other: &str) -> Result<Policy, Self::Error> {\n        Policy::from_json(other)\n    }\n}\n\nimpl Rule {\n    /// Create a new [Rule]\n    pub fn new(name: impl Into<String>) -> Self {\n        Rule {\n            name: name.into(),\n            ..Default::default()\n        }\n    }\n\n    /// Convert [Rule] to json string\n    pub fn to_json(&self) -> Result<String, InvalidPolicy> {\n        serde_json::to_string(&self).map_err(InvalidPolicy::SerializationError)\n    }\n\n    /// Add an expression\n    pub fn add_expression(&mut self, expression: impl Into<String>) -> &mut Self {\n        self.expressions.push(expression.into());\n        self\n    }\n\n    /// Add an action\n    pub fn add_action(&mut self, action: Action) -> &mut Self {\n        self.actions.push(action);\n        self\n    }\n}\n\nimpl From<&mut Rule> for Rule {\n    fn from(other: &mut Rule) -> Self {\n 
       other.to_owned()\n    }\n}\n\nimpl Action {\n    /// Create a new [Action]\n    pub fn new(type_: impl Into<String>, config: Option<&str>) -> Result<Self, InvalidPolicy> {\n        Ok(Action {\n            type_: type_.into(),\n            config: config\n                .map(|c| serde_json::from_str(c).map_err(InvalidPolicy::SerializationError))\n                .transpose()?,\n        })\n    }\n\n    /// Convert [Action] to json string\n    pub fn to_json(&self) -> Result<String, InvalidPolicy> {\n        serde_json::to_string(&self).map_err(InvalidPolicy::SerializationError)\n    }\n}\n\nimpl From<Policy> for proto::PolicyWrapper {\n    fn from(value: Policy) -> Self {\n        proto::PolicyWrapper::Policy(value.into())\n    }\n}\n\n// transform into the wire protocol format\nimpl From<Policy> for proto::Policy {\n    fn from(o: Policy) -> Self {\n        proto::Policy {\n            inbound: o.inbound.into_iter().map(|p| p.into()).collect(),\n            outbound: o.outbound.into_iter().map(|p| p.into()).collect(),\n        }\n    }\n}\n\nimpl From<Rule> for proto::Rule {\n    fn from(p: Rule) -> Self {\n        proto::Rule {\n            name: p.name,\n            expressions: p.expressions,\n            actions: p.actions.into_iter().map(|a| a.into()).collect(),\n        }\n    }\n}\n\nimpl From<Action> for proto::Action {\n    fn from(a: Action) -> Self {\n        proto::Action {\n            type_: a.type_,\n            config: a\n                .config\n                .map(|c| c.to_string().into_bytes())\n                .unwrap_or_default(),\n        }\n    }\n}\n\n#[cfg(test)]\npub(crate) mod test {\n    use super::*;\n\n    pub(crate) const POLICY_JSON: &str = r###\"\n        {\"inbound\": [\n            {\n                \"name\": \"test_in\",\n                \"expressions\": [\"req.Method == 'PUT'\"],\n                \"actions\": [{\"type\": \"deny\"}]\n            }\n        ],\n        \"outbound\": [\n            {\n                
\"name\": \"test_out\",\n                \"expressions\": [\"res.StatusCode == '200'\"],\n                \"actions\": [{\"type\": \"custom-response\", \"config\": {\"status_code\":201}}]\n            }\n        ]}\n        \"###;\n\n    #[test]\n    fn test_json_to_policy() {\n        let policy: Policy = Policy::from_json(POLICY_JSON).unwrap();\n        assert_eq!(1, policy.inbound.len());\n        assert_eq!(1, policy.outbound.len());\n        let inbound = &policy.inbound[0];\n        let outbound = &policy.outbound[0];\n\n        assert_eq!(\"test_in\", inbound.name);\n        assert_eq!(1, inbound.expressions.len());\n        assert_eq!(1, inbound.actions.len());\n        assert_eq!(\"req.Method == 'PUT'\", inbound.expressions[0]);\n        assert_eq!(\"deny\", inbound.actions[0].type_);\n        assert_eq!(None, inbound.actions[0].config);\n\n        assert_eq!(\"test_out\", outbound.name);\n        assert_eq!(1, outbound.expressions.len());\n        assert_eq!(1, outbound.actions.len());\n        assert_eq!(\"res.StatusCode == '200'\", outbound.expressions[0]);\n        assert_eq!(\"custom-response\", outbound.actions[0].type_);\n        assert_eq!(\n            \"{\\\"status_code\\\":201}\",\n            outbound.actions[0].config.as_ref().unwrap().to_string()\n        );\n    }\n\n    #[test]\n    fn test_empty_json_to_policy() {\n        let policy: Policy = Policy::from_json(\"{}\").unwrap();\n        assert_eq!(0, policy.inbound.len());\n        assert_eq!(0, policy.outbound.len());\n    }\n\n    #[test]\n    fn test_policy_to_json() {\n        let policy = Policy::from_json(POLICY_JSON).unwrap();\n        let json = policy.to_json().unwrap();\n        let policy2 = Policy::from_json(json).unwrap();\n        assert_eq!(policy, policy2);\n    }\n\n    #[test]\n    fn test_policy_to_json_error() {\n        let error = Policy::from_json(\"asdf\").err().unwrap();\n        assert!(matches!(error, InvalidPolicy::SerializationError { .. 
}));\n    }\n\n    #[test]\n    fn test_rule_to_json() {\n        let policy = Policy::from_json(POLICY_JSON).unwrap();\n        let rule = &policy.outbound[0];\n        let json = rule.to_json().unwrap();\n        let parsed: serde_json::Value = serde_json::from_str(&json).unwrap();\n        let rule_map = parsed.as_object().unwrap();\n        assert_eq!(\"test_out\", rule_map[\"name\"]);\n\n        // expressions\n        let expressions = rule_map[\"expressions\"].as_array().unwrap();\n        assert_eq!(1, expressions.len());\n        assert_eq!(\"res.StatusCode == '200'\", expressions[0]);\n\n        // actions\n        let actions = rule_map[\"actions\"].as_array().unwrap();\n        assert_eq!(1, actions.len());\n        assert_eq!(\"custom-response\", actions[0][\"type\"]);\n        assert_eq!(201, actions[0][\"config\"][\"status_code\"]);\n    }\n\n    #[test]\n    fn test_action_to_json() {\n        let policy = Policy::from_json(POLICY_JSON).unwrap();\n        let action = &policy.outbound[0].actions[0];\n        let json = action.to_json().unwrap();\n        let parsed: serde_json::Value = serde_json::from_str(&json).unwrap();\n        let action_map = parsed.as_object().unwrap();\n        assert_eq!(\"custom-response\", action_map[\"type\"]);\n        assert_eq!(201, action_map[\"config\"][\"status_code\"]);\n    }\n\n    #[test]\n    fn test_builders() {\n        let policy = Policy::from_json(POLICY_JSON).unwrap();\n        let policy2 = Policy::new()\n            .add_inbound(\n                Rule::new(\"test_in\")\n                    .add_expression(\"req.Method == 'PUT'\")\n                    .add_action(Action::new(\"deny\", None).unwrap()),\n            )\n            .add_outbound(\n                Rule::new(\"test_out\")\n                    .add_expression(\"res.StatusCode == '200'\")\n                    // .add_action(Action::new(\"deny\", \"\"))\n                    .add_action(\n                        Action::new(\"custom-response\", 
Some(\"{\\\"status_code\\\":201}\")).unwrap(),\n                    ),\n            )\n            .to_owned();\n        assert_eq!(policy, policy2);\n    }\n\n    #[test]\n    fn test_load_file() {\n        let policy = Policy::from_json(POLICY_JSON).unwrap();\n        let policy2 = Policy::from_file(\"assets/policy.json\").unwrap();\n        assert_eq!(\"test_in\", policy2.inbound[0].name);\n        assert_eq!(\"test_out\", policy2.outbound[0].name);\n        assert_eq!(policy, policy2);\n    }\n\n    #[test]\n    fn test_load_inbound_file() {\n        let policy = Policy::from_file(\"assets/policy-inbound.json\").unwrap();\n        assert_eq!(\"test_in\", policy.inbound[0].name);\n        assert_eq!(0, policy.outbound.len());\n    }\n\n    #[test]\n    fn test_load_file_error() {\n        let error = Policy::from_file(\"assets/absent.json\").err().unwrap();\n        assert!(matches!(error, InvalidPolicy::FileReadError { .. }));\n    }\n}\n"
  },
  {
    "path": "ngrok/src/config/tcp.rs",
    "content": "use std::{\n    collections::HashMap,\n    convert::From,\n};\n\nuse url::Url;\n\nuse super::{\n    common::ProxyProto,\n    Policy,\n};\n// These are used for doc comment links.\n#[allow(unused_imports)]\nuse crate::config::{\n    ForwarderBuilder,\n    TunnelBuilder,\n};\nuse crate::{\n    config::common::{\n        default_forwards_to,\n        Binding,\n        CommonOpts,\n        TunnelConfig,\n    },\n    internals::proto::{\n        self,\n        BindExtra,\n        BindOpts,\n    },\n    tunnel::TcpTunnel,\n    Session,\n};\n\n/// The options for a TCP edge.\n#[derive(Default, Clone)]\nstruct TcpOptions {\n    pub(crate) common_opts: CommonOpts,\n    pub(crate) remote_addr: Option<String>,\n    pub(crate) bindings: Vec<String>,\n}\n\nimpl TunnelConfig for TcpOptions {\n    fn forwards_to(&self) -> String {\n        self.common_opts\n            .forwards_to\n            .clone()\n            .unwrap_or(default_forwards_to().into())\n    }\n    fn extra(&self) -> BindExtra {\n        BindExtra {\n            token: Default::default(),\n            ip_policy_ref: Default::default(),\n            metadata: self.common_opts.metadata.clone().unwrap_or_default(),\n            bindings: self.bindings.clone(),\n            pooling_enabled: self.common_opts.pooling_enabled.unwrap_or(false),\n        }\n    }\n    fn proto(&self) -> String {\n        \"tcp\".into()\n    }\n\n    fn forwards_proto(&self) -> String {\n        // not supported\n        String::new()\n    }\n\n    fn verify_upstream_tls(&self) -> bool {\n        self.common_opts.verify_upstream_tls()\n    }\n\n    fn opts(&self) -> Option<BindOpts> {\n        // fill out all the options, translating to proto here\n        let mut tcp_endpoint = proto::TcpEndpoint::default();\n\n        if let Some(remote_addr) = self.remote_addr.as_ref() {\n            tcp_endpoint.addr = remote_addr.clone();\n        }\n        tcp_endpoint.proxy_proto = self.common_opts.proxy_proto;\n\n        
tcp_endpoint.ip_restriction = self.common_opts.ip_restriction();\n\n        tcp_endpoint.traffic_policy = if self.common_opts.traffic_policy.is_some() {\n            self.common_opts.traffic_policy.clone().map(From::from)\n        } else if self.common_opts.policy.is_some() {\n            self.common_opts.policy.clone().map(From::from)\n        } else {\n            None\n        };\n        Some(BindOpts::Tcp(tcp_endpoint))\n    }\n    fn labels(&self) -> HashMap<String, String> {\n        HashMap::new()\n    }\n}\n\nimpl_builder! {\n    /// A builder for a tunnel backing a TCP endpoint.\n    ///\n    /// https://ngrok.com/docs/tcp/\n    TcpTunnelBuilder, TcpOptions, TcpTunnel, endpoint\n}\n\n/// The options for a TCP edge.\nimpl TcpTunnelBuilder {\n    /// Add the provided CIDR to the allowlist.\n    ///\n    /// https://ngrok.com/docs/tcp/ip-restrictions/\n    pub fn allow_cidr(&mut self, cidr: impl Into<String>) -> &mut Self {\n        self.options.common_opts.cidr_restrictions.allow(cidr);\n        self\n    }\n    /// Add the provided CIDR to the denylist.\n    ///\n    /// https://ngrok.com/docs/tcp/ip-restrictions/\n    pub fn deny_cidr(&mut self, cidr: impl Into<String>) -> &mut Self {\n        self.options.common_opts.cidr_restrictions.deny(cidr);\n        self\n    }\n    /// Sets the PROXY protocol version for connections over this tunnel.\n    pub fn proxy_proto(&mut self, proxy_proto: ProxyProto) -> &mut Self {\n        self.options.common_opts.proxy_proto = proxy_proto;\n        self\n    }\n    /// Sets the opaque metadata string for this tunnel.\n    ///\n    /// https://ngrok.com/docs/api/resources/tunnels/#tunnel-fields\n    pub fn metadata(&mut self, metadata: impl Into<String>) -> &mut Self {\n        self.options.common_opts.metadata = Some(metadata.into());\n        self\n    }\n\n    /// Sets the ingress configuration for this endpoint.\n    ///\n    /// Valid binding values are:\n    /// - `\"public\"` - Publicly accessible endpoint\n    
/// - `\"internal\"` - Internal-only endpoint\n    /// - `\"kubernetes\"` - Kubernetes cluster binding\n    ///\n    /// If not specified, the ngrok service will use its default binding configuration.\n    ///\n    /// # Panics\n    ///\n    /// Panics if called more than once or if an invalid binding value is provided.\n    ///\n    /// # Examples\n    ///\n    /// ```no_run\n    /// # use ngrok::Session;\n    /// # use ngrok::config::TunnelBuilder;\n    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {\n    /// let session = Session::builder().authtoken_from_env().connect().await?;\n    ///\n    /// // Using string\n    /// let tunnel = session.tcp_endpoint().binding(\"internal\").listen().await?;\n    ///\n    /// // Using typed enum\n    /// use ngrok::config::Binding;\n    /// let tunnel = session.tcp_endpoint().binding(Binding::Public).listen().await?;\n    /// # Ok(())\n    /// # }\n    /// ```\n    pub fn binding(&mut self, binding: impl Into<String>) -> &mut Self {\n        if !self.options.bindings.is_empty() {\n            panic!(\"binding() can only be called once\");\n        }\n        let binding_str = binding.into();\n        if let Err(e) = Binding::validate(&binding_str) {\n            panic!(\"{}\", e);\n        }\n        self.options.bindings.push(binding_str);\n        self\n    }\n    /// Sets the ForwardsTo string for this tunnel. 
This can be viewed via the\n    /// API or dashboard.\n    ///\n    /// This overrides the default process info if using\n    /// [TunnelBuilder::listen], and is in turn overridden by the url provided\n    /// to [ForwarderBuilder::listen_and_forward].\n    ///\n    /// https://ngrok.com/docs/api/resources/tunnels/#tunnel-fields\n    pub fn forwards_to(&mut self, forwards_to: impl Into<String>) -> &mut Self {\n        self.options.common_opts.forwards_to = Some(forwards_to.into());\n        self\n    }\n\n    /// Disables backend TLS certificate verification for forwards from this tunnel.\n    pub fn verify_upstream_tls(&mut self, verify_upstream_tls: bool) -> &mut Self {\n        self.options\n            .common_opts\n            .set_verify_upstream_tls(verify_upstream_tls);\n        self\n    }\n\n    /// Sets the TCP address to request for this edge.\n    ///\n    /// https://ngrok.com/docs/network-edge/domains-and-tcp-addresses/#tcp-addresses\n    pub fn remote_addr(&mut self, remote_addr: impl Into<String>) -> &mut Self {\n        self.options.remote_addr = Some(remote_addr.into());\n        self\n    }\n\n    /// DEPRECATED: use traffic_policy instead.\n    pub fn policy<S>(&mut self, s: S) -> Result<&mut Self, S::Error>\n    where\n        S: TryInto<Policy>,\n    {\n        self.options.common_opts.policy = Some(s.try_into()?);\n        Ok(self)\n    }\n\n    /// Set policy for this edge.\n    pub fn traffic_policy(&mut self, policy_str: impl Into<String>) -> &mut Self {\n        self.options.common_opts.traffic_policy = Some(policy_str.into());\n        self\n    }\n\n    pub(crate) async fn for_forwarding_to(&mut self, to_url: &Url) -> &mut Self {\n        self.options.common_opts.for_forwarding_to(to_url);\n        self\n    }\n\n    /// Allows the endpoint to pool with other endpoints with the same host/port/binding\n    pub fn pooling_enabled(&mut self, pooling_enabled: impl Into<bool>) -> &mut Self {\n        self.options.common_opts.pooling_enabled 
= Some(pooling_enabled.into());\n        self\n    }\n}\n\n#[cfg(test)]\nmod test {\n    use super::*;\n    use crate::config::policies::test::POLICY_JSON;\n    const METADATA: &str = \"testmeta\";\n    const TEST_FORWARD: &str = \"testforward\";\n    const REMOTE_ADDR: &str = \"4.tcp.ngrok.io:1337\";\n    const ALLOW_CIDR: &str = \"0.0.0.0/0\";\n    const DENY_CIDR: &str = \"10.1.1.1/32\";\n\n    #[test]\n    fn test_interface_to_proto() {\n        // pass to a function accepting the trait to avoid\n        // \"creates a temporary which is freed while still in use\"\n        tunnel_test(\n            &TcpTunnelBuilder {\n                session: None,\n                options: Default::default(),\n            }\n            .allow_cidr(ALLOW_CIDR)\n            .deny_cidr(DENY_CIDR)\n            .proxy_proto(ProxyProto::V2)\n            .metadata(METADATA)\n            .remote_addr(REMOTE_ADDR)\n            .forwards_to(TEST_FORWARD)\n            .policy(POLICY_JSON)\n            .unwrap()\n            .options,\n        );\n    }\n\n    fn tunnel_test<C>(tunnel_cfg: &C)\n    where\n        C: TunnelConfig,\n    {\n        assert_eq!(TEST_FORWARD, tunnel_cfg.forwards_to());\n\n        let extra = tunnel_cfg.extra();\n        assert_eq!(String::default(), *extra.token);\n        assert_eq!(METADATA, extra.metadata);\n        assert_eq!(Vec::<String>::new(), extra.bindings);\n        assert_eq!(String::default(), extra.ip_policy_ref);\n\n        assert_eq!(\"tcp\", tunnel_cfg.proto());\n\n        let opts = tunnel_cfg.opts().unwrap();\n        assert!(matches!(opts, BindOpts::Tcp { .. 
}));\n        if let BindOpts::Tcp(endpoint) = opts {\n            assert_eq!(REMOTE_ADDR, endpoint.addr);\n            assert!(matches!(endpoint.proxy_proto, ProxyProto::V2));\n\n            let ip_restriction = endpoint.ip_restriction.unwrap();\n            assert_eq!(Vec::from([ALLOW_CIDR]), ip_restriction.allow_cidrs);\n            assert_eq!(Vec::from([DENY_CIDR]), ip_restriction.deny_cidrs);\n        }\n\n        assert_eq!(HashMap::new(), tunnel_cfg.labels());\n    }\n\n    #[test]\n    fn test_binding_valid_values() {\n        let mut builder = TcpTunnelBuilder {\n            session: None,\n            options: Default::default(),\n        };\n\n        // Test \"public\"\n        builder.binding(\"public\");\n        assert_eq!(vec![\"public\"], builder.options.bindings);\n\n        // Test \"internal\"\n        let mut builder = TcpTunnelBuilder {\n            session: None,\n            options: Default::default(),\n        };\n        builder.binding(\"internal\");\n        assert_eq!(vec![\"internal\"], builder.options.bindings);\n\n        // Test \"kubernetes\"\n        let mut builder = TcpTunnelBuilder {\n            session: None,\n            options: Default::default(),\n        };\n        builder.binding(\"kubernetes\");\n        assert_eq!(vec![\"kubernetes\"], builder.options.bindings);\n\n        // Test with Binding enum\n        let mut builder = TcpTunnelBuilder {\n            session: None,\n            options: Default::default(),\n        };\n        builder.binding(Binding::Public);\n        assert_eq!(vec![\"public\"], builder.options.bindings);\n    }\n\n    #[test]\n    #[should_panic(expected = \"Invalid binding value\")]\n    fn test_binding_invalid_value() {\n        let mut builder = TcpTunnelBuilder {\n            session: None,\n            options: Default::default(),\n        };\n        builder.binding(\"invalid\");\n    }\n\n    #[test]\n    #[should_panic(expected = \"binding() can only be called once\")]\n    fn 
test_binding_called_twice() {\n        let mut builder = TcpTunnelBuilder {\n            session: None,\n            options: Default::default(),\n        };\n        builder.binding(\"public\");\n        builder.binding(\"internal\");\n    }\n}\n"
  },
  {
    "path": "ngrok/src/config/tls.rs",
    "content": "use std::collections::HashMap;\n\nuse bytes::Bytes;\nuse url::Url;\n\nuse super::{\n    common::ProxyProto,\n    Policy,\n};\n// These are used for doc comment links.\n#[allow(unused_imports)]\nuse crate::config::{\n    ForwarderBuilder,\n    TunnelBuilder,\n};\nuse crate::{\n    config::common::{\n        default_forwards_to,\n        Binding,\n        CommonOpts,\n        TunnelConfig,\n    },\n    internals::proto::{\n        self,\n        BindExtra,\n        BindOpts,\n        TlsTermination,\n    },\n    tunnel::TlsTunnel,\n    Session,\n};\n\n/// The options for TLS edges.\n#[derive(Default, Clone)]\nstruct TlsOptions {\n    pub(crate) common_opts: CommonOpts,\n    pub(crate) domain: Option<String>,\n    pub(crate) mutual_tlsca: Vec<bytes::Bytes>,\n    pub(crate) key_pem: Option<bytes::Bytes>,\n    pub(crate) cert_pem: Option<bytes::Bytes>,\n    pub(crate) bindings: Vec<String>,\n}\n\nimpl TunnelConfig for TlsOptions {\n    fn forwards_to(&self) -> String {\n        self.common_opts\n            .forwards_to\n            .clone()\n            .unwrap_or(default_forwards_to().into())\n    }\n\n    fn forwards_proto(&self) -> String {\n        // not supported\n        String::new()\n    }\n\n    fn verify_upstream_tls(&self) -> bool {\n        self.common_opts.verify_upstream_tls()\n    }\n\n    fn extra(&self) -> BindExtra {\n        BindExtra {\n            token: Default::default(),\n            ip_policy_ref: Default::default(),\n            metadata: self.common_opts.metadata.clone().unwrap_or_default(),\n            bindings: self.bindings.clone(),\n            pooling_enabled: self.common_opts.pooling_enabled.unwrap_or(false),\n        }\n    }\n    fn proto(&self) -> String {\n        \"tls\".into()\n    }\n\n    fn opts(&self) -> Option<BindOpts> {\n        // fill out all the options, translating to proto here\n        let mut tls_endpoint = proto::TlsEndpoint::default();\n\n        if let Some(domain) = self.domain.as_ref() {\n      
      tls_endpoint.domain = domain.clone();\n        }\n        tls_endpoint.proxy_proto = self.common_opts.proxy_proto;\n\n        // doing some backflips to check both cert_pem and key_pem are set, and avoid unwrapping\n        let tls_termination = self\n            .cert_pem\n            .as_ref()\n            .zip(self.key_pem.as_ref())\n            .map(|(c, k)| TlsTermination {\n                cert: c.to_vec(),\n                key: k.to_vec().into(),\n                sealed_key: Vec::new(),\n            });\n\n        tls_endpoint.ip_restriction = self.common_opts.ip_restriction();\n        tls_endpoint.mutual_tls_at_edge =\n            (!self.mutual_tlsca.is_empty()).then_some(self.mutual_tlsca.as_slice().into());\n        tls_endpoint.tls_termination = tls_termination;\n        tls_endpoint.traffic_policy = if self.common_opts.traffic_policy.is_some() {\n            self.common_opts.traffic_policy.clone().map(From::from)\n        } else if self.common_opts.policy.is_some() {\n            self.common_opts.policy.clone().map(From::from)\n        } else {\n            None\n        };\n        Some(BindOpts::Tls(tls_endpoint))\n    }\n    fn labels(&self) -> HashMap<String, String> {\n        HashMap::new()\n    }\n}\n\nimpl_builder! 
{\n    /// A builder for a tunnel backing a TCP endpoint.\n    ///\n    /// https://ngrok.com/docs/tls/\n    TlsTunnelBuilder, TlsOptions, TlsTunnel, endpoint\n}\n\nimpl TlsTunnelBuilder {\n    /// Add the provided CIDR to the allowlist.\n    ///\n    /// https://ngrok.com/docs/tls/ip-restrictions/\n    pub fn allow_cidr(&mut self, cidr: impl Into<String>) -> &mut Self {\n        self.options.common_opts.cidr_restrictions.allow(cidr);\n        self\n    }\n    /// Add the provided CIDR to the denylist.\n    ///\n    /// https://ngrok.com/docs/tls/ip-restrictions/\n    pub fn deny_cidr(&mut self, cidr: impl Into<String>) -> &mut Self {\n        self.options.common_opts.cidr_restrictions.deny(cidr);\n        self\n    }\n    /// Sets the PROXY protocol version for connections over this tunnel.\n    pub fn proxy_proto(&mut self, proxy_proto: ProxyProto) -> &mut Self {\n        self.options.common_opts.proxy_proto = proxy_proto;\n        self\n    }\n    /// Sets the opaque metadata string for this tunnel.\n    ///\n    /// https://ngrok.com/docs/api/resources/tunnels/#tunnel-fields\n    pub fn metadata(&mut self, metadata: impl Into<String>) -> &mut Self {\n        self.options.common_opts.metadata = Some(metadata.into());\n        self\n    }\n\n    /// Sets the ingress configuration for this endpoint.\n    ///\n    /// Valid binding values are:\n    /// - `\"public\"` - Publicly accessible endpoint\n    /// - `\"internal\"` - Internal-only endpoint\n    /// - `\"kubernetes\"` - Kubernetes cluster binding\n    ///\n    /// If not specified, the ngrok service will use its default binding configuration.\n    ///\n    /// # Panics\n    ///\n    /// Panics if called more than once or if an invalid binding value is provided.\n    ///\n    /// # Examples\n    ///\n    /// ```no_run\n    /// # use ngrok::Session;\n    /// # use ngrok::config::TunnelBuilder;\n    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {\n    /// let session = 
Session::builder().authtoken_from_env().connect().await?;\n    ///\n    /// // Using string\n    /// let tunnel = session.tls_endpoint().binding(\"internal\").listen().await?;\n    ///\n    /// // Using typed enum\n    /// use ngrok::config::Binding;\n    /// let tunnel = session.tls_endpoint().binding(Binding::Public).listen().await?;\n    /// # Ok(())\n    /// # }\n    /// ```\n    pub fn binding(&mut self, binding: impl Into<String>) -> &mut Self {\n        if !self.options.bindings.is_empty() {\n            panic!(\"binding() can only be called once\");\n        }\n        let binding_str = binding.into();\n        if let Err(e) = Binding::validate(&binding_str) {\n            panic!(\"{}\", e);\n        }\n        self.options.bindings.push(binding_str);\n        self\n    }\n    /// Sets the ForwardsTo string for this tunnel. This can be viewed via the\n    /// API or dashboard.\n    ///\n    /// This overrides the default process info if using\n    /// [TunnelBuilder::listen], and is in turn overridden by the url provided\n    /// to [ForwarderBuilder::listen_and_forward].\n    ///\n    /// https://ngrok.com/docs/api/resources/tunnels/#tunnel-fields\n    pub fn forwards_to(&mut self, forwards_to: impl Into<String>) -> &mut Self {\n        self.options.common_opts.forwards_to = Some(forwards_to.into());\n        self\n    }\n\n    /// Disables backend TLS certificate verification for forwards from this tunnel.\n    pub fn verify_upstream_tls(&mut self, verify_upstream_tls: bool) -> &mut Self {\n        self.options\n            .common_opts\n            .set_verify_upstream_tls(verify_upstream_tls);\n        self\n    }\n\n    /// Sets the domain to request for this edge.\n    ///\n    /// https://ngrok.com/docs/network-edge/domains-and-tcp-addresses/#domains\n    pub fn domain(&mut self, domain: impl Into<String>) -> &mut Self {\n        self.options.domain = Some(domain.into());\n        self\n    }\n\n    /// Adds a certificate in PEM format to use for 
mutual TLS authentication.\n    ///\n    /// These will be used to authenticate client certificates for requests at\n    /// the ngrok edge.\n    ///\n    /// https://ngrok.com/docs/tls/mutual-tls/\n    pub fn mutual_tlsca(&mut self, mutual_tlsca: Bytes) -> &mut Self {\n        self.options.mutual_tlsca.push(mutual_tlsca);\n        self\n    }\n\n    /// Sets the key and certificate in PEM format for TLS termination at the\n    /// ngrok edge.\n    ///\n    /// https://ngrok.com/docs/tls/tls-termination/\n    pub fn termination(&mut self, cert_pem: Bytes, key_pem: Bytes) -> &mut Self {\n        self.options.key_pem = Some(key_pem);\n        self.options.cert_pem = Some(cert_pem);\n        self\n    }\n\n    /// DEPRECATED: use traffic_policy instead.\n    pub fn policy<S>(&mut self, s: S) -> Result<&mut Self, S::Error>\n    where\n        S: TryInto<Policy>,\n    {\n        self.options.common_opts.policy = Some(s.try_into()?);\n        Ok(self)\n    }\n\n    /// Set policy for this edge.\n    pub fn traffic_policy(&mut self, policy_str: impl Into<String>) -> &mut Self {\n        self.options.common_opts.traffic_policy = Some(policy_str.into());\n        self\n    }\n\n    pub(crate) async fn for_forwarding_to(&mut self, to_url: &Url) -> &mut Self {\n        self.options.common_opts.for_forwarding_to(to_url);\n        self\n    }\n\n    /// Allows the endpoint to pool with other endpoints with the same host/port/binding\n    pub fn pooling_enabled(&mut self, pooling_enabled: impl Into<bool>) -> &mut Self {\n        self.options.common_opts.pooling_enabled = Some(pooling_enabled.into());\n        self\n    }\n}\n\n#[cfg(test)]\nmod test {\n    use super::*;\n    use crate::config::policies::test::POLICY_JSON;\n\n    const METADATA: &str = \"testmeta\";\n    const TEST_FORWARD: &str = \"testforward\";\n    const ALLOW_CIDR: &str = \"0.0.0.0/0\";\n    const DENY_CIDR: &str = \"10.1.1.1/32\";\n    const CA_CERT: &[u8] = \"test ca cert\".as_bytes();\n    const CA_CERT2: 
&[u8] = \"test ca cert2\".as_bytes();\n    const KEY: &[u8] = \"test cert\".as_bytes();\n    const CERT: &[u8] = \"test cert\".as_bytes();\n    const DOMAIN: &str = \"test domain\";\n\n    #[test]\n    fn test_interface_to_proto() {\n        // pass to a function accepting the trait to avoid\n        // \"creates a temporary which is freed while still in use\"\n        tunnel_test(\n            &TlsTunnelBuilder {\n                session: None,\n                options: Default::default(),\n            }\n            .allow_cidr(ALLOW_CIDR)\n            .deny_cidr(DENY_CIDR)\n            .proxy_proto(ProxyProto::V2)\n            .metadata(METADATA)\n            .domain(DOMAIN)\n            .mutual_tlsca(CA_CERT.into())\n            .mutual_tlsca(CA_CERT2.into())\n            .termination(CERT.into(), KEY.into())\n            .forwards_to(TEST_FORWARD)\n            .policy(POLICY_JSON)\n            .unwrap()\n            .options,\n        );\n    }\n\n    fn tunnel_test<C>(tunnel_cfg: C)\n    where\n        C: TunnelConfig,\n    {\n        assert_eq!(TEST_FORWARD, tunnel_cfg.forwards_to());\n\n        let extra = tunnel_cfg.extra();\n        assert_eq!(String::default(), *extra.token);\n        assert_eq!(METADATA, extra.metadata);\n        assert_eq!(Vec::<String>::new(), extra.bindings);\n        assert_eq!(String::default(), extra.ip_policy_ref);\n\n        assert_eq!(\"tls\", tunnel_cfg.proto());\n\n        let opts = tunnel_cfg.opts().unwrap();\n        assert!(matches!(opts, BindOpts::Tls { .. 
}));\n        if let BindOpts::Tls(endpoint) = opts {\n            assert_eq!(DOMAIN, endpoint.domain);\n            assert_eq!(String::default(), endpoint.subdomain);\n            assert!(matches!(endpoint.proxy_proto, ProxyProto::V2));\n            assert!(!endpoint.mutual_tls_at_agent);\n\n            let ip_restriction = endpoint.ip_restriction.unwrap();\n            assert_eq!(Vec::from([ALLOW_CIDR]), ip_restriction.allow_cidrs);\n            assert_eq!(Vec::from([DENY_CIDR]), ip_restriction.deny_cidrs);\n\n            let tls_termination = endpoint.tls_termination.unwrap();\n            assert_eq!(CERT, tls_termination.cert);\n            assert_eq!(KEY, *tls_termination.key);\n            assert!(tls_termination.sealed_key.is_empty());\n\n            let mutual_tls = endpoint.mutual_tls_at_edge.unwrap();\n            let mut agg = CA_CERT.to_vec();\n            agg.extend(CA_CERT2.to_vec());\n            assert_eq!(agg, mutual_tls.mutual_tls_ca);\n        }\n\n        assert_eq!(HashMap::new(), tunnel_cfg.labels());\n    }\n\n    #[test]\n    fn test_binding_valid_values() {\n        let mut builder = TlsTunnelBuilder {\n            session: None,\n            options: Default::default(),\n        };\n\n        // Test \"public\"\n        builder.binding(\"public\");\n        assert_eq!(vec![\"public\"], builder.options.bindings);\n\n        // Test \"internal\"\n        let mut builder = TlsTunnelBuilder {\n            session: None,\n            options: Default::default(),\n        };\n        builder.binding(\"internal\");\n        assert_eq!(vec![\"internal\"], builder.options.bindings);\n\n        // Test \"kubernetes\"\n        let mut builder = TlsTunnelBuilder {\n            session: None,\n            options: Default::default(),\n        };\n        builder.binding(\"kubernetes\");\n        assert_eq!(vec![\"kubernetes\"], builder.options.bindings);\n\n        // Test with Binding enum\n        let mut builder = TlsTunnelBuilder {\n            
session: None,\n            options: Default::default(),\n        };\n        builder.binding(Binding::Kubernetes);\n        assert_eq!(vec![\"kubernetes\"], builder.options.bindings);\n    }\n\n    #[test]\n    #[should_panic(expected = \"Invalid binding value\")]\n    fn test_binding_invalid_value() {\n        let mut builder = TlsTunnelBuilder {\n            session: None,\n            options: Default::default(),\n        };\n        builder.binding(\"invalid\");\n    }\n\n    #[test]\n    #[should_panic(expected = \"binding() can only be called once\")]\n    fn test_binding_called_twice() {\n        let mut builder = TlsTunnelBuilder {\n            session: None,\n            options: Default::default(),\n        };\n        builder.binding(\"public\");\n        builder.binding(\"internal\");\n    }\n}\n"
  },
  {
    "path": "ngrok/src/config/webhook_verification.rs",
    "content": "use crate::internals::proto::{\n    SecretString,\n    WebhookVerification as WebhookProto,\n};\n\n/// Configuration for webhook verification.\n#[derive(Clone)]\npub(crate) struct WebhookVerification {\n    /// The webhook provider\n    pub(crate) provider: String,\n    /// The secret for verifying webhooks from this provider.\n    pub(crate) secret: SecretString,\n}\n\nimpl WebhookVerification {}\n\n// transform into the wire protocol format\nimpl From<WebhookVerification> for WebhookProto {\n    fn from(wv: WebhookVerification) -> Self {\n        WebhookProto {\n            provider: wv.provider,\n            secret: wv.secret,\n            sealed_secret: vec![], // unused in this context\n        }\n    }\n}\n"
  },
  {
    "path": "ngrok/src/conn.rs",
    "content": "use std::{\n    net::SocketAddr,\n    pin::Pin,\n    task::{\n        Context,\n        Poll,\n    },\n};\n\n// Support for axum's connection info trait.\n#[cfg(feature = \"axum\")]\nuse axum::extract::connect_info::Connected;\n#[cfg(feature = \"hyper\")]\nuse hyper::rt::{\n    Read as HyperRead,\n    Write as HyperWrite,\n};\nuse muxado::typed::TypedStream;\nuse tokio::io::{\n    AsyncRead,\n    AsyncWrite,\n};\n\nuse crate::{\n    config::ProxyProto,\n    internals::proto::{\n        EdgeType,\n        ProxyHeader,\n    },\n};\n/// A connection from an ngrok tunnel.\n///\n/// This implements [AsyncRead]/[AsyncWrite], as well as providing access to the\n/// address from which the connection to the ngrok edge originated.\npub(crate) struct ConnInner {\n    pub(crate) info: Info,\n    pub(crate) stream: TypedStream,\n}\n\n#[derive(Clone)]\npub(crate) struct Info {\n    pub(crate) header: ProxyHeader,\n    pub(crate) remote_addr: SocketAddr,\n    pub(crate) proxy_proto: ProxyProto,\n    pub(crate) app_protocol: Option<String>,\n    pub(crate) verify_upstream_tls: bool,\n}\n\nimpl ConnInfo for Info {\n    fn remote_addr(&self) -> SocketAddr {\n        self.remote_addr\n    }\n}\n\nimpl EdgeConnInfo for Info {\n    fn edge_type(&self) -> EdgeType {\n        self.header.edge_type\n    }\n    fn passthrough_tls(&self) -> bool {\n        self.header.passthrough_tls\n    }\n}\n\nimpl EndpointConnInfo for Info {\n    fn proto(&self) -> &str {\n        self.header.proto.as_str()\n    }\n}\n\n// This codgen indirect is required to make the hyper io trait bounds\n// dependent on the hyper feature. You can't put a #[cfg] on a single bound, so\n// we're putting the whole trait def in a macro. Gross, but gets the job done.\nmacro_rules! 
conn_trait {\n    ($($hyper_bound:tt)*) => {\n\t\t/// An incoming connection over an ngrok tunnel.\n\t\t/// Effectively a trait alias for async read+write, plus connection info.\n\t\tpub trait Conn: ConnInfo + AsyncRead + AsyncWrite $($hyper_bound)* + Unpin + Send + 'static {}\n\t}\n}\n\n#[cfg(not(feature = \"hyper\"))]\nconn_trait!();\n\n#[cfg(feature = \"hyper\")]\nconn_trait! {\n    + hyper::rt::Read + hyper::rt::Write\n}\n\n/// Information common to all ngrok connections.\npub trait ConnInfo {\n    /// Returns the client address that initiated the connection to the ngrok\n    /// edge.\n    fn remote_addr(&self) -> SocketAddr;\n}\n\n/// Information about connections via ngrok edges.\npub trait EdgeConnInfo {\n    /// Returns the edge type for this connection.\n    fn edge_type(&self) -> EdgeType;\n    /// Returns whether the connection includes the tls handshake and encrypted\n    /// stream.\n    fn passthrough_tls(&self) -> bool;\n}\n\n/// Information about connections via ngrok endpoints.\npub trait EndpointConnInfo {\n    /// Returns the endpoint protocol.\n    fn proto(&self) -> &str;\n}\n\nmacro_rules! 
make_conn_type {\n\t(info EdgeConnInfo, $wrapper:tt) => {\n\t\timpl EdgeConnInfo for $wrapper {\n\t\t\tfn edge_type(&self) -> EdgeType {\n\t\t\t\tself.inner.info.edge_type()\n\t\t\t}\n\t\t\tfn passthrough_tls(&self) -> bool {\n\t\t\t\tself.inner.info.passthrough_tls()\n\t\t\t}\n\t\t}\n\t};\n\t(info EndpointConnInfo, $wrapper:tt) => {\n\t\timpl EndpointConnInfo for $wrapper {\n\t\t\tfn proto(&self) -> &str {\n\t\t\t\tself.inner.info.proto()\n\t\t\t}\n\t\t}\n\t};\n    ($(#[$outer:meta])* $wrapper:ident, $($m:tt),*) => {\n        $(#[$outer])*\n        pub struct $wrapper {\n            pub(crate) inner: ConnInner,\n        }\n\n        impl Conn for $wrapper {}\n\n        impl ConnInfo for $wrapper {\n\t\t\tfn remote_addr(&self) -> SocketAddr {\n\t\t\t\tself.inner.info.remote_addr()\n\t\t\t}\n        }\n\n\t\timpl AsyncRead for $wrapper {\n\t\t\tfn poll_read(\n\t\t\t\tmut self: Pin<&mut Self>,\n\t\t\t\tcx: &mut Context<'_>,\n\t\t\t\tbuf: &mut tokio::io::ReadBuf<'_>,\n\t\t\t) -> Poll<std::io::Result<()>> {\n\t\t\t\tPin::new(&mut *self.inner.stream).poll_read(cx, buf)\n\t\t\t}\n\t\t}\n\n\t\t#[cfg(feature = \"hyper\")]\n\t\timpl HyperRead for $wrapper {\n\t\t\tfn poll_read(\n\t\t\t\tmut self: Pin<&mut Self>,\n\t\t\t\tcx: &mut Context<'_>,\n\t\t\t\tmut buf: hyper::rt::ReadBufCursor<'_>,\n\t\t\t) -> Poll<std::io::Result<()>> {\n\t\t\t\tlet mut tokio_buf = tokio::io::ReadBuf::uninit(unsafe{ buf.as_mut() });\n\t\t\t\tlet res = std::task::ready!(Pin::new(&mut *self.inner.stream).poll_read(cx, &mut tokio_buf));\n\t\t\t\tlet filled = tokio_buf.filled().len();\n\t\t\t\tunsafe { buf.advance(filled) };\n\t\t\t\tPoll::Ready(res)\n\t\t\t}\n\t\t}\n\n\t\t#[cfg(feature = \"hyper\")]\n\t\timpl HyperWrite for $wrapper {\n\t\t\tfn poll_write(\n\t\t\t\tmut self: Pin<&mut Self>,\n\t\t\t\tcx: &mut Context<'_>,\n\t\t\t\tbuf: &[u8],\n\t\t\t) -> Poll<Result<usize, std::io::Error>> {\n\t\t\t\tPin::new(&mut *self.inner.stream).poll_write(cx, buf)\n\t\t\t}\n\t\t\tfn poll_flush(\n\t\t\t\tmut self: 
Pin<&mut Self>,\n\t\t\t\tcx: &mut Context<'_>,\n\t\t\t) -> Poll<Result<(), std::io::Error>> {\n\t\t\t\tPin::new(&mut *self.inner.stream).poll_flush(cx)\n\t\t\t}\n\t\t\tfn poll_shutdown(\n\t\t\t\tmut self: Pin<&mut Self>,\n\t\t\t\tcx: &mut Context<'_>,\n\t\t\t) -> Poll<Result<(), std::io::Error>> {\n\t\t\t\tPin::new(&mut *self.inner.stream).poll_shutdown(cx)\n\t\t\t}\n\t\t}\n\n\t\timpl AsyncWrite for $wrapper {\n\t\t\tfn poll_write(\n\t\t\t\tmut self: Pin<&mut Self>,\n\t\t\t\tcx: &mut Context<'_>,\n\t\t\t\tbuf: &[u8],\n\t\t\t) -> Poll<Result<usize, std::io::Error>> {\n\t\t\t\tPin::new(&mut *self.inner.stream).poll_write(cx, buf)\n\t\t\t}\n\t\t\tfn poll_flush(\n\t\t\t\tmut self: Pin<&mut Self>,\n\t\t\t\tcx: &mut Context<'_>,\n\t\t\t) -> Poll<Result<(), std::io::Error>> {\n\t\t\t\tPin::new(&mut *self.inner.stream).poll_flush(cx)\n\t\t\t}\n\t\t\tfn poll_shutdown(\n\t\t\t\tmut self: Pin<&mut Self>,\n\t\t\t\tcx: &mut Context<'_>,\n\t\t\t) -> Poll<Result<(), std::io::Error>> {\n\t\t\t\tPin::new(&mut *self.inner.stream).poll_shutdown(cx)\n\t\t\t}\n\t\t}\n\n\t\t#[cfg_attr(docsrs, doc(cfg(feature = \"axum\")))]\n\t\t#[cfg(feature = \"axum\")]\n\t\timpl Connected<&$wrapper> for SocketAddr {\n\t\t\tfn connect_info(target: &$wrapper) -> Self {\n\t\t\t\ttarget.inner.info.remote_addr()\n\t\t\t}\n\t\t}\n\n\t\t$(\n\t\t\tmake_conn_type!(info $m, $wrapper);\n\t\t)*\n    };\n}\n\nmake_conn_type! {\n    /// A connection via an ngrok Edge.\n    EdgeConn, EdgeConnInfo\n}\n\nmake_conn_type! {\n    /// A connection via an ngrok Endpoint.\n    EndpointConn, EndpointConnInfo\n}\n"
  },
  {
    "path": "ngrok/src/forwarder.rs",
    "content": "use std::{\n    collections::HashMap,\n    error::Error as StdError,\n};\n\nuse async_trait::async_trait;\nuse tokio::task::JoinHandle;\nuse url::Url;\n\nuse crate::{\n    prelude::{\n        EdgeInfo,\n        EndpointInfo,\n        TunnelCloser,\n        TunnelInfo,\n    },\n    session::RpcError,\n    Tunnel,\n};\n\n/// An ngrok forwarder.\n///\n/// Represents a tunnel that is being forwarded to a URL.\npub struct Forwarder<T> {\n    pub(crate) join: JoinHandle<Result<(), Box<dyn StdError + Send + Sync>>>,\n    pub(crate) inner: T,\n}\n\nimpl<T> Forwarder<T> {\n    /// Wait for the forwarding task to exit.\n    pub fn join(&mut self) -> &mut JoinHandle<Result<(), Box<dyn StdError + Send + Sync>>> {\n        &mut self.join\n    }\n}\n\n#[async_trait]\nimpl<T> TunnelCloser for Forwarder<T>\nwhere\n    T: TunnelCloser + Send,\n{\n    async fn close(&mut self) -> Result<(), RpcError> {\n        self.inner.close().await\n    }\n}\n\nimpl<T> TunnelInfo for Forwarder<T>\nwhere\n    T: TunnelInfo,\n{\n    fn id(&self) -> &str {\n        self.inner.id()\n    }\n\n    fn forwards_to(&self) -> &str {\n        self.inner.forwards_to()\n    }\n\n    fn metadata(&self) -> &str {\n        self.inner.metadata()\n    }\n}\n\nimpl<T> EndpointInfo for Forwarder<T>\nwhere\n    T: EndpointInfo,\n{\n    fn proto(&self) -> &str {\n        self.inner.proto()\n    }\n\n    fn url(&self) -> &str {\n        self.inner.url()\n    }\n}\n\nimpl<T> EdgeInfo for Forwarder<T>\nwhere\n    T: EdgeInfo,\n{\n    fn labels(&self) -> &HashMap<String, String> {\n        self.inner.labels()\n    }\n}\n\npub(crate) fn forward<T>(mut listener: T, info: T, to_url: Url) -> Result<Forwarder<T>, RpcError>\nwhere\n    T: Tunnel + Send + 'static,\n    <T as Tunnel>::Conn: crate::tunnel_ext::ConnExt,\n{\n    let handle =\n        tokio::spawn(\n            async move { Ok(crate::tunnel_ext::forward_tunnel(&mut listener, to_url).await?) 
},\n        );\n\n    Ok(Forwarder {\n        join: handle,\n        inner: info,\n    })\n}\n"
  },
  {
    "path": "ngrok/src/internals/proto.rs",
    "content": "use std::{\n    collections::HashMap,\n    error,\n    fmt,\n    io,\n    ops::{\n        Deref,\n        DerefMut,\n    },\n    str::FromStr,\n    string::FromUtf8Error,\n    sync::Arc,\n};\n\nuse muxado::typed::StreamType;\nuse serde::{\n    de::{\n        DeserializeOwned,\n        Visitor,\n    },\n    Deserialize,\n    Serialize,\n    Serializer,\n};\nuse thiserror::Error;\nuse tokio::io::{\n    AsyncRead,\n    AsyncReadExt,\n};\nuse tracing::debug;\n\npub const AUTH_REQ: StreamType = StreamType::clamp(0);\npub const BIND_REQ: StreamType = StreamType::clamp(1);\npub const UNBIND_REQ: StreamType = StreamType::clamp(2);\npub const PROXY_REQ: StreamType = StreamType::clamp(3);\npub const RESTART_REQ: StreamType = StreamType::clamp(4);\npub const STOP_REQ: StreamType = StreamType::clamp(5);\npub const UPDATE_REQ: StreamType = StreamType::clamp(6);\npub const BIND_LABELED_REQ: StreamType = StreamType::clamp(7);\npub const STOP_TUNNEL_REQ: StreamType = StreamType::clamp(9);\n\npub const VERSION: &[&str] = &[\"3\", \"2\"]; // integers in priority order\n\n/// An error that may have an ngrok error code.\n/// All ngrok error codes are documented at https://ngrok.com/docs/errors\npub trait Error: error::Error {\n    /// Return the ngrok error code, if one exists for this error.\n    fn error_code(&self) -> Option<&str> {\n        None\n    }\n    /// Return the error message minus the ngrok error code.\n    /// If this error has no error code, this is equivalent to\n    /// `format!(\"{error}\")`.\n    fn msg(&self) -> String {\n        format!(\"{self}\")\n    }\n}\n\nimpl<E> Error for Box<E>\nwhere\n    E: Error,\n{\n    fn error_code(&self) -> Option<&str> {\n        <E as Error>::error_code(self)\n    }\n    fn msg(&self) -> String {\n        <E as Error>::msg(self)\n    }\n}\n\nimpl<E> Error for Arc<E>\nwhere\n    E: Error,\n{\n    fn error_code(&self) -> Option<&str> {\n        <E as Error>::error_code(self)\n    }\n    fn msg(&self) -> String {\n  
      <E as Error>::msg(self)\n    }\n}\n\nimpl<E> Error for &E\nwhere\n    E: Error,\n{\n    fn error_code(&self) -> Option<&str> {\n        <E as Error>::error_code(self)\n    }\n    fn msg(&self) -> String {\n        <E as Error>::msg(self)\n    }\n}\n\n#[derive(Serialize, Deserialize, Debug, Clone, Default)]\npub struct ErrResp {\n    pub msg: String,\n    pub error_code: Option<String>,\n}\n\nimpl<'a> From<&'a str> for ErrResp {\n    fn from(value: &'a str) -> Self {\n        let mut error_code = None;\n        let mut msg_lines = vec![];\n        for line in value.lines().filter(|l| !l.is_empty()) {\n            if line.starts_with(\"ERR_NGROK_\") {\n                error_code = Some(line.trim().into());\n            } else {\n                msg_lines.push(line);\n            }\n        }\n        ErrResp {\n            error_code,\n            msg: msg_lines.join(\"\\n\"),\n        }\n    }\n}\n\nimpl error::Error for ErrResp {}\n\nconst ERR_URL: &str = \"https://ngrok.com/docs/errors\";\n\nimpl fmt::Display for ErrResp {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        self.msg.fmt(f)?;\n        if let Some(code) = self.error_code.as_ref().map(|s| s.to_lowercase()) {\n            write!(f, \"\\n\\n{ERR_URL}/{code}\")?;\n        }\n        Ok(())\n    }\n}\n\nimpl Error for ErrResp {\n    fn error_code(&self) -> Option<&str> {\n        self.error_code.as_deref()\n    }\n    fn msg(&self) -> String {\n        self.msg.clone()\n    }\n}\n\n#[derive(Serialize, Deserialize, Debug, Clone, Default)]\n#[serde(rename_all = \"PascalCase\")]\npub struct Auth {\n    pub version: Vec<String>, // protocol versions supported, ordered by preference\n    pub client_id: String,    // empty for new sessions\n    pub extra: AuthExtra,     // clients may add whatever data they like to auth messages\n}\n\n#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Serialize, Deserialize, Default)]\n#[serde(transparent)]\npub struct SecretBytes(#[serde(with = 
\"base64bytes\")] Vec<u8>);\n\nimpl Deref for SecretBytes {\n    type Target = Vec<u8>;\n    fn deref(&self) -> &Self::Target {\n        &self.0\n    }\n}\n\nimpl DerefMut for SecretBytes {\n    fn deref_mut(&mut self) -> &mut Self::Target {\n        &mut self.0\n    }\n}\n\nimpl<'a> From<&'a [u8]> for SecretBytes {\n    fn from(other: &'a [u8]) -> Self {\n        SecretBytes(other.into())\n    }\n}\n\nimpl From<Vec<u8>> for SecretBytes {\n    fn from(other: Vec<u8>) -> Self {\n        SecretBytes(other)\n    }\n}\n\nimpl fmt::Display for SecretBytes {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        write!(f, \"********\")\n    }\n}\n\nimpl fmt::Debug for SecretBytes {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        write!(f, \"********\")\n    }\n}\n\n#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Serialize, Deserialize, Default)]\n#[serde(transparent)]\npub struct SecretString(String);\n\nimpl Deref for SecretString {\n    type Target = String;\n    fn deref(&self) -> &Self::Target {\n        &self.0\n    }\n}\n\nimpl DerefMut for SecretString {\n    fn deref_mut(&mut self) -> &mut Self::Target {\n        &mut self.0\n    }\n}\n\nimpl<'a> From<&'a str> for SecretString {\n    fn from(other: &'a str) -> Self {\n        SecretString(other.into())\n    }\n}\n\nimpl From<String> for SecretString {\n    fn from(other: String) -> Self {\n        SecretString(other)\n    }\n}\n\nimpl fmt::Display for SecretString {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        write!(f, \"********\")\n    }\n}\n\nimpl fmt::Debug for SecretString {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        write!(f, \"********\")\n    }\n}\n\n#[derive(Serialize, Deserialize, Debug, Clone, Default)]\n#[serde(rename_all = \"PascalCase\")]\npub struct AuthExtra {\n    #[serde(rename = \"OS\")]\n    pub os: String,\n    pub arch: String,\n    pub auth_token: SecretString,\n    pub version: String,\n   
 pub hostname: String,\n    pub user_agent: String,\n    pub metadata: String,\n    pub cookie: SecretString,\n    pub heartbeat_interval: i64,\n    pub heartbeat_tolerance: i64,\n\n    // for each remote operation, these variables define whether the ngrok\n    // client is capable of executing that operation. each capability\n    // is transmitted as a pointer to String, with the following meanings:\n    //\n    // null ->               operation disallowed because the ngrok agent version is too old.\n    //                       this is true because older clients will never set this value\n    //\n    // \"\" (empty String)  -> the operation is supported\n    //\n    // non-empty String   -> the operation is not supported and this value is the user-facing\n    //                       error message describing why it is not supported\n    pub update_unsupported_error: Option<String>,\n    pub stop_unsupported_error: Option<String>,\n    pub restart_unsupported_error: Option<String>,\n\n    pub proxy_type: String,\n    #[serde(rename = \"MutualTLS\")]\n    pub mutual_tls: bool,\n    pub service_run: bool,\n    pub config_version: String,\n    pub custom_interface: bool,\n    #[serde(rename = \"CustomCAs\")]\n    pub custom_cas: bool,\n\n    pub client_type: String,\n}\n\n#[derive(Serialize, Deserialize, Debug, Clone)]\n#[serde(rename_all = \"PascalCase\")]\npub struct AuthResp {\n    pub version: String,\n    pub client_id: String,\n    #[serde(default)]\n    pub extra: AuthRespExtra,\n}\n\nrpc_req!(Auth, AuthResp, AUTH_REQ);\n\n#[derive(Serialize, Deserialize, Debug, Clone, Default)]\n#[serde(rename_all = \"PascalCase\")]\npub struct AuthRespExtra {\n    pub version: Option<String>,\n    pub region: Option<String>,\n    pub cookie: Option<SecretString>,\n    pub account_name: Option<String>,\n    pub session_duration: Option<i64>,\n    pub plan_name: Option<String>,\n    pub banner: Option<String>,\n}\n\n#[derive(Serialize, Deserialize, Debug, 
Clone)]\n#[serde(rename_all = \"PascalCase\")]\npub struct Bind<T> {\n    #[serde(rename = \"Id\")]\n    pub client_id: String,\n    pub proto: String,\n    pub forwards_to: String,\n    pub forwards_proto: String,\n    pub opts: T,\n    pub extra: BindExtra,\n}\n\n#[derive(Debug, Clone)]\n// allowing this since these aren't persistent values.\n#[allow(clippy::large_enum_variant)]\npub enum BindOpts {\n    Http(HttpEndpoint),\n    Tcp(TcpEndpoint),\n    Tls(TlsEndpoint),\n}\n\n#[derive(Serialize, Deserialize, Debug, Clone, Default)]\n#[serde(rename_all = \"PascalCase\")]\npub struct BindExtra {\n    pub token: SecretString,\n    #[serde(rename = \"IPPolicyRef\")]\n    pub ip_policy_ref: String,\n    pub metadata: String,\n    pub bindings: Vec<String>,\n    #[serde(rename = \"PoolingEnabled\")]\n    pub pooling_enabled: bool,\n}\n\n#[derive(Serialize, Deserialize, Debug, Clone)]\n#[serde(rename_all = \"PascalCase\")]\npub struct BindResp<T> {\n    #[serde(rename = \"Id\")]\n    pub client_id: String,\n    #[serde(rename = \"URL\")]\n    pub url: String,\n    pub proto: String,\n    #[serde(rename = \"Opts\")]\n    pub bind_opts: T,\n    pub extra: BindRespExtra,\n}\n\n#[derive(Serialize, Deserialize, Debug, Clone)]\n#[serde(rename_all = \"PascalCase\")]\npub struct BindRespExtra {\n    pub token: SecretString,\n}\n\nrpc_req!(Bind<T>, BindResp<T>, BIND_REQ; T: std::fmt::Debug + Serialize + DeserializeOwned + Clone);\n\n#[derive(Serialize, Deserialize, Debug, Clone)]\n#[serde(rename_all = \"PascalCase\")]\npub struct StartTunnelWithLabel {\n    pub labels: HashMap<String, String>,\n    pub forwards_to: String,\n    pub forwards_proto: String,\n    pub metadata: String,\n}\n\n#[derive(Serialize, Deserialize, Debug, Clone)]\n#[serde(rename_all = \"PascalCase\")]\npub struct StartTunnelWithLabelResp {\n    pub id: String,\n}\n\nrpc_req!(\n    StartTunnelWithLabel,\n    StartTunnelWithLabelResp,\n    BIND_LABELED_REQ\n);\n\n#[derive(Serialize, Deserialize, Debug, 
Clone)]\n#[serde(rename_all = \"PascalCase\")]\npub struct Unbind {\n    #[serde(rename = \"Id\")]\n    pub client_id: String,\n    // extra: not sure what this field actually contains\n}\n\n#[derive(Serialize, Deserialize, Debug, Clone)]\n#[serde(rename_all = \"PascalCase\")]\npub struct UnbindResp {\n    // extra: not sure what this field actually contains\n}\n\nrpc_req!(Unbind, UnbindResp, UNBIND_REQ);\n\n#[derive(Serialize, Deserialize, Debug, Clone)]\n#[serde(rename_all = \"PascalCase\")]\npub struct ProxyHeader {\n    pub id: String,\n    pub client_addr: String,\n    pub proto: String,\n    pub edge_type: EdgeType,\n    #[serde(rename = \"PassthroughTLS\")]\n    pub passthrough_tls: bool,\n}\n\n#[derive(Error, Debug)]\n#[non_exhaustive]\npub enum ReadHeaderError {\n    #[error(\"error reading proxy header\")]\n    Io(#[from] io::Error),\n    #[error(\"invalid utf-8 in proxy header\")]\n    InvalidUtf8(#[from] FromUtf8Error),\n    #[error(\"invalid proxy header json\")]\n    InvalidHeader(#[from] serde_json::Error),\n}\n\nimpl ProxyHeader {\n    pub async fn read_from_stream(\n        mut stream: impl AsyncRead + Unpin,\n    ) -> Result<Self, ReadHeaderError> {\n        let size = stream.read_i64_le().await?;\n        let mut buf = vec![0u8; size as usize];\n\n        stream.read_exact(&mut buf).await?;\n\n        let header = String::from_utf8(buf)?;\n\n        debug!(?header, \"read header\");\n\n        Ok(serde_json::from_str(&header)?)\n    }\n}\n\n/// The edge type for an incoming connection.\n#[derive(Copy, Clone, Debug, PartialEq, Eq)]\npub enum EdgeType {\n    /// EdgeType Undefined\n    Undefined,\n    /// A TCP Edge\n    Tcp,\n    /// A TLS Edge\n    Tls,\n    /// An HTTPS Edge\n    Https,\n}\n\nimpl FromStr for EdgeType {\n    type Err = ();\n    fn from_str(s: &str) -> Result<Self, Self::Err> {\n        Ok(match s {\n            \"1\" => EdgeType::Tcp,\n            \"2\" => EdgeType::Tls,\n            \"3\" => EdgeType::Https,\n            _ => 
EdgeType::Undefined,\n        })\n    }\n}\n\nimpl EdgeType {\n    pub(crate) fn as_str(self) -> &'static str {\n        match self {\n            EdgeType::Undefined => \"0\",\n            EdgeType::Tcp => \"1\",\n            EdgeType::Tls => \"2\",\n            EdgeType::Https => \"3\",\n        }\n    }\n}\n\nimpl Serialize for EdgeType {\n    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>\n    where\n        S: serde::Serializer,\n    {\n        serializer.serialize_str(self.as_str())\n    }\n}\n\nstruct EdgeTypeVisitor;\n\nimpl<'de> Visitor<'de> for EdgeTypeVisitor {\n    type Value = EdgeType;\n    fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {\n        formatter.write_str(r#\"\"0\", \"1\", \"2\", or \"3\"\"#)\n    }\n\n    fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>\n    where\n        E: serde::de::Error,\n    {\n        Ok(EdgeType::from_str(v).unwrap())\n    }\n}\n\nimpl<'de> Deserialize<'de> for EdgeType {\n    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>\n    where\n        D: serde::Deserializer<'de>,\n    {\n        deserializer.deserialize_str(EdgeTypeVisitor)\n    }\n}\n\n/// A request from the ngrok dashboard for the agent to stop.\n#[derive(Serialize, Deserialize, Debug, Clone, Copy, Default)]\n#[serde(rename_all = \"PascalCase\")]\npub struct Stop {}\n\n/// Common response structure for all remote commands originating from the ngrok\n/// dashboard.\n#[derive(Serialize, Deserialize, Debug, Clone, Default)]\n#[serde(rename_all = \"PascalCase\")]\npub struct CommandResp {\n    /// The error arising from command handling, if any.\n    #[serde(default, skip_serializing_if = \"Option::is_none\")]\n    pub error: Option<String>,\n}\n\npub type StopResp = CommandResp;\n\nrpc_req!(Stop, StopResp, STOP_REQ);\n\n/// A request from the ngrok dashboard for the agent to restart.\n#[derive(Serialize, Deserialize, Debug, Clone, Copy, Default)]\n#[serde(rename_all = 
\"PascalCase\")]\npub struct Restart {}\n\npub type RestartResp = CommandResp;\nrpc_req!(Restart, RestartResp, RESTART_REQ);\n\n/// A request from the ngrok dashboard for the agent to update itself.\n#[derive(Serialize, Deserialize, Debug, Clone, Default)]\n#[serde(rename_all = \"PascalCase\")]\npub struct Update {\n    /// The version that the agent is requested to update to.\n    pub version: String,\n    /// Whether or not updating to the same major version is sufficient.\n    pub permit_major_version: bool,\n}\n\n/// A request from remote to stop a tunnel\n#[derive(Serialize, Deserialize, Debug, Clone, Default)]\n#[serde(rename_all = \"PascalCase\")]\npub struct StopTunnel {\n    /// The id of the tunnel to stop\n    #[serde(rename = \"Id\")]\n    pub client_id: String,\n    /// The message on why this tunnel was stopped\n    pub message: String,\n    /// An optional ngrok error code\n    pub error_code: Option<String>,\n}\n\npub type UpdateResp = CommandResp;\nrpc_req!(Update, UpdateResp, UPDATE_REQ);\n\n/// The version of [PROXY protocol](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt)\n/// to use with this tunnel.\n///\n/// [ProxyProto::None] disables PROXY protocol support.\n#[derive(Debug, Copy, Clone, Default, Eq, PartialEq)]\npub enum ProxyProto {\n    /// No PROXY protocol\n    #[default]\n    None,\n    /// PROXY protocol v1\n    V1,\n    /// PROXY protocol v2\n    V2,\n}\n\nimpl From<ProxyProto> for i64 {\n    fn from(other: ProxyProto) -> Self {\n        use ProxyProto::*;\n        match other {\n            None => 0,\n            V1 => 1,\n            V2 => 2,\n        }\n    }\n}\n\nimpl From<i64> for ProxyProto {\n    fn from(other: i64) -> Self {\n        use ProxyProto::*;\n        match other {\n            1 => V1,\n            2 => V2,\n            _ => None,\n        }\n    }\n}\n\n#[derive(Debug, Clone, Error)]\n#[error(\"invalid proxyproto string: {}\", .0)]\npub struct InvalidProxyProtoString(String);\n\nimpl FromStr for 
ProxyProto {\n    type Err = InvalidProxyProtoString;\n    fn from_str(s: &str) -> Result<Self, Self::Err> {\n        use ProxyProto::*;\n        Ok(match s {\n            \"\" => None,\n            \"1\" => V1,\n            \"2\" => V2,\n            _ => return Err(InvalidProxyProtoString(s.into())),\n        })\n    }\n}\n\nimpl Serialize for ProxyProto {\n    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>\n    where\n        S: serde::Serializer,\n    {\n        serializer.serialize_i64(i64::from(*self))\n    }\n}\n\nstruct ProxyProtoVisitor;\n\nimpl<'de> Visitor<'de> for ProxyProtoVisitor {\n    type Value = ProxyProto;\n    fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {\n        formatter.write_str(\"0, 1, or 2\")\n    }\n\n    fn visit_i64<E>(self, v: i64) -> Result<Self::Value, E>\n    where\n        E: serde::de::Error,\n    {\n        Ok(ProxyProto::from(v))\n    }\n\n    fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E>\n    where\n        E: serde::de::Error,\n    {\n        Ok(ProxyProto::from(v as i64))\n    }\n}\n\nimpl<'de> Deserialize<'de> for ProxyProto {\n    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>\n    where\n        D: serde::Deserializer<'de>,\n    {\n        deserializer.deserialize_i64(ProxyProtoVisitor)\n    }\n}\n\n#[derive(Debug, Serialize, Deserialize, Clone)]\n#[serde(untagged)]\npub enum PolicyWrapper {\n    #[serde(serialize_with = \"serialize_policy\")]\n    Policy(Policy),\n    String(String),\n}\n\nimpl From<String> for PolicyWrapper {\n    fn from(value: String) -> Self {\n        PolicyWrapper::String(value)\n    }\n}\n\n#[derive(Serialize, Deserialize, Debug, Clone, Default)]\n#[serde(rename_all = \"PascalCase\")]\npub struct HttpEndpoint {\n    #[serde(default)]\n    pub domain: String,\n    pub hostname: String,\n    pub auth: String,\n    pub subdomain: String,\n    pub host_header_rewrite: bool,\n    pub local_url_scheme: Option<String>,\n    pub 
proxy_proto: ProxyProto,\n\n    pub compression: Option<Compression>,\n    pub circuit_breaker: Option<CircuitBreaker>,\n    #[serde(rename = \"IPRestriction\")]\n    pub ip_restriction: Option<IpRestriction>,\n    pub basic_auth: Option<BasicAuth>,\n    #[serde(rename = \"OAuth\")]\n    pub oauth: Option<Oauth>,\n    #[serde(rename = \"OIDC\")]\n    pub oidc: Option<Oidc>,\n    pub webhook_verification: Option<WebhookVerification>,\n    #[serde(rename = \"MutualTLSCA\")]\n    pub mutual_tls_ca: Option<MutualTls>,\n    #[serde(default)]\n    pub request_headers: Option<Headers>,\n    #[serde(default)]\n    pub response_headers: Option<Headers>,\n    #[serde(rename = \"WebsocketTCPConverter\")]\n    pub websocket_tcp_converter: Option<WebsocketTcpConverter>,\n    #[serde(rename = \"UserAgentFilter\")]\n    pub user_agent_filter: Option<UserAgentFilter>,\n    #[serde(rename = \"TrafficPolicy\")]\n    pub traffic_policy: Option<PolicyWrapper>,\n}\n\n#[derive(Debug, Clone, Copy, Serialize, Deserialize)]\npub struct Compression {}\n\nfn is_default<T>(v: &T) -> bool\nwhere\n    T: PartialEq<T> + Default,\n{\n    T::default() == *v\n}\n\n#[derive(Debug, Clone, Copy, Serialize, Deserialize)]\npub struct CircuitBreaker {\n    #[serde(default, skip_serializing_if = \"is_default\")]\n    pub error_threshold: f64,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct BasicAuth {\n    #[serde(default, skip_serializing_if = \"is_default\")]\n    pub credentials: Vec<BasicAuthCredential>,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]\npub struct BasicAuthCredential {\n    pub username: String,\n    #[serde(default, skip_serializing_if = \"is_default\")]\n    pub cleartext_password: String,\n    #[serde(default, skip_serializing_if = \"is_default\")]\n    #[serde(with = \"base64bytes\")]\n    pub hashed_password: Vec<u8>,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct IpRestriction {\n    #[serde(default, skip_serializing_if = 
\"is_default\")]\n    pub allow_cidrs: Vec<String>,\n    #[serde(default, skip_serializing_if = \"is_default\")]\n    pub deny_cidrs: Vec<String>,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct Oauth {\n    pub provider: String,\n    #[serde(default, skip_serializing_if = \"is_default\")]\n    pub client_id: String,\n    #[serde(default, skip_serializing_if = \"is_default\")]\n    pub client_secret: SecretString,\n    #[serde(default, skip_serializing_if = \"is_default\")]\n    #[serde(with = \"base64bytes\")]\n    pub sealed_client_secret: Vec<u8>,\n    #[serde(default, skip_serializing_if = \"is_default\")]\n    pub allow_emails: Vec<String>,\n    #[serde(default, skip_serializing_if = \"is_default\")]\n    pub allow_domains: Vec<String>,\n    #[serde(default, skip_serializing_if = \"is_default\")]\n    pub scopes: Vec<String>,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct Oidc {\n    pub issuer_url: String,\n    #[serde(default, skip_serializing_if = \"is_default\")]\n    pub client_id: String,\n    #[serde(default, skip_serializing_if = \"is_default\")]\n    pub client_secret: SecretString,\n    #[serde(default, skip_serializing_if = \"is_default\")]\n    #[serde(with = \"base64bytes\")]\n    pub sealed_client_secret: Vec<u8>,\n    #[serde(default, skip_serializing_if = \"is_default\")]\n    pub allow_emails: Vec<String>,\n    #[serde(default, skip_serializing_if = \"is_default\")]\n    pub allow_domains: Vec<String>,\n    #[serde(default, skip_serializing_if = \"is_default\")]\n    pub scopes: Vec<String>,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct WebhookVerification {\n    pub provider: String,\n    #[serde(default, skip_serializing_if = \"is_default\")]\n    pub secret: SecretString,\n    #[serde(default, skip_serializing_if = \"is_default\")]\n    #[serde(with = \"base64bytes\")]\n    pub sealed_secret: Vec<u8>,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct MutualTls {\n    
#[serde(default, skip_serializing_if = \"is_default\")]\n    #[serde(with = \"base64bytes\")]\n    // this is snake-case on the wire\n    pub mutual_tls_ca: Vec<u8>,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Headers {\n    #[serde(default, skip_serializing_if = \"is_default\")]\n    pub add: Vec<String>,\n    #[serde(default, skip_serializing_if = \"is_default\")]\n    pub remove: Vec<String>,\n    #[serde(default, skip_serializing_if = \"is_default\")]\n    pub add_parsed: HashMap<String, String>,\n}\n\n#[derive(Debug, Clone, Copy, Serialize, Deserialize)]\npub struct WebsocketTcpConverter {}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct UserAgentFilter {\n    #[serde(default, skip_serializing_if = \"is_default\")]\n    pub allow: Vec<String>,\n    #[serde(default, skip_serializing_if = \"is_default\")]\n    pub deny: Vec<String>,\n}\n\n#[derive(Serialize, Deserialize, Debug, Clone, Default)]\n#[serde(rename_all = \"PascalCase\")]\npub struct TcpEndpoint {\n    pub addr: String,\n    pub proxy_proto: ProxyProto,\n    #[serde(rename = \"IPRestriction\")]\n    pub ip_restriction: Option<IpRestriction>,\n    #[serde(rename = \"TrafficPolicy\")]\n    pub traffic_policy: Option<PolicyWrapper>,\n}\n\n#[derive(Serialize, Deserialize, Debug, Clone, Default)]\n#[serde(rename_all = \"PascalCase\")]\npub struct TlsEndpoint {\n    #[serde(default)]\n    pub domain: String,\n    pub hostname: String,\n    pub subdomain: String,\n    pub proxy_proto: ProxyProto,\n    #[serde(rename = \"MutualTLSAtAgent\")]\n    pub mutual_tls_at_agent: bool,\n\n    #[serde(rename = \"MutualTLSAtEdge\")]\n    pub mutual_tls_at_edge: Option<MutualTls>,\n    #[serde(rename = \"TLSTermination\")]\n    pub tls_termination: Option<TlsTermination>,\n    #[serde(rename = \"IPRestriction\")]\n    pub ip_restriction: Option<IpRestriction>,\n    #[serde(rename = \"TrafficPolicy\")]\n    pub traffic_policy: 
Option<PolicyWrapper>,\n}\n\n#[derive(Serialize, Deserialize, Debug, Clone, Default)]\npub struct TlsTermination {\n    #[serde(default, with = \"base64bytes\", skip_serializing_if = \"is_default\")]\n    pub cert: Vec<u8>,\n    #[serde(skip_serializing_if = \"is_default\", default)]\n    pub key: SecretBytes,\n    #[serde(default, with = \"base64bytes\", skip_serializing_if = \"is_default\")]\n    pub sealed_key: Vec<u8>,\n}\n\n#[derive(Serialize, Deserialize, Debug, Clone, Default)]\n#[serde(rename_all = \"PascalCase\", default)]\npub struct Policy {\n    pub inbound: Vec<Rule>,\n    pub outbound: Vec<Rule>,\n}\n\n#[derive(Serialize, Deserialize, Debug, Clone, Default)]\n#[serde(rename_all = \"PascalCase\", default)]\npub struct Rule {\n    pub name: String,\n    pub expressions: Vec<String>,\n    pub actions: Vec<Action>,\n}\n\n#[derive(Serialize, Deserialize, Debug, Clone, Default)]\n#[serde(rename_all = \"PascalCase\", default)]\npub struct Action {\n    #[serde(rename = \"Type\")]\n    pub type_: String,\n    #[serde(default, with = \"vec_to_json\", skip_serializing_if = \"is_default\")]\n    pub config: Vec<u8>,\n}\n\n// This function converts a Policy into a valid JSON string. This is used so legacy configurations will still work\n// using the new string \"TrafficPolicy\" field.\nfn serialize_policy<S: Serializer>(v: &Policy, s: S) -> Result<S::Ok, S::Error> {\n    let abc = match serde_json::to_string(v) {\n        Ok(t) => t,\n        Err(_) => {\n            return Err(serde::ser::Error::custom(\n                \"policy could not be converted to valid json\",\n            ))\n        }\n    };\n    s.serialize_str(&abc)\n}\n\n// These are helpers to convert base64 strings to full, real json. 
The serialize helper also ensures that the resulting\n// representation isn't a string-escaped string.\nmod vec_to_json {\n    use serde::{\n        Deserialize,\n        Deserializer,\n        Serialize,\n        Serializer,\n    };\n\n    pub fn serialize<S: Serializer>(v: &[u8], s: S) -> Result<S::Ok, S::Error> {\n        let u: serde_json::Value = match serde_json::from_slice(v) {\n            Ok(k) => k,\n            Err(_) => return Err(serde::ser::Error::custom(\"Config is invalid JSON\")),\n        };\n\n        u.serialize(s)\n    }\n\n    pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result<Vec<u8>, D::Error> {\n        let s = serde_json::Map::deserialize(d)?;\n        let v = serde_json::to_vec(&s).unwrap();\n        Ok(v)\n    }\n}\n\n// These are helpers to facilitate the Vec<u8> <-> base64-encoded bytes\n// representation that the Go messages use\nmod base64bytes {\n    use base64::prelude::*;\n    use serde::{\n        Deserialize,\n        Deserializer,\n        Serialize,\n        Serializer,\n    };\n\n    pub fn serialize<S: Serializer>(v: &Vec<u8>, s: S) -> Result<S::Ok, S::Error> {\n        BASE64_STANDARD.encode(v).serialize(s)\n    }\n\n    pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result<Vec<u8>, D::Error> {\n        let s = String::deserialize(d)?;\n        BASE64_STANDARD\n            .decode(s.as_bytes())\n            .map_err(serde::de::Error::custom)\n    }\n}\n\n#[cfg(test)]\nmod test {\n\n    use super::*;\n\n    #[test]\n    fn test_proxy_proto_serde() {\n        let input = \"2\";\n\n        let p: ProxyProto = serde_json::from_str(input).unwrap();\n\n        assert!(matches!(p, ProxyProto::V2));\n\n        assert_eq!(serde_json::to_string(&p).unwrap(), \"2\");\n    }\n\n    pub(crate) const POLICY_JSON: &str = r###\"{\"Inbound\":[{\"Name\":\"test_in\",\"Expressions\":[\"req.Method == 'PUT'\"],\"Actions\":[{\"Type\":\"deny\"}]}],\"Outbound\":[{\"Name\":\"test_out\",\"Expressions\":[\"res.StatusCode == 
'200'\"],\"Actions\":[{\"Type\":\"custom-response\",\"Config\":{\"status_code\":201}}]}]}\"###;\n\n    #[test]\n    fn test_policy_proto_serde() {\n        let policy: Policy = serde_json::from_str(POLICY_JSON).unwrap();\n\n        // mainly just interested in checking outbound, as that has the\n        // special vec serialization\n        assert_eq!(1, policy.outbound.len());\n        let outbound = &policy.outbound[0];\n        assert_eq!(1, outbound.actions.len());\n        let action = &outbound.actions[0];\n        assert_eq!(r#\"{\"status_code\":201}\"#.as_bytes(), action.config);\n\n        assert_eq!(serde_json::to_string(&policy).unwrap(), POLICY_JSON);\n    }\n}\n"
  },
  {
    "path": "ngrok/src/internals/raw_session.rs",
    "content": "use std::{\n    collections::HashMap,\n    fmt::Debug,\n    future::Future,\n    io,\n    ops::{\n        Deref,\n        DerefMut,\n    },\n    sync::Arc,\n};\n\nuse async_trait::async_trait;\nuse muxado::{\n    heartbeat::{\n        HeartbeatConfig,\n        HeartbeatCtl,\n    },\n    typed::{\n        StreamType,\n        TypedAccept,\n        TypedOpenClose,\n        TypedSession,\n        TypedStream,\n    },\n    Error as MuxadoError,\n    SessionBuilder,\n};\nuse serde::{\n    de::DeserializeOwned,\n    Deserialize,\n};\nuse thiserror::Error;\nuse tokio::{\n    io::{\n        AsyncRead,\n        AsyncReadExt,\n        AsyncWrite,\n        AsyncWriteExt,\n    },\n    runtime::Handle,\n};\nuse tokio_util::either::Either;\nuse tracing::{\n    debug,\n    instrument,\n    warn,\n};\n\nuse super::{\n    proto::{\n        Auth,\n        AuthExtra,\n        AuthResp,\n        Bind,\n        BindExtra,\n        BindOpts,\n        BindResp,\n        CommandResp,\n        ErrResp,\n        Error,\n        ProxyHeader,\n        ReadHeaderError,\n        Restart,\n        StartTunnelWithLabel,\n        StartTunnelWithLabelResp,\n        Stop,\n        StopTunnel,\n        Unbind,\n        UnbindResp,\n        Update,\n        PROXY_REQ,\n        RESTART_REQ,\n        STOP_REQ,\n        STOP_TUNNEL_REQ,\n        UPDATE_REQ,\n        VERSION,\n    },\n    rpc::RpcRequest,\n};\nuse crate::{\n    tunnel::AcceptError::ListenerClosed,\n    Session,\n};\n\n/// Errors arising from tunneling protocol RPC calls.\n#[derive(Error, Debug)]\n#[non_exhaustive]\npub enum RpcError {\n    /// Failed to open a new stream to start the RPC call.\n    #[error(\"failed to open muxado stream\")]\n    Open(#[source] MuxadoError),\n    /// Some non-Open transport error occurred\n    #[error(\"transport error\")]\n    Transport(#[source] MuxadoError),\n    /// Failed to send the request over the stream.\n    #[error(\"error sending rpc request\")]\n    Send(#[source] io::Error),\n 
   /// Failed to read the RPC response from the stream.\n    #[error(\"error reading rpc response\")]\n    Receive(#[source] io::Error),\n    /// The RPC response was invalid.\n    #[error(\"failed to deserialize rpc response\")]\n    InvalidResponse(#[from] serde_json::Error),\n    /// There was an error in the RPC response.\n    #[error(\"rpc error response:\\n{0}\")]\n    Response(ErrResp),\n}\n\nimpl Error for RpcError {\n    fn error_code(&self) -> Option<&str> {\n        match self {\n            RpcError::Response(resp) => resp.error_code(),\n            _ => None,\n        }\n    }\n\n    fn msg(&self) -> String {\n        match self {\n            RpcError::Response(resp) => resp.msg(),\n            _ => format!(\"{self}\"),\n        }\n    }\n}\n\n#[derive(Error, Debug)]\n#[non_exhaustive]\npub enum StartSessionError {\n    #[error(\"failed to start heartbeat task\")]\n    StartHeartbeat(#[from] io::Error),\n}\n\n#[derive(Error, Debug)]\n#[non_exhaustive]\npub enum AcceptError {\n    #[error(\"transport error when accepting connection\")]\n    Transport(#[from] MuxadoError),\n    #[error(transparent)]\n    Header(#[from] ReadHeaderError),\n    #[error(\"invalid stream type: {0}\")]\n    InvalidType(StreamType),\n}\n\npub struct RpcClient {\n    // This is held so that the heartbeat task doesn't get shutdown. 
Eventually\n    // we may use it to request heartbeats via the `Session`.\n    _heartbeat: HeartbeatCtl,\n    open: Box<dyn TypedOpenClose + Send>,\n}\n\npub struct IncomingStreams {\n    runtime: Handle,\n    handlers: CommandHandlers,\n    pub(crate) session: Option<Session>,\n    accept: Box<dyn TypedAccept + Send>,\n}\n\npub struct RawSession {\n    client: RpcClient,\n    incoming: IncomingStreams,\n}\n\nimpl Deref for RawSession {\n    type Target = RpcClient;\n    fn deref(&self) -> &Self::Target {\n        &self.client\n    }\n}\n\nimpl DerefMut for RawSession {\n    fn deref_mut(&mut self) -> &mut Self::Target {\n        &mut self.client\n    }\n}\n\n/// Trait for a type that can handle a command from the ngrok dashboard.\n#[async_trait]\npub trait CommandHandler<T>: Send + Sync + 'static {\n    /// Handle the remote command.\n    async fn handle_command(&self, req: T) -> Result<(), String>;\n}\n\n#[async_trait]\nimpl<R, T, F> CommandHandler<R> for T\nwhere\n    R: Send + 'static,\n    T: Fn(R) -> F + Send + Sync + 'static,\n    F: Future<Output = Result<(), String>> + Send,\n{\n    async fn handle_command(&self, req: R) -> Result<(), String> {\n        self(req).await\n    }\n}\n\n#[derive(Default, Clone)]\npub struct CommandHandlers {\n    pub on_restart: Option<Arc<dyn CommandHandler<Restart>>>,\n    pub on_update: Option<Arc<dyn CommandHandler<Update>>>,\n    pub on_stop: Option<Arc<dyn CommandHandler<Stop>>>,\n}\n\nimpl RawSession {\n    pub async fn start<S, H>(\n        io_stream: S,\n        heartbeat: HeartbeatConfig,\n        handlers: H,\n    ) -> Result<Self, StartSessionError>\n    where\n        S: AsyncRead + AsyncWrite + Send + 'static,\n        H: Into<Option<CommandHandlers>>,\n    {\n        let mux_sess = SessionBuilder::new(io_stream).start();\n\n        let handlers = handlers.into().unwrap_or_default();\n\n        let typed = muxado::typed::Typed::new(mux_sess);\n        let (heartbeat, hbctl) = 
muxado::heartbeat::Heartbeat::start(typed, heartbeat).await?;\n        let (open, accept) = heartbeat.split_typed();\n\n        let runtime = Handle::current();\n\n        let sess = RawSession {\n            client: RpcClient {\n                _heartbeat: hbctl,\n                open: Box::new(open),\n            },\n            incoming: IncomingStreams {\n                runtime,\n                handlers,\n                session: None,\n                accept: Box::new(accept),\n            },\n        };\n\n        Ok(sess)\n    }\n\n    pub fn split(self) -> (RpcClient, IncomingStreams) {\n        (self.client, self.incoming)\n    }\n}\n\nimpl RpcClient {\n    #[instrument(level = \"debug\", skip(self))]\n    async fn rpc<R: RpcRequest>(&mut self, req: R) -> Result<R::Response, RpcError> {\n        let mut stream = self\n            .open\n            .open_typed(R::TYPE)\n            .await\n            .map_err(RpcError::Open)?;\n        let s = serde_json::to_string(&req)\n            // This should never happen, since we control the request types and\n            // know that they will always serialize correctly. 
Just in case\n            // though, call them \"Send\" errors.\n            .map_err(io::Error::other)\n            .map_err(RpcError::Send)?;\n\n        stream\n            .write_all(s.as_bytes())\n            .await\n            .map_err(RpcError::Send)?;\n\n        let mut buf = Vec::new();\n        stream\n            .read_to_end(&mut buf)\n            .await\n            .map_err(RpcError::Receive)?;\n\n        #[derive(Debug, Deserialize)]\n        struct ErrResp {\n            #[serde(rename = \"Error\")]\n            error: String,\n        }\n\n        let ok_resp = serde_json::from_slice::<R::Response>(&buf);\n        let err_resp = serde_json::from_slice::<ErrResp>(&buf);\n\n        if let Ok(err) = err_resp {\n            if !err.error.is_empty() {\n                debug!(?err, \"decoded rpc error response\");\n                return Err(RpcError::Response(err.error.as_str().into()));\n            }\n        }\n\n        debug!(resp = ?ok_resp, \"decoded rpc response\");\n\n        Ok(ok_resp?)\n    }\n\n    /// Close the raw ngrok session with a \"None\" muxado error.\n    pub async fn close(&mut self) -> Result<(), RpcError> {\n        self.open\n            .close(MuxadoError::None, \"\".into())\n            .await\n            .map_err(RpcError::Transport)?;\n        Ok(())\n    }\n\n    #[instrument(level = \"debug\", skip(self))]\n    pub async fn auth(\n        &mut self,\n        id: impl Into<String> + Debug,\n        extra: AuthExtra,\n    ) -> Result<AuthResp, RpcError> {\n        let id = id.into();\n        let req = Auth {\n            client_id: id.clone(),\n            extra,\n            version: VERSION.iter().map(|&x| x.into()).collect(),\n        };\n\n        let resp = self.rpc(req).await?;\n\n        Ok(resp)\n    }\n\n    #[instrument(level = \"debug\", skip(self))]\n    pub async fn listen(\n        &mut self,\n        protocol: impl Into<String> + Debug,\n        opts: BindOpts,\n        extra: BindExtra,\n        id: impl 
Into<String> + Debug,\n        forwards_to: impl Into<String> + Debug,\n        forwards_proto: impl Into<String> + Debug,\n    ) -> Result<BindResp<BindOpts>, RpcError> {\n        // Sorry, this is awful. Serde untagged unions are pretty fraught and\n        // hard to debug, so we're using this macro to specialize this call\n        // based on the enum variant. It drops down to the type wrapped in the\n        // enum for the actual request/response, and then re-wraps it on the way\n        // back out in the same variant.\n        // It's probably an artifact of the go -> rust translation, and could be\n        // fixed with enough refactoring and rearchitecting. But it works well\n        // enough for now and is pretty localized.\n        macro_rules! match_variant {\n            ($v:expr, $($var:tt),*) => {\n                match opts {\n                    $(BindOpts::$var (opts) => {\n                        let req = Bind {\n                            client_id: id.into(),\n                            proto: protocol.into(),\n                            forwards_to: forwards_to.into(),\n                            forwards_proto: forwards_proto.into(),\n                            opts,\n                            extra,\n                        };\n\n                        let resp = self.rpc(req).await?;\n                        BindResp {\n                            bind_opts: BindOpts::$var(resp.bind_opts),\n                            client_id: resp.client_id,\n                            url: resp.url,\n                            extra: resp.extra,\n                            proto: resp.proto,\n                        }\n                    })*\n                }\n            };\n        }\n        Ok(match_variant!(opts, Http, Tcp, Tls))\n    }\n\n    #[instrument(level = \"debug\", skip(self))]\n    pub async fn listen_label(\n        &mut self,\n        labels: HashMap<String, String>,\n        metadata: impl Into<String> + Debug,\n       
 forwards_to: impl Into<String> + Debug,\n        forwards_proto: impl Into<String> + Debug,\n    ) -> Result<StartTunnelWithLabelResp, RpcError> {\n        let req = StartTunnelWithLabel {\n            labels,\n            metadata: metadata.into(),\n            forwards_to: forwards_to.into(),\n            forwards_proto: forwards_proto.into(),\n        };\n\n        self.rpc(req).await\n    }\n\n    #[instrument(level = \"debug\", skip(self))]\n    pub async fn unlisten(\n        &mut self,\n        id: impl Into<String> + Debug,\n    ) -> Result<UnbindResp, RpcError> {\n        self.rpc(Unbind {\n            client_id: id.into(),\n        })\n        .await\n    }\n}\n\npub const NOT_IMPLEMENTED: &str = \"the agent has not defined a callback for this operation\";\n\nasync fn read_req<T>(stream: &mut TypedStream) -> Result<T, Either<io::Error, serde_json::Error>>\nwhere\n    T: DeserializeOwned + Debug + 'static,\n{\n    debug!(\"reading request from stream\");\n    let mut buf = vec![];\n    let req = serde_json::from_value(loop {\n        let mut tmp = vec![0u8; 256];\n        let bytes = stream.read(&mut tmp).await.map_err(Either::Left)?;\n        buf.extend_from_slice(&tmp[..bytes]);\n\n        if let Ok(obj) = serde_json::from_slice::<serde_json::Value>(&buf) {\n            break obj;\n        }\n    })\n    .map_err(Either::Right)?;\n    debug!(?req, \"read request from stream\");\n    Ok(req)\n}\n\nasync fn handle_req<T>(\n    handler: Option<Arc<dyn CommandHandler<T>>>,\n    mut stream: TypedStream,\n) -> Result<(), Either<io::Error, serde_json::Error>>\nwhere\n    T: DeserializeOwned + Debug + 'static,\n{\n    let res = async {\n        let req = read_req(&mut stream).await?;\n        let resp = if let Some(handler) = handler {\n            debug!(\"running command handler\");\n            handler.handle_command(req).await.err()\n        } else {\n            Some(NOT_IMPLEMENTED.into())\n        };\n\n        debug!(?resp, \"writing response to 
stream\");\n\n        let resp_json = serde_json::to_vec(&CommandResp { error: resp }).map_err(Either::Right)?;\n\n        stream\n            .write_all(resp_json.as_slice())\n            .await\n            .map_err(Either::Left)?;\n\n        Ok(())\n    }\n    .await;\n\n    if let Err(e) = &res {\n        warn!(?e, \"error when handling dashboard command\");\n    }\n\n    res\n}\n\nimpl IncomingStreams {\n    pub async fn accept(&mut self) -> Result<TunnelStream, AcceptError> {\n        Ok(loop {\n            let mut stream = self.accept.accept_typed().await?;\n\n            match stream.typ() {\n                RESTART_REQ => {\n                    self.runtime\n                        .spawn(handle_req(self.handlers.on_restart.clone(), stream));\n                }\n                UPDATE_REQ => {\n                    self.runtime\n                        .spawn(handle_req(self.handlers.on_update.clone(), stream));\n                }\n                STOP_REQ => {\n                    self.runtime\n                        .spawn(handle_req(self.handlers.on_stop.clone(), stream));\n                }\n                STOP_TUNNEL_REQ => {\n                    // close the tunnel through the session\n                    if let Some(session) = &self.session {\n                        let req =\n                            read_req::<StopTunnel>(&mut stream)\n                                .await\n                                .map_err(|e| match e {\n                                    Either::Left(err) => ReadHeaderError::from(err),\n                                    Either::Right(err) => ReadHeaderError::from(err),\n                                })?;\n                        session\n                            .close_tunnel_with_error(\n                                req.client_id,\n                                ListenerClosed {\n                                    message: req.message,\n                                    error_code: req.error_code,\n  
                              },\n                            )\n                            .await;\n                    }\n                }\n                PROXY_REQ => {\n                    let header = ProxyHeader::read_from_stream(&mut *stream).await?;\n\n                    break TunnelStream { header, stream };\n                }\n                t => return Err(AcceptError::InvalidType(t)),\n            }\n        })\n    }\n}\n\npub struct TunnelStream {\n    pub header: ProxyHeader,\n    pub stream: TypedStream,\n}\n"
  },
  {
    "path": "ngrok/src/internals/rpc.rs",
    "content": "use std::fmt::Debug;\n\nuse muxado::typed::StreamType;\nuse serde::{\n    de::DeserializeOwned,\n    Serialize,\n};\n\npub trait RpcRequest: Serialize + Debug {\n    type Response: DeserializeOwned + Debug;\n    const TYPE: StreamType;\n}\n\nmacro_rules! rpc_req {\n    ($req:ty, $resp:ty, $typ:expr; $($t:tt)*) => {\n        impl <$($t)*> $crate::internals::rpc::RpcRequest for $req\n        {\n            type Response = $resp;\n            const TYPE: StreamType = $typ;\n        }\n    };\n    ($req:ty, $resp:ty, $typ:expr) => {\n        impl $crate::internals::rpc::RpcRequest for $req {\n            type Response = $resp;\n            const TYPE: StreamType = $typ;\n        }\n    };\n}\n"
  },
  {
    "path": "ngrok/src/lib.rs",
    "content": "#![doc = include_str!(\"../README.md\")]\n#![warn(missing_docs)]\n#![cfg_attr(docsrs, feature(doc_cfg))]\n\nmod internals {\n    #[macro_use]\n    pub mod rpc;\n    pub mod proto;\n    pub mod raw_session;\n}\n\n/// Tunnel and endpoint configuration types.\npub mod config {\n    #[macro_use]\n    mod common;\n    pub use common::*;\n\n    mod headers;\n    mod http;\n    pub use self::http::*;\n    mod labeled;\n    pub use labeled::*;\n    mod oauth;\n    pub use oauth::*;\n    mod oidc;\n    pub use policies::*;\n    mod policies;\n    pub use oidc::*;\n    mod tcp;\n    pub use tcp::*;\n    mod tls;\n    pub use tls::*;\n    mod webhook_verification;\n}\n\nmod proxy_proto;\n\n/// Types for working with the ngrok session.\npub mod session;\n/// Types for working with ngrok tunnels.\npub mod tunnel;\n\n/// Types for working with ngrok connections.\npub mod conn;\n\n/// Types for working with connection forwarders.\npub mod forwarder;\nmod tunnel_ext;\n\n#[doc(inline)]\npub use conn::{\n    Conn,\n    EdgeConn,\n    EndpointConn,\n};\n#[doc(inline)]\npub use internals::proto::Error;\n#[doc(inline)]\npub use session::Session;\n#[doc(inline)]\npub use tunnel::Tunnel;\n\n/// A prelude of traits for working with ngrok types.\npub mod prelude {\n    #[allow(deprecated)]\n    #[doc(inline)]\n    pub use crate::{\n        config::{\n            Action,\n            ForwarderBuilder,\n            HttpTunnelBuilder,\n            InvalidPolicy,\n            LabeledTunnelBuilder,\n            OauthOptions,\n            OidcOptions,\n            Policy,\n            ProxyProto,\n            Rule,\n            Scheme,\n            TcpTunnelBuilder,\n            TlsTunnelBuilder,\n            TunnelBuilder,\n        },\n        conn::{\n            Conn,\n            ConnInfo,\n            EdgeConnInfo,\n            EndpointConnInfo,\n        },\n        internals::proto::EdgeType,\n        internals::proto::Error,\n        tunnel::{\n            EdgeInfo,\n      
      EndpointInfo,\n            Tunnel,\n            TunnelCloser,\n            TunnelInfo,\n        },\n        tunnel_ext::TunnelExt,\n    };\n}\n\n#[cfg(all(test, feature = \"online-tests\"))]\nmod online_tests;\n"
  },
  {
    "path": "ngrok/src/online_tests.rs",
    "content": "use std::{\n    convert::Infallible,\n    error::Error,\n    io,\n    io::prelude::*,\n    net::SocketAddr,\n    str::FromStr,\n    sync::{\n        atomic::{\n            AtomicUsize,\n            Ordering,\n        },\n        Arc,\n    },\n    time::Duration,\n};\n\nuse anyhow::anyhow;\nuse axum::{\n    routing::get,\n    BoxError,\n    Router,\n};\nuse bytes::Bytes;\nuse flate2::read::GzDecoder;\nuse futures::{\n    channel::oneshot,\n    prelude::*,\n    stream::FuturesUnordered,\n    TryStreamExt,\n};\nuse futures_rustls::rustls::{\n    pki_types,\n    ClientConfig,\n    RootCertStore,\n};\n// use native_tls;\nuse hyper::{\n    body::Incoming,\n    HeaderMap,\n    Request,\n    Uri,\n};\nuse hyper_util::{\n    rt::TokioExecutor,\n    server,\n};\nuse once_cell::sync::Lazy;\nuse paste::paste;\nuse proxy_protocol::ProxyHeader;\nuse rand::{\n    distributions::Alphanumeric,\n    thread_rng,\n    Rng,\n};\nuse reqwest::{\n    header,\n    StatusCode,\n};\nuse tokio::{\n    io::{\n        AsyncReadExt,\n        AsyncWriteExt,\n    },\n    net::TcpStream,\n    sync::mpsc,\n    test,\n};\nuse tokio_tungstenite::{\n    connect_async,\n    tungstenite::Message,\n};\nuse tokio_util::compat::*;\nuse tower::{\n    util::ServiceExt,\n    Service,\n};\nuse tracing_test::traced_test;\nuse url::Url;\n\nuse crate::{\n    prelude::*,\n    session::{\n        SessionBuilder,\n        CERT_BYTES,\n    },\n    Session,\n};\n\nasync fn setup_session() -> Result<Session, BoxError> {\n    Ok(Session::builder().authtoken_from_env().connect().await?)\n}\n\n#[cfg_attr(not(feature = \"online-tests\"), ignore)]\n#[test]\nasync fn listen() -> Result<(), BoxError> {\n    let _ = Session::builder()\n        .authtoken_from_env()\n        .connect()\n        .await?\n        .http_endpoint()\n        .listen()\n        .await?;\n    Ok(())\n}\n\n#[cfg_attr(not(feature = \"online-tests\"), ignore)]\n#[test]\nasync fn tunnel() -> Result<(), BoxError> {\n    let tun = 
setup_session()\n        .await?\n        .http_endpoint()\n        .metadata(\"Hello, world!\")\n        .forwards_to(\"some application\")\n        .listen()\n        .await?;\n\n    assert_eq!(\"Hello, world!\", tun.metadata());\n    assert_eq!(\"some application\", tun.forwards_to());\n\n    Ok(())\n}\n\nstruct TunnelGuard {\n    tx: Option<oneshot::Sender<()>>,\n    url: String,\n}\n\nimpl Drop for TunnelGuard {\n    fn drop(&mut self) {\n        let _ = self.tx.take().unwrap().send(());\n    }\n}\n\n// Spawn an http server using the provided session and tunnel options, and an\n// axum router.\n// The returned guard, when dropped, will cause the server to shut down.\nasync fn serve_http(\n    build_session: impl FnOnce(&mut SessionBuilder) -> &mut SessionBuilder,\n    build_tunnel: impl FnOnce(&mut HttpTunnelBuilder) -> &mut HttpTunnelBuilder,\n    router: axum::Router,\n) -> Result<TunnelGuard, BoxError> {\n    let sess = build_session(Session::builder().authtoken_from_env())\n        .connect()\n        .await?;\n\n    let tun = build_tunnel(&mut sess.http_endpoint()).listen().await?;\n\n    Ok(start_http_server(tun, router))\n}\n\nfn start_http_server<T>(mut tun: T, router: Router) -> TunnelGuard\nwhere\n    T: EndpointInfo + Tunnel + 'static,\n    T::Conn: crate::tunnel_ext::ConnExt,\n{\n    let url = tun.url().into();\n\n    let (tx, rx) = oneshot::channel::<()>();\n\n    let mut make_service = router.into_make_service_with_connect_info::<SocketAddr>();\n\n    let server = async move {\n        while let Some(conn) = tun.try_next().await? 
{\n            let remote_addr = conn.remote_addr();\n            let tower_service = unwrap_infallible(make_service.call(remote_addr).await);\n\n            tokio::spawn(async move {\n                let hyper_service =\n                    hyper::service::service_fn(move |request: Request<Incoming>| {\n                        tower_service.clone().oneshot(request)\n                    });\n\n                if let Err(err) = server::conn::auto::Builder::new(TokioExecutor::new())\n                    .serve_connection_with_upgrades(conn, hyper_service)\n                    .await\n                {\n                    eprintln!(\"failed to serve connection: {err:#}\");\n                }\n            });\n        }\n        Ok::<(), BoxError>(())\n    };\n\n    tokio::spawn(futures::future::select(Box::pin(server), rx));\n    TunnelGuard { tx: tx.into(), url }\n}\n\nfn defaults<T>(opts: &mut T) -> &mut T {\n    opts\n}\n\nfn hello_router() -> Router {\n    Router::new().route(\"/\", get(|| async { \"Hello, world!\" }))\n}\n\nasync fn check_body(url: impl AsRef<str>, expected: impl AsRef<str>) -> Result<(), BoxError> {\n    let body: String = reqwest::get(url.as_ref()).await?.text().await?;\n    assert_eq!(body, expected.as_ref());\n    Ok(())\n}\n\n#[cfg_attr(not(feature = \"online-tests\"), ignore)]\n#[test]\nasync fn https() -> Result<(), BoxError> {\n    let tun = serve_http(defaults, defaults, hello_router()).await?;\n    let url = tun.url.as_str();\n\n    assert!(url.starts_with(\"https://\"));\n\n    check_body(url, \"Hello, world!\").await?;\n\n    Ok(())\n}\n\n#[cfg_attr(not(feature = \"online-tests\"), ignore)]\n#[test]\nasync fn http() -> Result<(), BoxError> {\n    let tun = serve_http(defaults, |tun| tun.scheme(Scheme::HTTP), hello_router()).await?;\n    let url = tun.url.as_str();\n\n    assert!(url.starts_with(\"http://\"));\n\n    check_body(url, \"Hello, world!\").await?;\n\n    Ok(())\n}\n\n#[cfg_attr(not(feature = \"paid-tests\"), 
ignore)]\n#[test]\nasync fn http_compression() -> Result<(), BoxError> {\n    let tun = serve_http(defaults, |tun| tun.compression(), hello_router()).await?;\n    let url = tun.url.as_str();\n\n    let client = reqwest::Client::new();\n    let resp = client\n        .get(url)\n        .header(header::ACCEPT_ENCODING, \"gzip\")\n        .send()\n        .await?;\n\n    assert_eq!(\n        resp.headers().get(header::CONTENT_ENCODING).unwrap(),\n        \"gzip\"\n    );\n\n    let body_bytes = resp.bytes().await?;\n\n    let mut decoder = GzDecoder::new(&*body_bytes);\n    let mut body_string = String::new();\n    decoder.read_to_string(&mut body_string).unwrap();\n\n    assert_eq!(body_string, \"Hello, world!\");\n\n    Ok(())\n}\n\n#[cfg_attr(not(feature = \"paid-tests\"), ignore)]\n#[test]\nasync fn http_headers() -> Result<(), BoxError> {\n    let (tx, mut rx) = mpsc::channel::<BoxError>(16);\n    // For some reason, the hyper machinery keeps a clone of the `tx`, which\n    // causes it to never look closed, even when we drop the tunnel guard, which\n    // shuts down the hyper server. Maybe a leaked task? 
Work around it by\n    // keeping only one RAII tx here, and only give the handler a weak ref to\n    // it.\n    let weak = tx.downgrade();\n    let handler = move |headers: HeaderMap| async move {\n        let tx = weak\n            .upgrade()\n            .expect(\"no more requests after server shutdown\");\n\n        if let Some(bar) = headers.get(\"foo\") {\n            if bar != \"bar\" {\n                let _ = tx\n                    .send(format!(\"unexpected value for 'foo' request header: {:?}\", bar).into())\n                    .await;\n            }\n        } else {\n            let _ = tx.send(\"missing 'foo' request header\".into()).await;\n        }\n        if headers.get(\"baz\").is_some() {\n            let _ = tx.send(\"got 'baz' request header\".into()).await;\n        }\n\n        ([(\"python\", \"lolnope\")], \"Hello, world!\")\n    };\n    let tun = serve_http(\n        defaults,\n        |tun| {\n            tun.request_header(\"foo\", \"bar\")\n                .remove_request_header(\"baz\")\n                .response_header(\"spam\", \"eggs\")\n                .remove_response_header(\"python\")\n        },\n        Router::new().route(\"/\", get(handler)),\n    )\n    .await?;\n    let url = &tun.url;\n\n    let client = reqwest::Client::new();\n    let resp = client.get(url).header(\"baz\", \"bad header\").send().await?;\n\n    assert_eq!(\n        resp.headers()\n            .get(\"spam\")\n            .expect(\"'spam' header should exist\"),\n        \"eggs\"\n    );\n    assert!(resp.headers().get(\"python\").is_none(),);\n\n    drop(tun);\n    drop(tx);\n\n    if let Some(err) = rx.recv().await {\n        return Err(err);\n    }\n\n    Ok(())\n}\n\n#[traced_test]\n#[cfg_attr(not(feature = \"authenticated-tests\"), ignore)]\n#[test]\nasync fn user_agent() -> Result<(), BoxError> {\n    let tun = serve_http(\n        defaults,\n        |tun| tun.allow_user_agent(\"foo.*\").deny_user_agent(\".*\"),\n        hello_router(),\n    )\n  
  .await?;\n\n    let client = reqwest::Client::new();\n    let resp = client.get(&tun.url).send().await?;\n    assert_eq!(resp.status(), StatusCode::FORBIDDEN);\n\n    let client = reqwest::Client::builder()\n        .user_agent(\"foobarbaz\")\n        .build()\n        .expect(\"build reqwest client\");\n\n    let resp = client.get(&tun.url).send().await?;\n    assert_eq!(resp.status(), StatusCode::OK);\n    assert_eq!(resp.text().await?, \"Hello, world!\");\n\n    Ok(())\n}\n\n#[traced_test]\n#[cfg_attr(not(feature = \"paid-tests\"), ignore)]\n#[test]\nasync fn basic_auth() -> Result<(), BoxError> {\n    let tun = serve_http(\n        defaults,\n        |tun| tun.basic_auth(\"user\", \"foobarbaz\"),\n        hello_router(),\n    )\n    .await?;\n\n    let client = reqwest::Client::new();\n    let resp = client.get(&tun.url).send().await?;\n    assert_eq!(resp.status(), StatusCode::UNAUTHORIZED);\n\n    let resp = client\n        .get(&tun.url)\n        .basic_auth(\"user\", \"foobarbaz\".into())\n        .send()\n        .await?;\n    assert_eq!(resp.status(), StatusCode::OK);\n    assert_eq!(resp.text().await?, \"Hello, world!\");\n\n    Ok(())\n}\n\n#[traced_test]\n#[cfg_attr(not(feature = \"paid-tests\"), ignore)]\n#[test]\nasync fn oauth() -> Result<(), BoxError> {\n    let tun = serve_http(\n        defaults,\n        |tun| tun.oauth(OauthOptions::new(\"google\")),\n        hello_router(),\n    )\n    .await?;\n\n    let client = reqwest::Client::new();\n    let resp = client.get(&tun.url).send().await?;\n    assert_eq!(resp.status(), StatusCode::OK);\n    let body = resp.text().await?;\n    assert_ne!(body, \"Hello, world!\");\n    assert!(body.contains(\"accounts.google.com\"));\n\n    Ok(())\n}\n\n#[traced_test]\n#[cfg_attr(not(feature = \"paid-tests\"), ignore)]\n#[test]\nasync fn custom_domain() -> Result<(), BoxError> {\n    let mut rng = thread_rng();\n    let subdomain = (0..7)\n        .map(|_| rng.sample(Alphanumeric) as char)\n        
.collect::<String>()\n        .to_lowercase();\n    let _tun = serve_http(\n        defaults,\n        |tun| tun.domain(format!(\"{subdomain}.ngrok.io\")),\n        hello_router(),\n    )\n    .await?;\n\n    check_body(format!(\"https://{subdomain}.ngrok.io\"), \"Hello, world!\").await?;\n\n    Ok(())\n}\n\n#[traced_test]\n#[cfg_attr(not(feature = \"paid-tests\"), ignore)]\n#[test]\nasync fn policy() -> Result<(), BoxError> {\n    let tun = serve_http(\n        defaults,\n        |tun| tun.policy(create_policy()).unwrap(),\n        hello_router(),\n    )\n    .await?;\n\n    let client = reqwest::Client::new();\n    let resp = client.get(&tun.url).send().await?;\n    assert_eq!(resp.status(), 222);\n\n    Ok(())\n}\n\nfn create_policy() -> Result<Policy, InvalidPolicy> {\n    Ok(Policy::new()\n        .add_inbound(\n            Rule::new(\"deny_put\")\n                .add_expression(\"req.Method == 'PUT'\")\n                .add_action(Action::new(\"deny\", None)?),\n        )\n        .add_outbound(\n            Rule::new(\"222_response\")\n                .add_expression(\"res.StatusCode == '200'\")\n                .add_action(Action::new(\n                    \"custom-response\",\n                    Some(\"{\\\"status_code\\\": 222}\"),\n                )?),\n        )\n        .to_owned())\n}\n\n#[traced_test]\n#[cfg_attr(not(all(feature = \"paid-tests\", feature = \"long-tests\")), ignore)]\n#[test]\nasync fn circuit_breaker() -> Result<(), BoxError> {\n    let ctr = Arc::new(AtomicUsize::new(0));\n    let tun = serve_http(\n        defaults,\n        |tun| tun.circuit_breaker(0.01),\n        Router::new().route(\n            \"/\",\n            get({\n                let ctr = ctr.clone();\n                move || {\n                    ctr.fetch_add(1, Ordering::SeqCst);\n                    async { hyper::StatusCode::INTERNAL_SERVER_ERROR }\n                }\n            }),\n        ),\n    )\n    .await?;\n\n    let mut attempts = 0;\n    for _ in 
0..20 {\n        let mut futs = FuturesUnordered::new();\n        // smaller batches to have less in-flight requests and break sooner\n        for _ in 0..25 {\n            attempts += 1;\n            let url = tun.url.clone();\n            futs.push(async move {\n                let resp = reqwest::get(url).await?;\n                let status = resp.status();\n                tracing::debug!(?status);\n                Result::<_, BoxError>::Ok(resp.status())\n            });\n        }\n        let mut done = false;\n        while let Some(res) = futs.next().await {\n            if res? == StatusCode::SERVICE_UNAVAILABLE {\n                // circuit breaker is working, done after this batch\n                done = true;\n            }\n        }\n        if done {\n            break;\n        }\n    }\n\n    // validate that some, but not all, requests were dropped\n    let actual = ctr.load(Ordering::SeqCst);\n    assert!(actual > 4, \"expected > 4 requests, got {actual}\");\n    assert!(\n        actual < attempts,\n        \"expected < {attempts} requests, got {actual}\"\n    );\n\n    Ok(())\n}\n\n// Shamelessly ripped from stackoverflow:\n// https://stackoverflow.com/questions/35901547/how-can-i-find-a-subsequence-in-a-u8-slice\nfn find_subsequence<T>(haystack: &[T], needle: &[T]) -> Option<usize>\nwhere\n    for<'a> &'a [T]: PartialEq,\n{\n    haystack\n        .windows(needle.len())\n        .position(|window| window == needle)\n}\n\nmacro_rules! proxy_proto_test {\n    (genone: $ept:ident, $vers:ident, $tun:ident, $req:expr, $cont:expr) => {\n        paste! 
{\n            #[traced_test]\n            #[cfg_attr(not(feature = \"paid-tests\"), ignore)]\n            #[test]\n            #[allow(non_snake_case)]\n            async fn [<proxy_proto_ $ept _ $vers>]() -> Result<(), BoxError> {\n                let sess = Session::builder().authtoken_from_env().connect().await?;\n                let mut $tun = sess\n                    .[<$ept _endpoint>]()\n                    .proxy_proto(ProxyProto::$vers).listen().await?;\n\n                let req = $req;\n                tokio::spawn(req);\n\n\n                let mut buf = vec![0u8; 12];\n                let mut conn = $tun\n                    .try_next()\n                    .await?\n                    .ok_or_else(|| anyhow!(\"tunnel closed\"))?;\n\n                conn.read_exact(&mut buf).await?;\n\n                assert!(find_subsequence(&buf, $cont).is_some());\n\n                Ok(())\n            }\n        }\n    };\n\n    ($vers:ident, $ex:expr, [$(($ept:ident, |$tun:ident| $req:expr)),*]) => {\n        $(\n            proxy_proto_test!(genone: $ept, $vers, $tun, $req, $ex);\n        )*\n    };\n\n    ([$(($vers:ident, $ex:expr)),*] $rest:tt) => {\n        $(\n            proxy_proto_test!($vers, $ex, $rest);\n        )*\n    };\n}\n\nproxy_proto_test!(\n    [(V1, &b\"PROXY TCP\"[..]), (V2, &b\"\\x0D\\x0A\\x0D\\x0A\\x00\\x0D\\x0A\\x51\\x55\\x49\\x54\\x0A\"[..])]\n    [\n        (http, |tun| {\n            reqwest::get(tun.url().to_string())\n        }),\n        (tcp, |tun| {\n            reqwest::get(tun.url().to_string().replacen(\"tcp\", \"http\", 1))\n        })\n    ]\n);\n\n#[traced_test]\n#[test]\n#[cfg_attr(not(feature = \"paid-tests\"), ignore)]\nasync fn http_ip_restriction() -> Result<(), BoxError> {\n    let tun = serve_http(\n        defaults,\n        |tun| tun.allow_cidr(\"127.0.0.1/32\").deny_cidr(\"0.0.0.0/0\"),\n        hello_router(),\n    )\n    .await?;\n\n    let resp = reqwest::get(&tun.url).await?;\n\n    assert_eq!(resp.status(), 
StatusCode::FORBIDDEN);\n\n    Ok(())\n}\n\n#[traced_test]\n#[test]\n#[cfg_attr(not(feature = \"paid-tests\"), ignore)]\nasync fn tcp_ip_restriction() -> Result<(), BoxError> {\n    let tun = Session::builder()\n        .authtoken_from_env()\n        .connect()\n        .await?\n        .tcp_endpoint()\n        .allow_cidr(\"127.0.0.1/32\")\n        .deny_cidr(\"0.0.0.0/0\")\n        .listen()\n        .await?;\n\n    let tun = start_http_server(tun, hello_router());\n\n    let url = tun.url.replacen(\"tcp\", \"http\", 1);\n\n    assert!(reqwest::get(&url).await.is_err());\n\n    Ok(())\n}\n\n#[traced_test]\n#[test]\n#[cfg_attr(not(feature = \"paid-tests\"), ignore)]\nasync fn websocket_conversion() -> Result<(), BoxError> {\n    let mut tun = Session::builder()\n        .authtoken_from_env()\n        .connect()\n        .await?\n        .http_endpoint()\n        .websocket_tcp_conversion()\n        .listen()\n        .await?;\n\n    let url = Uri::from_str(&tun.url().replacen(\"https\", \"wss\", 1))?;\n\n    tokio::spawn(async move {\n        while let Some(mut conn) = tun.try_next().await? 
{\n            conn.write_all(\"Hello, websockets!\".as_bytes()).await?;\n        }\n        Result::<_, BoxError>::Ok(())\n    });\n\n    let mut wss = connect_async(url).await.expect(\"connect\").0;\n\n    loop {\n        let msg = wss.try_next().await.expect(\"read\").expect(\"message\");\n\n        match msg {\n            Message::Binary(bs) => {\n                assert_eq!(String::from_utf8_lossy(&bs), \"Hello, websockets!\");\n                break;\n            }\n            Message::Text(t) => {\n                assert_eq!(t, \"Hello, websockets!\");\n                break;\n            }\n            Message::Ping(b) => {\n                wss.send(Message::Pong(b)).await?;\n            }\n            Message::Close(_) => {\n                return Err(BoxError::from(\"didn't get message before close\"));\n            }\n            _ => {}\n        }\n    }\n\n    Ok(())\n}\n\n#[traced_test]\n#[test]\n#[cfg_attr(not(feature = \"authenticated-tests\"), ignore)]\nasync fn tcp() -> Result<(), BoxError> {\n    let tun = Session::builder()\n        .authtoken_from_env()\n        .connect()\n        .await?\n        .tcp_endpoint()\n        .listen()\n        .await?;\n\n    let tun = start_http_server(tun, hello_router());\n\n    let url = tun.url.replacen(\"tcp\", \"http\", 1);\n\n    check_body(url, \"Hello, world!\").await?;\n\n    Ok(())\n}\n\nconst CERT: &[u8] = include_bytes!(\"../examples/domain.crt\");\nconst KEY: &[u8] = include_bytes!(\"../examples/domain.key\");\n\n#[traced_test]\n#[test]\n#[cfg_attr(not(feature = \"authenticated-tests\"), ignore)]\nasync fn tls() -> Result<(), BoxError> {\n    let tun = Session::builder()\n        .authtoken_from_env()\n        .connect()\n        .await?\n        .tls_endpoint()\n        .termination(CERT.into(), KEY.into())\n        .listen()\n        .await?;\n\n    let tun = start_http_server(tun, hello_router());\n    let url = tun.url.replacen(\"tls\", \"http\", 1);\n\n    // Create a client with verbose 
logging and longer timeout\n    let client = reqwest::Client::new();\n\n    let resp = client.get(url.clone()).send().await;\n\n    assert!(resp.is_err());\n\n    let err = resp.err().unwrap();\n\n    // Check if the error is a certificate error\n    let is_certificate_error = if let Some(source) = err.source() {\n        // Try to downcast to hyper_util::client::legacy::Error\n        if let Some(hyper_error) = source.downcast_ref::<hyper_util::client::legacy::Error>() {\n            // Convert the entire error to a string to extract the message\n            let error_str = hyper_error.source().unwrap().to_string();\n\n            error_str.contains(\"certificate\")\n        } else {\n            // If we can't downcast to the specific error type, fall back to string matching\n            let source_str = format!(\"{:?}\", source);\n            assert!(source_str.contains(\"certificate\"));\n            return Ok(());\n        }\n    } else {\n        // If there's no source, return an error\n        return Err(\"No error source found\".into());\n    };\n\n    assert!(is_certificate_error);\n\n    Ok(())\n}\n\n#[test]\n#[cfg_attr(not(feature = \"authenticated-tests\"), ignore)]\nasync fn app_protocol() -> Result<(), BoxError> {\n    let tun = Session::builder()\n        .authtoken_from_env()\n        .connect()\n        .await?\n        .http_endpoint()\n        .app_protocol(\"http2\")\n        .listen_and_forward(\"https://ngrok.com\".parse()?)\n        .await?;\n\n    // smoke test\n    let client = reqwest::Client::new();\n    let resp = client.get(tun.url()).send().await;\n\n    assert!(resp.is_ok());\n\n    Ok(())\n}\n\n#[test]\n#[cfg_attr(not(feature = \"authenticated-tests\"), ignore)]\nasync fn verify_upstream_tls() -> Result<(), BoxError> {\n    let tun = Session::builder()\n        .authtoken_from_env()\n        .connect()\n        .await?\n        .http_endpoint()\n        .verify_upstream_tls(false)\n        
.listen_and_forward(\"https://ngrok.com\".parse()?)\n        .await?;\n\n    // smoke test\n    let client = reqwest::Client::new();\n    let resp = client.get(tun.url()).send().await;\n\n    assert!(resp.is_ok());\n\n    Ok(())\n}\n\n#[cfg_attr(not(feature = \"online-tests\"), ignore)]\n#[test]\nasync fn session_root_cas() -> Result<(), BoxError> {\n    // host cannot validate cert\n    let resp = Session::builder()\n        .authtoken_from_env()\n        .root_cas(\"host\")?\n        .connect()\n        .await;\n    assert!(resp.is_err());\n    let err_str = resp.err().unwrap().to_string();\n    tracing::debug!(?err_str);\n    assert!(err_str.contains(\"tls\")); // tls issue\n\n    // default of 'trusted' cannot validate the marketing site\n    let resp = Session::builder()\n        .authtoken_from_env()\n        .server_addr(\"ngrok.com:443\")?\n        .connect()\n        .await;\n    assert!(resp.is_err());\n    let err_str = resp.err().unwrap().to_string();\n    tracing::debug!(?err_str);\n    assert!(err_str.contains(\"tls\")); // tls issue\n\n    // \"host\" certs can validate the marketing site's let's encrypt cert\n    let resp = Session::builder()\n        .authtoken_from_env()\n        .root_cas(\"host\")?\n        .server_addr(\"ngrok.com:443\")?\n        .connect()\n        .await;\n    assert!(resp.is_err());\n    let err_str = resp.err().unwrap().to_string();\n    tracing::debug!(?err_str);\n    assert!(!err_str.contains(\"tls\")); // not a tls problem\n\n    // use the trusted cert, this should connect\n    Session::builder()\n        .authtoken_from_env()\n        .root_cas(\"trusted\")?\n        .connect()\n        .await?;\n\n    // use the default cert, this should connect\n    Session::builder()\n        .authtoken_from_env()\n        .root_cas(\"assets/ngrok.ca.crt\")?\n        .connect()\n        .await?;\n\n    Ok(())\n}\n\n#[cfg_attr(not(feature = \"online-tests\"), ignore)]\n#[test]\nasync fn session_ca_cert() -> Result<(), BoxError> {\n  
  // invalid cert\n    let resp = Session::builder()\n        .authtoken_from_env()\n        .ca_cert(CERT.into())\n        .connect()\n        .await;\n\n    assert!(resp.is_err());\n    let err_str = resp.err().unwrap().to_string();\n    tracing::debug!(?err_str);\n    assert!(err_str.contains(\"tls\"));\n\n    // use the default cert, this should connect\n    Session::builder()\n        .authtoken_from_env()\n        .ca_cert(CERT_BYTES.into())\n        .connect()\n        .await?;\n\n    Ok(())\n}\n\n#[cfg_attr(not(feature = \"online-tests\"), ignore)]\n#[test]\nasync fn session_tls_config() -> Result<(), BoxError> {\n    let default_tls_config = Session::builder().get_or_create_tls_config();\n\n    // invalid cert, but valid tls_config overrides\n    Session::builder()\n        .authtoken_from_env()\n        .ca_cert(CERT.into())\n        .tls_config(default_tls_config)\n        .connect()\n        .await?;\n\n    Ok(())\n}\n\nfn tls_client_config() -> Result<Arc<ClientConfig>, &'static io::Error> {\n    static CONFIG: Lazy<Result<Arc<ClientConfig>, io::Error>> = Lazy::new(|| {\n        let der_certs = rustls_native_certs::load_native_certs()?\n            .into_iter()\n            .collect::<Vec<_>>();\n        let mut root_store = RootCertStore::empty();\n        root_store.add_parsable_certificates(der_certs);\n        let config = ClientConfig::builder()\n            .with_root_certificates(root_store)\n            .with_no_client_auth();\n        Ok(Arc::new(config))\n    });\n\n    Ok(CONFIG.as_ref()?.clone())\n}\n\n#[traced_test]\n#[test]\nasync fn connect_proxy_http() -> Result<(), BoxError> {\n    let listener = tokio::net::TcpListener::bind(\"127.0.0.1:0\").await?;\n    let addr = listener.local_addr()?;\n    let (tx, mut rx) = mpsc::channel::<u64>(1);\n    let shutdown = tokio_util::sync::CancellationToken::new();\n\n    let ln_shutdown = shutdown.clone();\n    tokio::spawn(async move {\n        let res = connect_proxy::run_proxy(listener, 
ln_shutdown).await;\n        tx.send(res).await.unwrap();\n    });\n\n    let sess = Session::builder()\n        .authtoken_from_env()\n        .proxy_url(format!(\"http://{addr}\").parse().unwrap())\n        .unwrap()\n        .connect()\n        .await?;\n\n    tracing::debug!(\"{}\", sess.id());\n\n    shutdown.cancel();\n    // verify we got a request\n    let conns = rx.recv().await;\n\n    assert_eq!(Some(1), conns);\n    Ok(())\n}\n\n// connect_proxy contains code for connect_proxy tests\n// This code is adapted from https://github.com/hyperium/hyper/blob/c449528a33d266a8ca1210baca11e5d649ca6c27/examples/http_proxy.rs#L37\n// Used under the terms of the MIT license, Copyright (c) 2014-2025 Sean McArthur\nmod connect_proxy {\n    use bytes::Bytes;\n    use http_body_util::{\n        combinators::BoxBody,\n        BodyExt,\n        Empty,\n        Full,\n    };\n    use hyper::{\n        client::conn::http1::Builder,\n        http,\n        server::conn::http1,\n        service::service_fn,\n        upgrade::Upgraded,\n        Method,\n        Request,\n        Response,\n    };\n    use hyper_util::rt::TokioIo;\n    use tokio::net::TcpStream;\n    use tokio_util::sync::CancellationToken;\n\n    pub async fn run_proxy(listener: tokio::net::TcpListener, shutdown: CancellationToken) -> u64 {\n        // count requests so our caller can test that we received a request\n        let mut req_count = 0;\n        loop {\n            let (stream, _) = match shutdown.run_until_cancelled(listener.accept()).await {\n                None => {\n                    return req_count;\n                }\n                Some(r) => r.unwrap(),\n            };\n            let io = TokioIo::new(stream);\n            req_count += 1;\n\n            tokio::task::spawn(async move {\n                if let Err(err) = http1::Builder::new()\n                    .preserve_header_case(true)\n                    .title_case_headers(true)\n                    .serve_connection(io, 
service_fn(proxy))\n                    .with_upgrades()\n                    .await\n                {\n                    println!(\"Failed to serve connection: {:?}\", err);\n                }\n            });\n        }\n    }\n\n    async fn proxy(\n        req: Request<hyper::body::Incoming>,\n    ) -> Result<Response<BoxBody<Bytes, hyper::Error>>, hyper::Error> {\n        println!(\"req: {:?}\", req);\n\n        if Method::CONNECT == req.method() {\n            // Received an HTTP request like:\n            // ```\n            // CONNECT www.domain.com:443 HTTP/1.1\n            // Host: www.domain.com:443\n            // Proxy-Connection: Keep-Alive\n            // ```\n            //\n            // When HTTP method is CONNECT we should return an empty body\n            // then we can eventually upgrade the connection and talk a new protocol.\n            //\n            // Note: only after client received an empty body with STATUS_OK can the\n            // connection be upgraded, so we can't return a response inside\n            // `on_upgrade` future.\n            if let Some(addr) = host_addr(req.uri()) {\n                tokio::task::spawn(async move {\n                    match hyper::upgrade::on(req).await {\n                        Ok(upgraded) => {\n                            if let Err(e) = tunnel(upgraded, addr).await {\n                                eprintln!(\"server io error: {}\", e);\n                            };\n                        }\n                        Err(e) => eprintln!(\"upgrade error: {}\", e),\n                    }\n                });\n\n                Ok(Response::new(empty()))\n            } else {\n                eprintln!(\"CONNECT host is not socket addr: {:?}\", req.uri());\n                let mut resp = Response::new(full(\"CONNECT must be to a socket address\"));\n                *resp.status_mut() = http::StatusCode::BAD_REQUEST;\n\n                Ok(resp)\n            }\n        } else {\n            
let host = req.uri().host().expect(\"uri has no host\");\n            let port = req.uri().port_u16().unwrap_or(80);\n\n            let stream = TcpStream::connect((host, port)).await.unwrap();\n            let io = TokioIo::new(stream);\n\n            let (mut sender, conn) = Builder::new()\n                .preserve_header_case(true)\n                .title_case_headers(true)\n                .handshake(io)\n                .await?;\n            tokio::task::spawn(async move {\n                if let Err(err) = conn.await {\n                    println!(\"Connection failed: {:?}\", err);\n                }\n            });\n\n            let resp = sender.send_request(req).await?;\n            Ok(resp.map(|b| b.boxed()))\n        }\n    }\n\n    fn host_addr(uri: &http::Uri) -> Option<String> {\n        uri.authority().map(|auth| auth.to_string())\n    }\n\n    fn empty() -> BoxBody<Bytes, hyper::Error> {\n        Empty::<Bytes>::new()\n            .map_err(|never| match never {})\n            .boxed()\n    }\n\n    fn full<T: Into<Bytes>>(chunk: T) -> BoxBody<Bytes, hyper::Error> {\n        Full::new(chunk.into())\n            .map_err(|never| match never {})\n            .boxed()\n    }\n\n    // Create a TCP connection to host:port, build a tunnel between the connection and\n    // the upgraded connection\n    async fn tunnel(upgraded: Upgraded, addr: String) -> std::io::Result<()> {\n        // Connect to remote server\n        let mut server = TcpStream::connect(addr).await?;\n        let mut upgraded = TokioIo::new(upgraded);\n\n        // Proxying data\n        let (from_client, from_server) =\n            tokio::io::copy_bidirectional(&mut upgraded, &mut server).await?;\n\n        // Print message when done\n        println!(\n            \"client wrote {} bytes and received {} bytes\",\n            from_client, from_server\n        );\n\n        Ok(())\n    }\n}\n\n#[traced_test]\n#[cfg_attr(not(feature = \"paid-tests\"), ignore)]\n#[test]\nasync fn 
forward_proxy_protocol_tls() -> Result<(), BoxError> {\n    let listener = tokio::net::TcpListener::bind(\"127.0.0.1:0\").await?;\n    let addr = listener.local_addr()?;\n\n    let sess = Session::builder().authtoken_from_env().connect().await?;\n    let forwarder = sess\n        .tls_endpoint()\n        .proxy_proto(ProxyProto::V2)\n        .termination(Bytes::default(), Bytes::default())\n        .listen_and_forward(format!(\"tls://{}\", addr).parse()?)\n        .await?;\n\n    let tunnel_url: Url = forwarder.url().to_string().parse()?;\n\n    tokio::spawn(async move {\n        tokio::time::sleep(Duration::from_millis(500)).await;\n        let tunnel_conn = TcpStream::connect(format!(\n            \"{}:{}\",\n            tunnel_url.host_str().unwrap(),\n            tunnel_url.port().unwrap_or(443)\n        ))\n        .await?;\n\n        let domain = pki_types::ServerName::try_from(tunnel_url.host_str().unwrap())\n            .map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e))?\n            .to_owned();\n\n        let mut tls_conn = futures_rustls::TlsConnector::from(\n            tls_client_config().map_err(|e| io::Error::from(e.kind()))?,\n        )\n        .connect(domain, tunnel_conn.compat())\n        .await?\n        .compat();\n\n        tls_conn.write_all(b\"Hello, world!\").await\n    });\n\n    let (conn, _) = listener.accept().await?;\n\n    let mut proxy_conn = crate::proxy_proto::Stream::incoming(conn);\n    let proxy_header = proxy_conn.proxy_header().await?.unwrap().cloned().unwrap();\n\n    match proxy_header {\n        ProxyHeader::Version2 { .. } => {}\n        _ => unreachable!(\"we configured v2\"),\n    }\n\n    // TODO: actually accept the tls connection from the server side\n\n    Ok(())\n}\n\nfn unwrap_infallible<T>(result: Result<T, Infallible>) -> T {\n    match result {\n        Ok(value) => value,\n        Err(err) => match err {},\n    }\n}\n"
  },
  {
    "path": "ngrok/src/proxy_proto.rs",
    "content": "use std::{\n    io,\n    mem,\n    pin::{\n        pin,\n        Pin,\n    },\n    task::{\n        ready,\n        Context,\n        Poll,\n    },\n};\n\nuse bytes::{\n    Buf,\n    BytesMut,\n};\nuse proxy_protocol::{\n    ParseError,\n    ProxyHeader,\n};\nuse tokio::io::{\n    AsyncRead,\n    AsyncWrite,\n    ReadBuf,\n};\nuse tracing::instrument;\n\n// 536 is the smallest possible TCP segment, which both v1 and v2 are guaranteed\n// to fit into.\nconst MAX_HEADER_LEN: usize = 536;\n// v2 headers start with at least 16 bytes\nconst MIN_HEADER_LEN: usize = 16;\n\n#[derive(Debug)]\nenum ReadState {\n    Reading(Option<ParseError>, BytesMut),\n    Error(proxy_protocol::ParseError, BytesMut),\n    Header(Option<proxy_protocol::ProxyHeader>, BytesMut),\n    None,\n}\n\nimpl ReadState {\n    fn new() -> ReadState {\n        ReadState::Reading(None, BytesMut::with_capacity(MAX_HEADER_LEN))\n    }\n\n    fn header(&self) -> Result<Option<&ProxyHeader>, &ParseError> {\n        match self {\n            ReadState::Error(err, _) | ReadState::Reading(Some(err), _) => Err(err),\n            ReadState::None | ReadState::Reading(None, _) => Ok(None),\n            ReadState::Header(hdr, _) => Ok(hdr.as_ref()),\n        }\n    }\n\n    /// Read the header from the stream *once*. 
Once a header has been read, or\n    /// it's been determined that no header is coming, this will be a no-op.\n    #[instrument(level = \"trace\", skip(reader))]\n    fn poll_read_header_once(\n        &mut self,\n        cx: &mut Context,\n        mut reader: Pin<&mut impl AsyncRead>,\n    ) -> Poll<io::Result<()>> {\n        loop {\n            let read_state = mem::replace(self, ReadState::None);\n            let (last_err, mut hdr_buf) = match read_state {\n                // End states\n                ReadState::None | ReadState::Header(_, _) | ReadState::Error(_, _) => {\n                    *self = read_state;\n                    return Poll::Ready(Ok(()));\n                }\n                ReadState::Reading(err, hdr_buf) => (err, hdr_buf),\n            };\n\n            if hdr_buf.len() < MAX_HEADER_LEN {\n                let mut tmp_buf = ReadBuf::uninit(hdr_buf.spare_capacity_mut());\n                let read_res = reader.as_mut().poll_read(cx, &mut tmp_buf);\n                // Regardless of error, make sure we track the read bytes\n                let filled = tmp_buf.filled().len();\n                if filled > 0 {\n                    let len = hdr_buf.len();\n                    // Safety: the tmp_buf is backed by the uninitialized\n                    // portion of hdr_buf. 
Advancing the len to len + filled is\n                    // guaranteed to only cover the bytes initialized by the\n                    // read.\n                    unsafe { hdr_buf.set_len(len + filled) }\n                }\n                match read_res {\n                    // If we hit the end of the stream due to either an EOF or\n                    // an error, set the state to a terminal one and return the\n                    // result.\n                    Poll::Ready(ref res) if res.is_err() || filled == 0 => {\n                        *self = match last_err {\n                            Some(err) => ReadState::Error(err, hdr_buf),\n                            None => ReadState::Header(None, hdr_buf),\n                        };\n                        return read_res;\n                    }\n                    // Pending leaves the last error and buffer unchanged.\n                    Poll::Pending => {\n                        *self = ReadState::Reading(last_err, hdr_buf);\n                        return read_res;\n                    }\n                    _ => {}\n                }\n            }\n\n            // Create a view into the header buffer so that failed parse\n            // attempts don't consume it.\n            let mut hdr_view = &*hdr_buf;\n\n            // Don't try to parse unless we have a minimum number of bytes to\n            // avoid spurious \"NotProxyHeader\" errors.\n            // Also hack around a bug in the proxy_protocol crate that results\n            // in panics when the input ends in \\r without the \\n.\n            if hdr_view.len() < MIN_HEADER_LEN || matches!(hdr_view.last(), Some(b'\\r')) {\n                *self = ReadState::Reading(last_err, hdr_buf);\n                continue;\n            }\n\n            match proxy_protocol::parse(&mut hdr_view) {\n                Ok(hdr) => {\n                    hdr_buf.advance(hdr_buf.len() - hdr_view.len());\n                    *self = 
ReadState::Header(Some(hdr), hdr_buf);\n                    return Poll::Ready(Ok(()));\n                }\n                Err(ParseError::NotProxyHeader) => {\n                    *self = ReadState::Header(None, hdr_buf);\n                    return Poll::Ready(Ok(()));\n                }\n\n                // Keep track of the last error - it might not be fatal if we\n                // simply haven't read enough\n                Err(err) => {\n                    // If we've read too much, consider the error fatal.\n                    if hdr_buf.len() >= MAX_HEADER_LEN {\n                        *self = ReadState::Error(err, hdr_buf);\n                    } else {\n                        *self = ReadState::Reading(Some(err), hdr_buf);\n                    }\n                    continue;\n                }\n            }\n        }\n    }\n}\n\n#[derive(Debug)]\nenum WriteState {\n    Writing(BytesMut),\n    None,\n}\n\nimpl WriteState {\n    fn new(hdr: proxy_protocol::ProxyHeader) -> Result<WriteState, proxy_protocol::EncodeError> {\n        proxy_protocol::encode(hdr).map(WriteState::Writing)\n    }\n\n    /// Write the header *once*. 
After its written to the stream, this will be a\n    /// no-op.\n    #[instrument(level = \"trace\", skip(writer))]\n    fn poll_write_header_once(\n        &mut self,\n        cx: &mut Context,\n        mut writer: Pin<&mut impl AsyncWrite>,\n    ) -> Poll<io::Result<()>> {\n        loop {\n            let state = mem::replace(self, WriteState::None);\n            match state {\n                WriteState::None => return Poll::Ready(Ok(())),\n                WriteState::Writing(mut buf) => {\n                    let write_res = writer.as_mut().poll_write(cx, &buf);\n                    match write_res {\n                        Poll::Pending | Poll::Ready(Err(_)) => {\n                            *self = WriteState::Writing(buf);\n                            ready!(write_res)?;\n                            unreachable!(\n                                \"ready! will return for us on either Pending or Ready(Err)\"\n                            );\n                        }\n                        Poll::Ready(Ok(written)) => {\n                            buf.advance(written);\n                            if !buf.is_empty() {\n                                *self = WriteState::Writing(buf);\n                                continue;\n                            } else {\n                                return Ok(()).into();\n                            }\n                        }\n                    }\n                }\n            }\n        }\n    }\n}\n\n#[derive(Debug)]\n#[pin_project::pin_project]\npub struct Stream<S> {\n    read_state: ReadState,\n    write_state: WriteState,\n    #[pin]\n    inner: S,\n}\n\nimpl<S> Stream<S> {\n    pub fn outgoing(stream: S, header: ProxyHeader) -> Result<Self, proxy_protocol::EncodeError> {\n        Ok(Stream {\n            inner: stream,\n            write_state: WriteState::new(header)?,\n            read_state: ReadState::None,\n        })\n    }\n\n    pub fn incoming(stream: S) -> Self {\n        Stream {\n         
   inner: stream,\n            read_state: ReadState::new(),\n            write_state: WriteState::None,\n        }\n    }\n\n    pub fn disabled(stream: S) -> Self {\n        Stream {\n            inner: stream,\n            read_state: ReadState::None,\n            write_state: WriteState::None,\n        }\n    }\n}\n\nimpl<S> Stream<S>\nwhere\n    S: AsyncRead,\n{\n    #[instrument(level = \"debug\", skip(self))]\n    pub async fn proxy_header(&mut self) -> io::Result<Result<Option<&ProxyHeader>, &ParseError>>\n    where\n        Self: Unpin,\n    {\n        let mut this = Pin::new(self);\n\n        futures::future::poll_fn(|cx| {\n            let this = this.as_mut().project();\n            this.read_state.poll_read_header_once(cx, this.inner)\n        })\n        .await?;\n\n        Ok(this.get_mut().read_state.header())\n    }\n}\n\nimpl<S> AsyncRead for Stream<S>\nwhere\n    S: AsyncRead,\n{\n    #[instrument(level = \"trace\", skip(self), fields(read_state = ?self.read_state))]\n    fn poll_read(\n        self: Pin<&mut Self>,\n        cx: &mut Context<'_>,\n        buf: &mut ReadBuf<'_>,\n    ) -> Poll<io::Result<()>> {\n        let mut this = self.project();\n\n        ready!(this\n            .read_state\n            .poll_read_header_once(cx, this.inner.as_mut()))?;\n\n        match this.read_state {\n            ReadState::Error(_, remainder) | ReadState::Header(_, remainder) => {\n                if !remainder.is_empty() {\n                    let available = std::cmp::min(remainder.len(), buf.remaining());\n                    buf.put_slice(&remainder.split_to(available));\n                    // Make sure Ready is returned regardless of inner's state\n                    return Poll::Ready(Ok(()));\n                }\n            }\n            ReadState::None => {}\n            _ => unreachable!(),\n        }\n\n        this.inner.poll_read(cx, buf)\n    }\n}\n\nimpl<S> AsyncWrite for Stream<S>\nwhere\n    S: AsyncWrite,\n{\n    #[instrument(level 
= \"trace\", skip(self), fields(write_state = ?self.write_state))]\n    fn poll_write(\n        self: Pin<&mut Self>,\n        cx: &mut Context<'_>,\n        buf: &[u8],\n    ) -> Poll<Result<usize, io::Error>> {\n        let mut this = self.project();\n\n        ready!(this\n            .write_state\n            .poll_write_header_once(cx, this.inner.as_mut()))?;\n\n        this.inner.poll_write(cx, buf)\n    }\n    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {\n        self.project().inner.poll_flush(cx)\n    }\n    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {\n        self.project().inner.poll_shutdown(cx)\n    }\n}\n\n#[cfg(feature = \"hyper\")]\nmod hyper {\n    use ::hyper::rt::{\n        Read as HyperRead,\n        Write as HyperWrite,\n    };\n\n    use super::*;\n\n    impl<S> HyperWrite for Stream<S>\n    where\n        S: AsyncWrite,\n    {\n        #[instrument(level = \"trace\", skip(self), fields(write_state = ?self.write_state))]\n        fn poll_write(\n            self: Pin<&mut Self>,\n            cx: &mut Context<'_>,\n            buf: &[u8],\n        ) -> Poll<Result<usize, io::Error>> {\n            <Self as AsyncWrite>::poll_write(self, cx, buf)\n        }\n        fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {\n            <Self as AsyncWrite>::poll_flush(self, cx)\n        }\n        fn poll_shutdown(\n            self: Pin<&mut Self>,\n            cx: &mut Context<'_>,\n        ) -> Poll<Result<(), io::Error>> {\n            <Self as AsyncWrite>::poll_shutdown(self, cx)\n        }\n    }\n\n    impl<S> HyperRead for Stream<S>\n    where\n        S: AsyncRead,\n    {\n        fn poll_read(\n            self: Pin<&mut Self>,\n            cx: &mut Context<'_>,\n            mut buf: ::hyper::rt::ReadBufCursor<'_>,\n        ) -> Poll<Result<(), std::io::Error>> {\n            let mut tokio_buf = 
tokio::io::ReadBuf::uninit(unsafe { buf.as_mut() });\n            let res = ready!(<Self as AsyncRead>::poll_read(self, cx, &mut tokio_buf));\n            let filled = tokio_buf.filled().len();\n            unsafe { buf.advance(filled) };\n            Poll::Ready(res)\n        }\n    }\n}\n\n#[cfg(test)]\nmod test {\n    use std::{\n        cmp,\n        io,\n        pin::Pin,\n        task::{\n            ready,\n            Context,\n            Poll,\n        },\n        time::Duration,\n    };\n\n    use bytes::{\n        BufMut,\n        BytesMut,\n    };\n    use proxy_protocol::{\n        version2::{\n            self,\n            ProxyCommand,\n        },\n        ProxyHeader,\n    };\n    use tokio::io::{\n        AsyncRead,\n        AsyncReadExt,\n        AsyncWriteExt,\n        ReadBuf,\n    };\n\n    use super::Stream;\n\n    #[pin_project::pin_project]\n    struct ShortReader<S> {\n        #[pin]\n        inner: S,\n        min: usize,\n        max: usize,\n    }\n\n    impl<S> AsyncRead for ShortReader<S>\n    where\n        S: AsyncRead,\n    {\n        fn poll_read(\n            self: Pin<&mut Self>,\n            cx: &mut Context<'_>,\n            buf: &mut ReadBuf<'_>,\n        ) -> Poll<io::Result<()>> {\n            let mut this = self.project();\n            let max_bytes =\n                *this.min + cmp::max(1, rand::random::<usize>() % (*this.max - *this.min));\n            let mut tmp = vec![0; max_bytes];\n            let mut tmp_buf = ReadBuf::new(&mut tmp);\n            let res = ready!(this.inner.as_mut().poll_read(cx, &mut tmp_buf));\n\n            buf.put_slice(tmp_buf.filled());\n\n            res?;\n\n            Poll::Ready(Ok(()))\n        }\n    }\n\n    impl<S> ShortReader<S> {\n        fn new(inner: S, min: usize, max: usize) -> Self {\n            ShortReader { inner, min, max }\n        }\n    }\n\n    const INPUT: &str = \"PROXY TCP4 192.168.0.1 192.168.0.11 56324 443\\r\\n\";\n    const PARTIAL_INPUT: &str = \"PROXY TCP4 
192.168.0.1\";\n    const FINAL_INPUT: &str = \" 192.168.0.11 56324 443\\r\\n\";\n\n    // Smoke test to ensure that the proxy protocol parser works as expected.\n    // Not actually testing our code.\n    #[test]\n    fn test_proxy_protocol() {\n        let mut buf = BytesMut::from(INPUT);\n\n        assert!(proxy_protocol::parse(&mut buf).is_ok());\n\n        buf = BytesMut::from(PARTIAL_INPUT);\n\n        assert!(proxy_protocol::parse(&mut &*buf).is_err());\n\n        buf.put_slice(FINAL_INPUT.as_bytes());\n\n        assert!(proxy_protocol::parse(&mut &*buf).is_ok());\n    }\n\n    #[tokio::test]\n    #[tracing_test::traced_test]\n    async fn test_header_stream_v2() {\n        let (left, mut right) = tokio::io::duplex(1024);\n\n        let header = ProxyHeader::Version2 {\n            command: ProxyCommand::Proxy,\n            transport_protocol: version2::ProxyTransportProtocol::Stream,\n            addresses: version2::ProxyAddresses::Ipv4 {\n                source: \"127.0.0.1:1\".parse().unwrap(),\n                destination: \"127.0.0.2:2\".parse().unwrap(),\n            },\n        };\n\n        let input = proxy_protocol::encode(header).unwrap();\n\n        let mut proxy_stream = Stream::incoming(ShortReader::new(left, 2, 5));\n\n        // Chunk our writes to ensure that our reader is resilient across split inputs.\n        tokio::spawn(async move {\n            tokio::time::sleep(Duration::from_millis(50)).await;\n\n            right.write_all(&input).await.expect(\"write header\");\n\n            right\n                .write_all(b\"Hello, world!\")\n                .await\n                .expect(\"write hello\");\n\n            right.shutdown().await.expect(\"shutdown\");\n        });\n\n        let hdr = proxy_stream\n            .proxy_header()\n            .await\n            .expect(\"read header\")\n            .expect(\"decode header\")\n            .expect(\"header exists\");\n\n        assert!(matches!(hdr, ProxyHeader::Version2 { .. 
}));\n\n        let mut buf = String::new();\n\n        proxy_stream\n            .read_to_string(&mut buf)\n            .await\n            .expect(\"read rest\");\n\n        assert_eq!(buf, \"Hello, world!\");\n\n        // Get the header again - should be the same.\n        let hdr = proxy_stream\n            .proxy_header()\n            .await\n            .expect(\"read header\")\n            .expect(\"decode header\")\n            .expect(\"header exists\");\n\n        assert!(matches!(hdr, ProxyHeader::Version2 { .. }));\n    }\n\n    #[tokio::test]\n    #[tracing_test::traced_test]\n    async fn test_header_stream() {\n        let (left, mut right) = tokio::io::duplex(1024);\n\n        let mut proxy_stream = Stream::incoming(ShortReader::new(left, 2, 5));\n\n        // Chunk our writes to ensure that our reader is resilient across split inputs.\n        tokio::spawn(async move {\n            tokio::time::sleep(Duration::from_millis(50)).await;\n\n            right\n                .write_all(INPUT.as_bytes())\n                .await\n                .expect(\"write header\");\n\n            right\n                .write_all(b\"Hello, world!\")\n                .await\n                .expect(\"write hello\");\n\n            right.shutdown().await.expect(\"shutdown\");\n        });\n\n        let hdr = proxy_stream\n            .proxy_header()\n            .await\n            .expect(\"read header\")\n            .expect(\"decode header\")\n            .expect(\"header exists\");\n\n        assert!(matches!(hdr, ProxyHeader::Version1 { .. 
}));\n\n        let mut buf = String::new();\n\n        proxy_stream\n            .read_to_string(&mut buf)\n            .await\n            .expect(\"read rest\");\n\n        assert_eq!(buf, \"Hello, world!\");\n\n        // Get the header again - should be the same.\n        let hdr = proxy_stream\n            .proxy_header()\n            .await\n            .expect(\"read header\")\n            .expect(\"decode header\")\n            .expect(\"header exists\");\n\n        assert!(matches!(hdr, ProxyHeader::Version1 { .. }));\n    }\n\n    #[tokio::test]\n    #[tracing_test::traced_test]\n    async fn test_noheader() {\n        let (left, mut right) = tokio::io::duplex(1024);\n\n        let mut proxy_stream = Stream::incoming(left);\n\n        right\n            .write_all(b\"Hello, world!\")\n            .await\n            .expect(\"write stream\");\n\n        right.shutdown().await.expect(\"shutdown\");\n        drop(right);\n\n        assert!(proxy_stream\n            .proxy_header()\n            .await\n            .unwrap()\n            .unwrap()\n            .is_none());\n\n        let mut buf = String::new();\n\n        proxy_stream\n            .read_to_string(&mut buf)\n            .await\n            .expect(\"read stream\");\n\n        assert_eq!(buf, \"Hello, world!\");\n    }\n}\n"
  },
  {
    "path": "ngrok/src/session.rs",
    "content": "use std::{\n    collections::{\n        HashMap,\n        VecDeque,\n    },\n    env,\n    io,\n    sync::{\n        atomic::{\n            AtomicBool,\n            Ordering,\n        },\n        Arc,\n    },\n    time::Duration,\n};\n\nuse arc_swap::ArcSwap;\nuse async_trait::async_trait;\nuse bytes::Bytes;\nuse futures::{\n    prelude::*,\n    FutureExt,\n};\nuse futures_rustls::rustls::{\n    self,\n    pki_types,\n    RootCertStore,\n};\nuse hyper_http_proxy::{\n    Intercept,\n    Proxy,\n    ProxyConnector,\n};\nuse hyper_util::client::legacy::connect::HttpConnector;\nuse muxado::heartbeat::HeartbeatConfig;\npub use muxado::heartbeat::HeartbeatHandler;\nuse once_cell::sync::{\n    Lazy,\n    OnceCell,\n};\nuse regex::Regex;\nuse rustls_pemfile::Item;\nuse thiserror::Error;\nuse tokio::{\n    io::{\n        AsyncRead,\n        AsyncWrite,\n    },\n    runtime::Handle,\n    sync::{\n        mpsc::{\n            channel,\n            Sender,\n        },\n        Mutex,\n        RwLock,\n    },\n};\nuse tokio_retry::{\n    strategy::ExponentialBackoff,\n    RetryIf,\n};\nuse tokio_util::compat::{\n    FuturesAsyncReadCompatExt,\n    TokioAsyncReadCompatExt,\n};\nuse tower_service::Service;\nuse tracing::{\n    debug,\n    warn,\n};\nuse url::Url;\n\npub use crate::internals::{\n    proto::{\n        CommandResp,\n        Restart,\n        Stop,\n        StopTunnel,\n        Update,\n    },\n    raw_session::{\n        CommandHandler,\n        RpcError,\n    },\n};\nuse crate::{\n    config::{\n        HttpTunnelBuilder,\n        LabeledTunnelBuilder,\n        ProxyProto,\n        TcpTunnelBuilder,\n        TlsTunnelBuilder,\n        TunnelConfig,\n    },\n    conn::ConnInner,\n    internals::{\n        proto::{\n            AuthExtra,\n            BindExtra,\n            BindOpts,\n            Error,\n            HttpEndpoint,\n            SecretString,\n            TcpEndpoint,\n            TlsEndpoint,\n        },\n        raw_session::{\n       
     AcceptError as RawAcceptError,\n            CommandHandlers,\n            IncomingStreams,\n            RawSession,\n            RpcClient,\n            StartSessionError,\n            NOT_IMPLEMENTED,\n        },\n    },\n    tunnel::{\n        AcceptError,\n        TunnelInner,\n        TunnelInnerInfo,\n    },\n};\n\npub(crate) const CERT_BYTES: &[u8] = include_bytes!(\"../assets/ngrok.ca.crt\");\nconst CLIENT_TYPE: &str = \"ngrok-rust\";\nconst VERSION: &str = env!(\"CARGO_PKG_VERSION\");\n\n#[derive(Clone)]\nstruct BoundTunnel {\n    proto: String,\n    opts: Option<BindOpts>,\n    extra: BindExtra,\n    labels: HashMap<String, String>,\n    forwards_to: String,\n    forwards_proto: String,\n    verify_upstream_tls: bool,\n    tx: Sender<Result<ConnInner, AcceptError>>,\n}\n\ntype TunnelConns = HashMap<String, BoundTunnel>;\n\n/// An ngrok session.\n///\n/// Encapsulates an established session with the ngrok service. Sessions recover\n/// from network failures by automatically reconnecting.\n#[derive(Clone)]\npub struct Session {\n    // Note: this is implicitly used to detect when the session (and its\n    // tunnels) have been dropped in order to shut down the accept loop.\n    _dropref: awaitdrop::Ref,\n    inner: Arc<ArcSwap<SessionInner>>,\n}\n\nstruct SessionInner {\n    runtime: Handle,\n    client: Mutex<RpcClient>,\n    closed: AtomicBool,\n    tunnels: RwLock<TunnelConns>,\n    builder: SessionBuilder,\n}\n\n/// A trait alias for types that can provide the base ngrok transport, i.e.\n/// bidirectional byte streams.\n///\n/// It is blanket-implemented for all types that satisfy its bounds. 
Most\n/// commonly, it will be a tls-wrapped tcp stream.\npub trait IoStream: AsyncRead + AsyncWrite + Unpin + Send + 'static {}\nimpl<T> IoStream for T where T: AsyncRead + AsyncWrite + Unpin + Send + 'static {}\n\n/// Trait for establishing the connection to the ngrok server.\n#[async_trait]\npub trait Connector: Sync + Send + 'static {\n    /// The function used to establish the connection to the ngrok server.\n    ///\n    /// This is effectively `async |addr, tls_config, err| -> Result<IoStream>`.\n    ///\n    /// If it is being called due to a disconnect, the [AcceptError] argument will\n    /// be populated.\n    ///\n    /// If it returns `Err(ConnectError::Canceled)`, reconnecting will be canceled\n    /// and the session will be terminated. Note that this error will never be\n    /// returned from the [default_connect] function.\n    async fn connect(\n        &self,\n        host: String,\n        port: u16,\n        tls_config: Arc<rustls::ClientConfig>,\n        err: Option<AcceptError>,\n    ) -> Result<Box<dyn IoStream>, ConnectError>;\n}\n\n#[async_trait]\nimpl<F, U> Connector for F\nwhere\n    F: Fn(String, u16, Arc<rustls::ClientConfig>, Option<AcceptError>) -> U + Send + Sync + 'static,\n    U: Future<Output = Result<Box<dyn IoStream>, ConnectError>> + Send,\n{\n    async fn connect(\n        &self,\n        host: String,\n        port: u16,\n        tls_config: Arc<rustls::ClientConfig>,\n        err: Option<AcceptError>,\n    ) -> Result<Box<dyn IoStream>, ConnectError> {\n        self(host, port, tls_config, err).await\n    }\n}\n\n/// The default ngrok connector.\n///\n/// Establishes a TCP connection to `addr`, and then performs a TLS handshake\n/// using the `tls_config`.\n///\n/// Discards any errors during reconnect, allowing attempts to recur\n/// indefinitely.\npub async fn default_connect(\n    host: String,\n    port: u16,\n    tls_config: Arc<rustls::ClientConfig>,\n    _: Option<AcceptError>,\n) -> Result<Box<dyn IoStream>, 
ConnectError> {\n    let stream = tokio::net::TcpStream::connect(&(host.as_str(), port))\n        .await\n        .map_err(ConnectError::Tcp)?\n        .compat();\n\n    let domain = pki_types::ServerName::try_from(host)\n        .expect(\"host should have been validated by SessionBuilder::server_addr\");\n\n    let tls_conn = futures_rustls::TlsConnector::from(tls_config)\n        .connect(domain, stream)\n        .await\n        .map_err(ConnectError::Tls)?;\n    Ok(Box::new(tls_conn.compat()) as Box<dyn IoStream>)\n}\n\n#[derive(Debug, Clone, Error)]\n#[error(\"unsupported proxy address: {0}\")]\n/// An unsupported proxy address was provided.\npub struct ProxyUnsupportedError(Url);\n\nfn connect_proxy(url: Url) -> Result<Arc<dyn Connector>, ProxyUnsupportedError> {\n    Ok(match url.scheme() {\n        \"http\" | \"https\" => Arc::new(connect_http_proxy(url)),\n        \"socks5\" => {\n            let host = url.host_str().unwrap_or_default();\n            let port = url.port().unwrap_or(1080);\n            Arc::new(connect_socks_proxy(format!(\"{host}:{port}\")))\n        }\n        _ => return Err(ProxyUnsupportedError(url)),\n    })\n}\n\nfn connect_http_proxy(url: Url) -> impl Connector {\n    move |host: String, port, tls_config, _| {\n        let mut proxy = Proxy::new(\n            Intercept::All,\n            url.as_str().try_into().expect(\"urls should be valid uris\"),\n        );\n        proxy.force_connect();\n        let mut connector = HttpConnector::new();\n        connector.enforce_http(false);\n        async move {\n            let mut connector = ProxyConnector::from_proxy(connector, proxy)\n                .map_err(|e| ConnectError::ProxyConnect(Box::new(e)))?;\n\n            let server_uri = format!(\"http://{host}:{port}\")\n                .parse()\n                .expect(\"host should have been validated by SessionBuilder::server_addr\");\n\n            let conn = connector\n                .call(server_uri)\n                .await\n     
           .map_err(|e| ConnectError::ProxyConnect(Box::new(e)))?;\n\n            let tls_conn = futures_rustls::TlsConnector::from(tls_config)\n                .connect(\n                    pki_types::ServerName::try_from(host)\n                        .expect(\"host should have been validated by SessionBuilder::server_addr\"),\n                    hyper_util::rt::TokioIo::new(conn).compat(),\n                )\n                .await\n                .map_err(ConnectError::Tls)?;\n\n            Ok(Box::new(tls_conn.compat()) as Box<dyn IoStream>)\n        }\n    }\n}\n\nfn connect_socks_proxy(proxy_addr: String) -> impl Connector {\n    move |server_host: String, server_port, tls_config, _| {\n        let proxy_addr = proxy_addr.clone();\n        async move {\n            let conn = tokio_socks::tcp::Socks5Stream::connect(\n                proxy_addr.as_str(),\n                format!(\"{server_host}:{server_port}\"),\n            )\n            .await\n            .map_err(|e| ConnectError::ProxyConnect(Box::new(e)))?\n            .compat();\n\n            let tls_conn = futures_rustls::TlsConnector::from(tls_config)\n                .connect(\n                    pki_types::ServerName::try_from(server_host)\n                        .expect(\"host should have been validated by SessionBuilder::server_addr\"),\n                    conn,\n                )\n                .await\n                .map_err(ConnectError::Tls)?;\n\n            Ok(Box::new(tls_conn.compat()) as Box<dyn IoStream>)\n        }\n    }\n}\n\n/// The builder for an ngrok [Session].\n#[derive(Clone)]\npub struct SessionBuilder {\n    // Consuming libraries and applications can add a client type and version on\n    // top of the \"base\" type and version declared by this library.\n    versions: VecDeque<(String, String, Option<String>)>,\n    authtoken: Option<SecretString>,\n    metadata: Option<String>,\n    heartbeat_interval: Option<i64>,\n    heartbeat_tolerance: Option<i64>,\n    
heartbeat_handler: Option<Arc<dyn HeartbeatHandler>>,\n    server_host: String,\n    server_port: u16,\n    ca_cert: Option<bytes::Bytes>,\n    tls_config: Option<rustls::ClientConfig>,\n    connector: Arc<dyn Connector>,\n    handlers: CommandHandlers,\n    cookie: Option<SecretString>,\n    id: Option<String>,\n}\n\n/// Errors arising at [SessionBuilder::connect] time.\n#[derive(Error, Debug)]\n#[non_exhaustive]\npub enum ConnectError {\n    /// An error occurred when establishing a TCP connection to the ngrok\n    /// server.\n    #[error(\"failed to establish tcp connection\")]\n    Tcp(#[source] io::Error),\n    /// A TLS handshake error occurred.\n    ///\n    /// This is usually a certificate validation issue, or an attempt to connect\n    /// to something that doesn't actually speak TLS.\n    #[error(\"tls handshake error\")]\n    Tls(#[source] io::Error),\n    /// An error occurred when starting the ngrok session.\n    ///\n    /// This might occur when there's a protocol mismatch interfering with the\n    /// heartbeat routine.\n    #[error(\"failed to start ngrok session\")]\n    Start(#[source] StartSessionError),\n    /// An error occurred when attempting to authenticate.\n    #[error(\"authentication failure\")]\n    Auth(#[source] RpcError),\n    /// An error occurred when rebinding tunnels during a reconnect\n    #[error(\"error rebinding tunnel after reconnect\")]\n    Rebind(#[source] RpcError),\n    /// An error arising from a failure to connect through a proxy.\n    #[error(\"failed to connect through proxy\")]\n    ProxyConnect(#[source] Box<dyn std::error::Error + Send + Sync + 'static>),\n    /// The (re)connect function gave up.\n    ///\n    /// This will never be returned by the default connect function, and is\n    /// instead used to cancel the reconnect loop.\n    #[error(\"the connect function gave up\")]\n    Canceled,\n}\n\nimpl Error for ConnectError {\n    fn error_code(&self) -> Option<&str> {\n        match self {\n            
ConnectError::Auth(resp) | ConnectError::Rebind(resp) => resp.error_code(),\n            _ => None,\n        }\n    }\n    fn msg(&self) -> String {\n        match self {\n            ConnectError::Auth(resp) | ConnectError::Rebind(resp) => resp.msg(),\n            _ => format!(\"{self}\"),\n        }\n    }\n}\n\n/// The builder specified an invalid heartbeat interval.\n///\n/// This is most likely caused a [Duration] that's outside of the [i64::MAX]\n/// nanosecond range.\n#[derive(Copy, Clone, Debug, Error)]\n#[error(\"invalid heartbeat interval: {0}\")]\npub struct InvalidHeartbeatInterval(u128);\n/// The builder specified an invalid heartbeat tolerance.\n///\n/// This is most likely caused a [Duration] that's outside of the [i64::MAX]\n/// nanosecond range.\n#[derive(Copy, Clone, Debug, Error)]\n#[error(\"invalid heartbeat tolerance: {0}\")]\npub struct InvalidHeartbeatTolerance(u128);\n\n/// The builder provided an invalid server address\n#[derive(Error, Debug, Clone)]\n#[error(\"invalid server address: {0}\")]\npub struct InvalidServerAddr(String);\n\nimpl Default for SessionBuilder {\n    fn default() -> Self {\n        SessionBuilder {\n            versions: [(CLIENT_TYPE.to_string(), VERSION.to_string(), None)]\n                .into_iter()\n                .collect(),\n            authtoken: None,\n            metadata: None,\n            heartbeat_interval: None,\n            heartbeat_tolerance: None,\n            heartbeat_handler: None,\n            server_host: \"connect.ngrok-agent.com\".into(),\n            server_port: 443,\n            ca_cert: None,\n            tls_config: None,\n            connector: Arc::new(default_connect),\n            handlers: Default::default(),\n            cookie: None,\n            id: None,\n        }\n    }\n}\n\nfn sanitize_ua_string(s: impl AsRef<str>) -> String {\n    static UA_BANNED: OnceCell<Regex> = OnceCell::new();\n    UA_BANNED\n        .get_or_init(|| 
Regex::new(\"[^/!#$%&'*+-.^_`|~0-9a-zA-Z]\").unwrap())\n        .replace_all(s.as_ref(), \"#\")\n        .replace('/', \"-\")\n}\n\nimpl SessionBuilder {\n    /// Configures the session to authenticate with the provided authtoken. You\n    /// can [find your existing authtoken] or [create a new one] in the ngrok\n    /// dashboard.\n    ///\n    /// See the [authtoken parameter in the ngrok docs] for additional details.\n    ///\n    /// [find your existing authtoken]: https://dashboard.ngrok.com/get-started/your-authtoken\n    /// [create a new one]: https://dashboard.ngrok.com/tunnels/authtokens\n    /// [authtoken parameter in the ngrok docs]: https://ngrok.com/docs/ngrok-agent/config#authtoken\n    pub fn authtoken(&mut self, authtoken: impl Into<String>) -> &mut Self {\n        self.authtoken = Some(authtoken.into().into());\n        self\n    }\n    /// Shortcut for calling [SessionBuilder::authtoken] with the value of the\n    /// NGROK_AUTHTOKEN environment variable.\n    pub fn authtoken_from_env(&mut self) -> &mut Self {\n        self.authtoken = env::var(\"NGROK_AUTHTOKEN\").ok().map(From::from);\n        self\n    }\n\n    /// Configures how often the session will send heartbeat messages to the ngrok\n    /// service to check session liveness.\n    ///\n    /// See the [heartbeat_interval parameter in the ngrok docs] for additional\n    /// details.\n    ///\n    /// [heartbeat_interval parameter in the ngrok docs]: https://ngrok.com/docs/ngrok-agent/config#heartbeat_interval\n    pub fn heartbeat_interval(\n        &mut self,\n        heartbeat_interval: Duration,\n    ) -> Result<&mut Self, InvalidHeartbeatInterval> {\n        let nanos = heartbeat_interval.as_nanos();\n        let nanos = i64::try_from(nanos).map_err(|_| InvalidHeartbeatInterval(nanos))?;\n        self.heartbeat_interval = Some(nanos);\n        Ok(self)\n    }\n\n    /// Configures the duration to wait for a response to a heartbeat before\n    /// assuming the session connection is 
dead and attempting to reconnect.\n    ///\n    /// See the [heartbeat_tolerance parameter in the ngrok docs] for additional\n    /// details.\n    ///\n    /// [heartbeat_tolerance parameter in the ngrok docs]: https://ngrok.com/docs/ngrok-agent/config#heartbeat_tolerance\n    pub fn heartbeat_tolerance(\n        &mut self,\n        heartbeat_tolerance: Duration,\n    ) -> Result<&mut Self, InvalidHeartbeatTolerance> {\n        let nanos = heartbeat_tolerance.as_nanos();\n        let nanos = i64::try_from(nanos).map_err(|_| InvalidHeartbeatTolerance(nanos))?;\n        self.heartbeat_tolerance = Some(nanos);\n        Ok(self)\n    }\n\n    /// Configures the opaque, machine-readable metadata string for this session.\n    /// Metadata is made available to you in the ngrok dashboard and the Agents API\n    /// resource. It is a useful way to allow you to uniquely identify sessions. We\n    /// suggest encoding the value in a structured format like JSON.\n    ///\n    /// See the [metdata parameter in the ngrok docs] for additional details.\n    ///\n    /// [metdata parameter in the ngrok docs]: https://ngrok.com/docs/ngrok-agent/config#metadata\n    pub fn metadata(&mut self, metadata: impl Into<String>) -> &mut Self {\n        self.metadata = Some(metadata.into());\n        self\n    }\n\n    /// Configures the network address to dial to connect to the ngrok service.\n    /// Use this option only if you are connecting to a custom agent ingress.\n    ///\n    /// See the [server_addr parameter in the ngrok docs] for additional details.\n    ///\n    /// [server_addr parameter in the ngrok docs]: https://ngrok.com/docs/ngrok-agent/config#server_addr\n    pub fn server_addr(&mut self, addr: impl Into<String>) -> Result<&mut Self, InvalidServerAddr> {\n        let addr = addr.into();\n        let server_uri: Url = format!(\"http://{addr}\")\n            .parse()\n            .map_err(|_| InvalidServerAddr(addr.clone()))?;\n\n        self.server_host = server_uri\n      
      .host_str()\n            .map(String::from)\n            .ok_or_else(|| InvalidServerAddr(addr.clone()))?;\n\n        pki_types::ServerName::try_from(self.server_host.as_str())\n            .map_err(|_| InvalidServerAddr(addr.clone()))?;\n\n        self.server_port = server_uri.port().unwrap_or(443);\n\n        Ok(self)\n    }\n\n    /// Sets the file path to a default certificate in PEM format to validate ngrok Session TLS connections.\n    /// Setting to \"trusted\" is the default, using the ngrok CA certificate.\n    /// Setting to \"host\" will verify using the certificates on the host operating system.\n    /// A client config set via tls_config after calling root_cas will override this value.\n    ///\n    /// Corresponds to the [root_cas parameter in the ngrok docs]\n    ///\n    /// [root_cas parameter in the ngrok docs]: https://ngrok.com/docs/ngrok-agent/config#root_cas\n    pub fn root_cas(&mut self, root_cas: impl Into<String>) -> Result<&mut Self, io::Error> {\n        match root_cas.into().clone().as_str() {\n            \"trusted\" => self.ca_cert = None,\n            \"host\" => self.tls_config = Some(host_certs_tls_config().map_err(|e| e.kind())?),\n            v => {\n                std::fs::read(v).map(|root_cas| self.ca_cert = Some(Bytes::from(root_cas)))?;\n            }\n        }\n        Ok(self)\n    }\n\n    /// Sets the default certificate in PEM format to validate ngrok Session TLS connections.\n    /// A client config set via tls_config will override this value.\n    ///\n    /// Roughly corresponds to the \"path to a certificate PEM file\" option in the\n    /// [root_cas parameter in the ngrok docs]\n    ///\n    /// [root_cas parameter in the ngrok docs]: https://ngrok.com/docs/ngrok-agent/config#root_cas\n    pub fn ca_cert(&mut self, ca_cert: Bytes) -> &mut Self {\n        self.ca_cert = Some(ca_cert);\n        self\n    }\n\n    /// Configures the TLS client used to connect to the ngrok service while\n    /// establishing 
the session. Use this option only if you are connecting through\n    /// a man-in-the-middle or deep packet inspection proxy. Passed to the\n    /// connect callback set with `SessionBuilder::connect`.\n    ///\n    /// Roughly corresponds to the [root_cas parameter in the ngrok docs], but allows\n    /// for deeper TLS configuration.\n    ///\n    /// [root_cas parameter in the ngrok docs]: https://ngrok.com/docs/ngrok-agent/config#root_cas\n    pub fn tls_config(&mut self, config: rustls::ClientConfig) -> &mut Self {\n        self.tls_config = Some(config);\n        self\n    }\n\n    /// Configures a function which is called to establish the connection to the\n    /// ngrok service. Use this option if you need to connect through an outbound\n    /// proxy. In the event of network disruptions, it will be called each time\n    /// the session reconnects.\n    pub fn connector(&mut self, connect: impl Connector) -> &mut Self {\n        self.connector = Arc::new(connect);\n        self\n    }\n\n    /// Configures the session to connect to ngrok through an outbound\n    /// HTTP or SOCKS5 proxy. This parameter is ignored if you override the connector\n    /// with [SessionBuilder::connector].\n    ///\n    /// See the [proxy url parameter in the ngrok docs] for additional details.\n    ///\n    /// [proxy url parameter in the ngrok docs]: https://ngrok.com/docs/ngrok-agent/config#proxy_url\n    pub fn proxy_url(&mut self, url: Url) -> Result<&mut Self, ProxyUnsupportedError> {\n        self.connector = connect_proxy(url)?;\n        Ok(self)\n    }\n\n    /// Configures a function which is called when the ngrok service requests that\n    /// this [Session] stops. 
Your application may choose to interpret this callback\n    /// as a request to terminate the [Session] or the entire process.\n    ///\n    /// Errors returned by this function will be visible to the ngrok dashboard or\n    /// API as the response to the Stop operation.\n    ///\n    /// Do not block inside this callback. It will cause the Dashboard or API\n    /// stop operation to time out. Do not call [std::process::exit] inside this\n    /// callback, it will also cause the operation to time out.\n    pub fn handle_stop_command(&mut self, handler: impl CommandHandler<Stop>) -> &mut Self {\n        self.handlers.on_stop = Some(Arc::new(handler));\n        self\n    }\n\n    /// Configures a function which is called when the ngrok service requests\n    /// that this [Session] updates. Your application may choose to interpret\n    /// this callback as a request to restart the [Session] or the entire\n    /// process.\n    ///\n    /// Errors returned by this function will be visible to the ngrok dashboard or\n    /// API as the response to the Restart operation.\n    ///\n    /// Do not block inside this callback. It will cause the Dashboard or API\n    /// stop operation to time out. Do not call [std::process::exit] inside this\n    /// callback, it will also cause the operation to time out.\n    pub fn handle_restart_command(&mut self, handler: impl CommandHandler<Restart>) -> &mut Self {\n        self.handlers.on_restart = Some(Arc::new(handler));\n        self\n    }\n\n    /// Configures a function which is called when the ngrok service requests\n    /// that this [Session] updates. Your application may choose to interpret\n    /// this callback as a request to update its configuration, itself, or to\n    /// invoke some other application-specific behavior.\n    ///\n    /// Errors returned by this function will be visible to the ngrok dashboard or\n    /// API as the response to the Restart operation.\n    ///\n    /// Do not block inside this callback. 
It will cause the Dashboard or API\n    /// stop operation to time out. Do not call [std::process::exit] inside this\n    /// callback, it will also cause the operation to time out.\n    pub fn handle_update_command(&mut self, handler: impl CommandHandler<Update>) -> &mut Self {\n        self.handlers.on_update = Some(Arc::new(handler));\n        self\n    }\n\n    /// Call the provided handler whenever a heartbeat response is received.\n    ///\n    /// If the handler returns an error, the heartbeat task will exit, resulting\n    /// in the session eventually dying as well.\n    pub fn handle_heartbeat(&mut self, callback: impl HeartbeatHandler) -> &mut Self {\n        self.heartbeat_handler = Some(Arc::new(callback));\n        self\n    }\n\n    /// Add client type and version information for a client application.\n    ///\n    /// This is a way for applications and library consumers of this crate\n    /// identify themselves.\n    ///\n    /// This will add a new entry to the `User-Agent` field in the \"most significant\"\n    /// (first) position. 
Comments must follow [RFC 7230] or a connection error may occur.\n    ///\n    /// [RFC 7230]: https://datatracker.ietf.org/doc/html/rfc7230#section-3.2.6\n    pub fn client_info(\n        &mut self,\n        client_type: impl Into<String>,\n        version: impl Into<String>,\n        comments: Option<impl Into<String>>,\n    ) -> &mut Self {\n        self.versions.push_front((\n            client_type.into(),\n            version.into(),\n            comments.map(|c| c.into()),\n        ));\n        self\n    }\n\n    /// Begins a new ngrok [Session] by connecting to the ngrok service.\n    /// `connect` blocks until the session is successfully established or fails with\n    /// an error.\n    pub async fn connect(&self) -> Result<Session, ConnectError> {\n        let (dropref, dropped) = awaitdrop::awaitdrop();\n        let (inner, mut incoming) = self.connect_inner(None).await?;\n\n        let rt = inner.runtime.clone();\n\n        let inner = Arc::new(ArcSwap::new(inner.into()));\n\n        let session = Session {\n            _dropref: dropref,\n            inner: inner.clone(),\n        };\n\n        // store the session for use with StopTunnel\n        incoming.session = Some(session.clone());\n\n        rt.spawn(future::select(\n            accept_incoming(incoming, inner).boxed(),\n            dropped.wait(),\n        ));\n\n        Ok(session)\n    }\n\n    pub(crate) fn get_or_create_tls_config(&self) -> rustls::ClientConfig {\n        // if the user has provided a custom TLS config, use that\n        if let Some(tls_config) = &self.tls_config {\n            return tls_config.clone();\n        }\n        // generate a default TLS config\n        let mut root_store = rustls::RootCertStore::empty();\n        let cert_pem = self.ca_cert.as_ref().map_or(CERT_BYTES, |it| it.as_ref());\n        let certs = rustls_pemfile::read_all(&mut io::Cursor::new(cert_pem))\n            .filter_map(|it| match it {\n                Ok(Item::X509Certificate(bs)) => 
Some(bs),\n                Err(e) => {\n                    warn!(error = ?e, \"skipping certificate which failed to parse\");\n                    None\n                }\n                Ok(_) => {\n                    warn!(\"skipping non-x509 certificate\");\n                    None\n                }\n            })\n            .collect::<Vec<_>>();\n        root_store.add_parsable_certificates(certs);\n\n        rustls::ClientConfig::builder()\n            .with_root_certificates(root_store)\n            .with_no_client_auth()\n    }\n\n    async fn connect_inner(\n        &self,\n        err: impl Into<Option<AcceptError>>,\n    ) -> Result<(SessionInner, IncomingStreams), ConnectError> {\n        let conn = self\n            .connector\n            .connect(\n                self.server_host.clone(),\n                self.server_port,\n                Arc::new(self.get_or_create_tls_config()),\n                err.into(),\n            )\n            .await?;\n\n        let mut heartbeat_config = HeartbeatConfig::default();\n        if let Some(interval) = self.heartbeat_interval {\n            heartbeat_config.interval = Duration::from_nanos(interval as u64);\n        }\n        if let Some(tolerance) = self.heartbeat_tolerance {\n            heartbeat_config.tolerance = Duration::from_nanos(tolerance as u64);\n        }\n        heartbeat_config.handler = self.heartbeat_handler.clone();\n\n        // convert these while we have ownership\n        let heartbeat_interval = heartbeat_config.interval.as_nanos() as i64;\n        let heartbeat_tolerance = heartbeat_config.tolerance.as_nanos() as i64;\n\n        let mut raw = RawSession::start(conn, heartbeat_config, self.handlers.clone())\n            .await\n            .map_err(ConnectError::Start)?;\n\n        // list of possibilities: https://doc.rust-lang.org/std/env/consts/constant.OS.html\n        let os = match env::consts::OS {\n            \"macos\" => \"darwin\",\n            _ => env::consts::OS,\n 
       };\n\n        let user_agent = self\n            .versions\n            .iter()\n            .map(|(name, version, comments)| {\n                format!(\n                    \"{}/{}{}\",\n                    sanitize_ua_string(name),\n                    sanitize_ua_string(version),\n                    comments\n                        .as_ref()\n                        .map_or(String::new(), |f| format!(\" ({f})\"))\n                )\n            })\n            .collect::<Vec<_>>()\n            .join(\" \");\n\n        let client_type = self.versions[0].0.clone();\n        let version = self.versions[0].1.clone();\n\n        let resp = raw\n            .auth(\n                self.id.as_deref().unwrap_or_default(),\n                AuthExtra {\n                    version,\n                    client_type,\n                    user_agent,\n                    auth_token: self.authtoken.clone().unwrap_or_default(),\n                    metadata: self.metadata.clone().unwrap_or_default(),\n                    os: os.into(),\n                    arch: std::env::consts::ARCH.into(),\n                    heartbeat_interval,\n                    heartbeat_tolerance,\n                    restart_unsupported_error: self\n                        .handlers\n                        .on_restart\n                        .is_none()\n                        .then_some(NOT_IMPLEMENTED.into())\n                        .or(Some(\"\".into())),\n                    stop_unsupported_error: self\n                        .handlers\n                        .on_stop\n                        .is_none()\n                        .then_some(NOT_IMPLEMENTED.into())\n                        .or(Some(\"\".into())),\n                    update_unsupported_error: self\n                        .handlers\n                        .on_update\n                        .is_none()\n                        .then_some(NOT_IMPLEMENTED.into())\n                        .or(Some(\"\".into())),\n      
              cookie: self.cookie.clone().unwrap_or_default(),\n                    ..Default::default()\n                },\n            )\n            .await\n            .map_err(ConnectError::Auth)?;\n\n        let (client, incoming) = raw.split();\n\n        let builder = SessionBuilder {\n            cookie: resp.extra.cookie,\n            id: resp.client_id.into(),\n            ..self.clone()\n        };\n\n        Ok((\n            SessionInner {\n                runtime: Handle::current(),\n                client: client.into(),\n                tunnels: Default::default(),\n                closed: Default::default(),\n                builder,\n            },\n            incoming,\n        ))\n    }\n}\n\nimpl Session {\n    /// Create a new [SessionBuilder] to configure a new ngrok session.\n    pub fn builder() -> SessionBuilder {\n        SessionBuilder::default()\n    }\n\n    /// Start building a tunnel for an HTTP endpoint.\n    ///\n    /// https://ngrok.com/docs/http/\n    pub fn http_endpoint(&self) -> HttpTunnelBuilder {\n        self.clone().into()\n    }\n\n    /// Start building a tunnel for a TCP endpoint.\n    ///\n    /// https://ngrok.com/docs/tcp/\n    pub fn tcp_endpoint(&self) -> TcpTunnelBuilder {\n        self.clone().into()\n    }\n\n    /// Start building a tunnel for a TLS endpoint.\n    ///\n    /// https://ngrok.com/docs/tls/\n    pub fn tls_endpoint(&self) -> TlsTunnelBuilder {\n        self.clone().into()\n    }\n\n    /// Start building a labeled tunnel.\n    ///\n    /// https://ngrok.com/docs/network-edge/edges/#tunnel-group\n    pub fn labeled_tunnel(&self) -> LabeledTunnelBuilder {\n        self.clone().into()\n    }\n\n    /// Get the unique ID of this session.\n    pub fn id(&self) -> String {\n        self.inner\n            .load()\n            .builder\n            .id\n            .as_ref()\n            .expect(\"Session ID not set\")\n            .clone()\n    }\n\n    /// Start a new tunnel in this session.\n    
pub(crate) async fn start_tunnel<C>(&self, tunnel_cfg: C) -> Result<TunnelInner, RpcError>\n    where\n        C: TunnelConfig,\n    {\n        let inner = self.inner.load();\n        let mut client = inner.client.lock().await;\n\n        // let tunnelCfg: dyn TunnelConfig = TunnelConfig(opts);\n        let (tx, rx) = channel(64);\n\n        let proto = tunnel_cfg.proto();\n        let opts = tunnel_cfg.opts();\n        let mut extra = tunnel_cfg.extra();\n        let labels = tunnel_cfg.labels();\n        let forwards_to = tunnel_cfg.forwards_to();\n        let forwards_proto = tunnel_cfg.forwards_proto();\n        let verify_upstream_tls = tunnel_cfg.verify_upstream_tls();\n\n        // non-labeled tunnel\n        let (tunnel, bound) = if tunnel_cfg.proto() != \"\" {\n            let resp = client\n                .listen(\n                    &proto,\n                    opts.clone().unwrap(), // this is crate-defined, and must exist if proto is non-empty\n                    extra.clone(),\n                    \"\",\n                    &forwards_to,\n                    &forwards_proto,\n                )\n                .await?;\n\n            extra.token = resp.extra.token;\n            let info = TunnelInnerInfo {\n                id: resp.client_id,\n                proto: resp.proto.clone(),\n                url: resp.url,\n                labels: HashMap::new(),\n                forwards_to: tunnel_cfg.forwards_to(),\n                metadata: extra.metadata.clone(),\n            };\n\n            (\n                TunnelInner {\n                    info,\n                    session: self.clone(),\n                    incoming: rx.into(),\n                },\n                BoundTunnel {\n                    proto: resp.proto,\n                    opts: resp.bind_opts.into(),\n                    extra,\n                    labels,\n                    forwards_to,\n                    forwards_proto,\n                    verify_upstream_tls,\n       
             tx,\n                },\n            )\n        } else {\n            // labeled tunnel\n            let resp = client\n                .listen_label(\n                    labels.clone(),\n                    &extra.metadata,\n                    &forwards_to,\n                    &forwards_proto,\n                )\n                .await?;\n\n            let info = TunnelInnerInfo {\n                id: resp.id,\n                proto: Default::default(),\n                url: Default::default(),\n                labels: tunnel_cfg.labels(),\n                forwards_to: tunnel_cfg.forwards_to(),\n                metadata: extra.metadata.clone(),\n            };\n\n            (\n                TunnelInner {\n                    info,\n                    session: self.clone(),\n                    incoming: rx.into(),\n                },\n                BoundTunnel {\n                    extra,\n                    proto: Default::default(),\n                    opts: Default::default(),\n                    forwards_to,\n                    forwards_proto,\n                    verify_upstream_tls,\n                    labels,\n                    tx,\n                },\n            )\n        };\n\n        let mut tunnels = inner.tunnels.write().await;\n        tunnels.insert(tunnel.info.id.clone(), bound);\n\n        Ok(tunnel)\n    }\n\n    /// Close a tunnel with an error from the remote.\n    /// Skips the call to unlisten, since the remote has already rejected it.\n    pub(crate) async fn close_tunnel_with_error(&self, id: impl AsRef<str>, err: AcceptError) {\n        let id = id.as_ref();\n        let inner = self.inner.load();\n        if let Some(tun) = inner.tunnels.write().await.remove(id) {\n            let _ = tun.tx.send(Err(err)).await;\n        };\n    }\n\n    /// Close a tunnel with the given ID.\n    pub async fn close_tunnel(&self, id: impl AsRef<str>) -> Result<(), RpcError> {\n        let id = id.as_ref();\n        let inner 
= self.inner.load();\n        inner.client.lock().await.unlisten(id).await?;\n        inner.tunnels.write().await.remove(id);\n        Ok(())\n    }\n\n    pub(crate) fn runtime(&self) -> Handle {\n        self.inner.load().runtime.clone()\n    }\n\n    /// Close the ngrok session.\n    pub async fn close(&mut self) -> Result<(), RpcError> {\n        let inner = self.inner.load();\n        let res = inner.client.lock().await.close().await;\n        inner.closed.store(true, Ordering::SeqCst);\n        res\n    }\n}\n\npub(crate) fn host_certs_tls_config() -> Result<rustls::ClientConfig, &'static io::Error> {\n    // The root certificate store, lazily loaded once.\n    static ROOT_STORE: Lazy<Result<RootCertStore, io::Error>> = Lazy::new(|| {\n        let der_certs = rustls_native_certs::load_native_certs()?\n            .into_iter()\n            .collect::<Vec<_>>();\n        let mut root_store = RootCertStore::empty();\n        root_store.add_parsable_certificates(der_certs);\n        Ok(root_store)\n    });\n\n    let root_store = ROOT_STORE.as_ref()?;\n    Ok(rustls::ClientConfig::builder()\n        .with_root_certificates(root_store.clone())\n        .with_no_client_auth())\n}\n\nasync fn accept_one(\n    incoming: &mut IncomingStreams,\n    inner: &ArcSwap<SessionInner>,\n) -> Result<(), AcceptError> {\n    let conn = match incoming.accept().await {\n        Ok(conn) => conn,\n        // Assume if we got a muxado error, the session is borked. Break and\n        // propagate the error to all of the tunnels out in the wild.\n        Err(RawAcceptError::Transport(error)) => return Err(error.into()),\n        // The other errors are either a bad header or an unrecognized\n        // stream type. 
They're non-fatal, but could signal a protocol\n        // mismatch.\n        Err(error) => {\n            warn!(?error, \"protocol error when accepting tunnel connection\");\n            return Ok(());\n        }\n    };\n    let id = conn.header.id.clone();\n    let remote_addr = conn.header.client_addr.parse().unwrap_or_else(|error| {\n        warn!(\n            client_addr = conn.header.client_addr,\n            %error,\n            \"invalid remote addr for tunnel connection\",\n        );\n        \"0.0.0.0:0\".parse().unwrap()\n    });\n    let inner = inner.load();\n    let guard = inner.tunnels.read().await;\n    let res = if let Some(tun) = guard.get(&id) {\n        let mut header = conn.header;\n        let app_protocol = Some(tun.forwards_proto.to_string()).filter(|s| !s.is_empty());\n        let verify_upstream_tls = tun.verify_upstream_tls;\n        // Note: this is a bit of a hack. Normally, passthrough_tls is only\n        // a thing on edge connections, but we're making sure it's set for\n        // endpoint connections as well. In their case, we have to look at the\n        // options used to bind the endpoint.\n        if let Some(BindOpts::Tls(opts)) = &tun.opts {\n            header.passthrough_tls = opts.tls_termination.is_none();\n        }\n        let proxy_proto = if let Some(\n            BindOpts::Tls(TlsEndpoint { proxy_proto, .. })\n            | BindOpts::Http(HttpEndpoint { proxy_proto, .. })\n            | BindOpts::Tcp(TcpEndpoint { proxy_proto, .. 
}),\n        ) = tun.opts\n        {\n            proxy_proto\n        } else {\n            ProxyProto::None\n        };\n        tun.tx\n            .send(Ok(ConnInner {\n                info: crate::conn::Info {\n                    app_protocol,\n                    verify_upstream_tls,\n                    remote_addr,\n                    header,\n                    proxy_proto,\n                },\n                stream: conn.stream,\n            }))\n            .await\n    } else {\n        Ok(())\n    };\n    drop(guard);\n    if res.is_err() {\n        RwLock::write(&inner.tunnels).await.remove(&id);\n    }\n    Ok(())\n}\n\nasync fn try_reconnect(\n    inner: Arc<ArcSwap<SessionInner>>,\n    err: impl Into<Option<AcceptError>>,\n) -> Result<IncomingStreams, ConnectError> {\n    let old_inner = inner.load();\n    if old_inner.closed.load(Ordering::SeqCst) {\n        return Err(ConnectError::Canceled);\n    }\n    let (new_inner, new_incoming) = old_inner.builder.connect_inner(err).await?;\n    let mut client = new_inner.client.lock().await;\n    let mut new_tunnels = new_inner.tunnels.write().await;\n    let old_tunnels = old_inner.tunnels.read().await;\n\n    for (id, tun) in old_tunnels.iter() {\n        if !tun.proto.is_empty() {\n            let resp = client\n                .listen(\n                    &tun.proto,\n                    tun.opts.clone().unwrap(),\n                    tun.extra.clone(),\n                    id,\n                    &tun.forwards_to,\n                    &tun.forwards_proto,\n                )\n                .await\n                .map_err(ConnectError::Rebind)?;\n            debug!(?resp, %id, %tun.proto, ?tun.opts, ?tun.extra, %tun.forwards_to, \"rebound tunnel\");\n            new_tunnels.insert(id.clone(), tun.clone());\n        } else {\n            let resp = client\n                .listen_label(\n                    tun.labels.clone(),\n                    &tun.extra.metadata,\n                    
&tun.forwards_to,\n                    &tun.forwards_proto,\n                )\n                .await\n                .map_err(ConnectError::Rebind)?;\n\n            if !resp.id.is_empty() {\n                new_tunnels.insert(resp.id, tun.clone());\n            } else {\n                new_tunnels.insert(id.clone(), tun.clone());\n            }\n        }\n    }\n\n    drop(old_tunnels);\n    drop(client);\n    drop(new_tunnels);\n    inner.store(new_inner.into());\n\n    Ok(new_incoming)\n}\n\nasync fn accept_incoming(mut incoming: IncomingStreams, inner: Arc<ArcSwap<SessionInner>>) {\n    let error: AcceptError = loop {\n        if let Err(error) = accept_one(&mut incoming, &inner).await {\n            debug!(%error, \"failed to accept stream, attempting reconnect\");\n            // This is gross, but should perform fine. Couple of notes:\n            // * Mutex so that both the action and condition can share access to\n            //   `error`. Realistically, the lock calls should be non-concurrent,\n            //   but Rust can't prove that.\n            // * Not setting the error in the action because then a reference\n            //   to a FnMut closure would escape via the returned Future, which is\n            //   a no-no.\n            let error = parking_lot::Mutex::new(Some(error));\n            let reconnect = RetryIf::spawn(\n                ExponentialBackoff::from_millis(50),\n                || try_reconnect(inner.clone(), error.lock().clone()).map_err(Arc::new),\n                |err: &Arc<ConnectError>| {\n                    if let ConnectError::Canceled = **err {\n                        false\n                    } else {\n                        *error.lock() = Some(AcceptError::Reconnect(err.clone()));\n                        true\n                    }\n                },\n            );\n            incoming = match reconnect.await {\n                Ok(incoming) => incoming,\n                Err(error) => {\n                    
debug!(%error, \"reconnect failed, giving up\");\n                    break AcceptError::Reconnect(error);\n                }\n            };\n        }\n    };\n    for (_id, tun) in inner.load().tunnels.write().await.drain() {\n        let _ = tun.tx.send(Err(error.clone())).await;\n    }\n}\n\n#[cfg(test)]\nmod test {\n    use super::*;\n\n    #[test]\n    fn test_sanitize_ua() {\n        assert_eq!(\n            sanitize_ua_string(\"library/official/rust\"),\n            \"library-official-rust\"\n        );\n        assert_eq!(\n            sanitize_ua_string(\"something@really☺weird\"),\n            \"something#really#weird\"\n        );\n    }\n}\n"
  },
  {
    "path": "ngrok/src/tunnel.rs",
    "content": "use std::{\n    collections::HashMap,\n    pin::Pin,\n    sync::Arc,\n    task::{\n        Context,\n        Poll,\n    },\n};\n\nuse async_trait::async_trait;\nuse futures::Stream;\nuse muxado::Error as MuxadoError;\nuse thiserror::Error;\nuse tokio::sync::mpsc::Receiver;\n\nuse crate::{\n    config::{\n        HttpTunnelBuilder,\n        LabeledTunnelBuilder,\n        TcpTunnelBuilder,\n        TlsTunnelBuilder,\n    },\n    conn::{\n        ConnInner,\n        EdgeConn,\n        EndpointConn,\n    },\n    internals::raw_session::RpcError,\n    session::ConnectError,\n    Session,\n};\n\n/// Errors arising when accepting a [Conn] from an ngrok tunnel.\n#[derive(Error, Debug, Clone)]\n#[non_exhaustive]\npub enum AcceptError {\n    /// An error occurred in the underlying transport protocol.\n    #[error(\"transport error\")]\n    Transport(#[from] MuxadoError),\n    /// An error arose during reconnect\n    #[error(\"reconnect error\")]\n    Reconnect(#[from] Arc<ConnectError>),\n    /// The listener was closed.\n    #[error(\"listener closed: {message}{}\", error_code.clone().map(|s| format!(\", {s}\")).unwrap_or_else(String::new))]\n    ListenerClosed {\n        /// The error message.\n        message: String,\n        /// The error code, if any.\n        error_code: Option<String>,\n    },\n}\n\n#[derive(Clone)]\npub(crate) struct TunnelInnerInfo {\n    pub(crate) id: String,\n    pub(crate) proto: String,\n    pub(crate) url: String,\n    pub(crate) labels: HashMap<String, String>,\n    pub(crate) forwards_to: String,\n    pub(crate) metadata: String,\n}\n\npub(crate) struct TunnelInner {\n    pub(crate) info: TunnelInnerInfo,\n    pub(crate) incoming: Option<Receiver<Result<ConnInner, AcceptError>>>,\n\n    // Note: this session field is also used to detect tunnel liveness for the\n    // purposes of shutting down the accept loop. 
If it's ever removed, an\n    // awaitdrop::Ref field needs to be added that's derived from the one\n    // belonging to the session.\n    pub(crate) session: Session,\n}\n\nimpl Drop for TunnelInner {\n    fn drop(&mut self) {\n        let id = self.id().to_string();\n        let sess = self.session.clone();\n        let rt = sess.runtime();\n        rt.spawn(async move { sess.close_tunnel(&id).await });\n    }\n}\n\n// This codegen indirection is required to make the hyper \"Accept\" trait bound\n// dependent on the hyper feature. You can't put a #[cfg] on a single bound, so\n// we're putting the whole trait def in a macro. Gross, but gets the job done.\nmacro_rules! tunnel_trait {\n    ($($hyper_bound:tt)*) => {\n        /// An ngrok tunnel.\n        ///\n        /// ngrok [Tunnel]s act like TCP listeners and can be used as a\n        /// [futures::stream::TryStream] of [Conn]ections from endpoints created on the ngrok\n        /// service.\n        pub trait Tunnel:\n            Stream<Item = Result<<Self as Tunnel>::Conn, AcceptError>>\n            + TunnelInfo\n            + TunnelCloser\n            $($hyper_bound)*\n            + Unpin\n            + Send\n            + 'static\n        {\n            /// The type of connection associated with this tunnel type.\n            /// Agent-initiated http, tls, and tcp tunnels all produce\n            /// `EndpointConn`s, while labeled tunnels produce `EdgeConn`s.\n            type Conn: crate::Conn;\n        }\n\n        /// Information associated with an ngrok tunnel.\n        pub trait TunnelInfo {\n            /// Returns a tunnel's unique ID.\n            fn id(&self) -> &str;\n            /// Returns a human-readable string presented in the ngrok dashboard\n            /// and the Tunnels API. Use the [HttpTunnelBuilder::forwards_to],\n            /// [TcpTunnelBuilder::forwards_to], etc. 
to set this value\n            /// explicitly.\n            fn forwards_to(&self) -> &str;\n\n            /// Returns the arbitrary metadata string for this tunnel.\n            fn metadata(&self) -> &str;\n        }\n\n        /// An ngrok tunnel closer.\n        #[async_trait]\n        pub trait TunnelCloser {\n            /// Close the tunnel.\n            ///\n            /// This is an RPC call that must be `.await`ed.\n            /// It is equivalent to calling `Session::close_tunnel` with this\n            /// tunnel's ID.\n            ///\n            /// If the tunnel is dropped, a task will be spawned to close it\n            /// asynchronously.\n            async fn close(&mut self) -> Result<(), RpcError>;\n        }\n    }\n}\n\ntunnel_trait!();\n\n/// An ngrok tunnel backing a simple endpoint.\n/// Most agent-configured tunnels fall into this category, with the exception of\n/// labeled tunnels.\npub trait EndpointInfo {\n    /// Returns the tunnel endpoint's URL.\n    fn url(&self) -> &str;\n\n    /// Returns the protocol of the tunnel's endpoint.\n    fn proto(&self) -> &str;\n}\n\n/// An ngrok tunnel backing an edge.\n/// Since labels may be dynamically defined via the dashboard or API, the url\n/// and protocol for the tunnel is not knowable ahead of time.\npub trait EdgeInfo {\n    /// Returns the labels that the tunnel was started with.\n    fn labels(&self) -> &HashMap<String, String>;\n}\n\nimpl Stream for TunnelInner {\n    type Item = Result<ConnInner, AcceptError>;\n\n    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {\n        self.incoming\n            .as_mut()\n            .expect(\"tunnel inner lacks a receiver\")\n            .poll_recv(cx)\n    }\n}\n\nimpl TunnelInner {\n    /// Get this tunnel's ID as returned by the ngrok server.\n    pub fn id(&self) -> &str {\n        &self.info.id\n    }\n\n    /// Get the URL for this tunnel.\n    /// Labeled tunnels will return an empty string.\n    
pub fn url(&self) -> &str {\n        &self.info.url\n    }\n\n    /// Close the tunnel.\n    /// This is an RPC call and needs to be `.await`ed.\n    pub async fn close(&mut self) -> Result<(), RpcError> {\n        self.session.close_tunnel(self.id()).await?;\n        if let Some(r) = self.incoming.as_mut() {\n            r.close()\n        }\n        Ok(())\n    }\n\n    /// Get the protocol that this tunnel uses.\n    pub fn proto(&self) -> &str {\n        &self.info.proto\n    }\n\n    /// Get the labels this tunnel was started with.\n    /// The returned [`HashMap`] will be empty for non-labeled tunnels.\n    pub fn labels(&self) -> &HashMap<String, String> {\n        &self.info.labels\n    }\n\n    /// Get the address that this tunnel says it forwards to.\n    pub fn forwards_to(&self) -> &str {\n        &self.info.forwards_to\n    }\n\n    /// Get the user-supplied metadata for this tunnel.\n    pub fn metadata(&self) -> &str {\n        &self.info.metadata\n    }\n\n    /// Split the tunnel into two parts - the first contains the listener and\n    /// all tunnel information, and the second contains *only* the information.\n    pub(crate) fn make_info(&self) -> TunnelInner {\n        TunnelInner {\n            info: self.info.clone(),\n            incoming: None,\n            session: self.session.clone(),\n        }\n    }\n}\n\nmacro_rules! make_tunnel_type {\n    ($(#[$outer:meta])* $wrapper:ident, $builder:tt, $conn:tt, $($m:tt),*) => {\n        $(#[$outer])*\n        pub struct $wrapper {\n            pub(crate) inner: TunnelInner,\n        }\n\n        impl $wrapper {\n            /// Split this tunnel type into two parts - both of which have all\n            /// tunnel information, but only the former can be used as a\n            /// listener. 
Attempts to accept connections on the latter will fail.\n            pub(crate) fn make_info(&self) -> $wrapper {\n                $wrapper {\n                    inner: self.inner.make_info(),\n                }\n            }\n        }\n\n        impl Tunnel for $wrapper {\n            type Conn = $conn;\n        }\n\n        impl TunnelInfo for $wrapper {\n            fn id(&self) -> &str {\n                self.inner.id()\n            }\n\n            fn forwards_to(&self) -> &str {\n                self.inner.forwards_to()\n            }\n\n            fn metadata(&self) -> &str {\n                self.inner.metadata()\n            }\n        }\n\n        #[async_trait]\n        impl TunnelCloser for $wrapper {\n            async fn close(&mut self) -> Result<(), RpcError> {\n                self.inner.close().await\n            }\n        }\n\n        impl $wrapper {\n            /// Create a builder for this tunnel type.\n            pub fn builder(session: Session) -> $builder {\n                $builder::from(session)\n            }\n        }\n\n        $(\n            make_tunnel_type!($m; $wrapper);\n        )*\n\n        impl Stream for $wrapper {\n            type Item = Result<$conn, AcceptError>;\n\n            fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {\n                Pin::new(&mut self.inner).poll_next(cx).map(|o| o.map(|r| r.map(|c| $conn { inner: c })))\n            }\n        }\n    };\n    (endpoint; $wrapper:ty) => {\n        impl EndpointInfo for $wrapper {\n            fn url(&self) -> &str {\n                self.inner.url()\n            }\n            fn proto(&self) -> &str {\n                self.inner.proto()\n            }\n        }\n    };\n    (edge; $wrapper:ty) => {\n        impl EdgeInfo for $wrapper {\n            fn labels(&self) -> &HashMap<String, String> {\n                self.inner.labels()\n            }\n        }\n    };\n}\n\nmake_tunnel_type! 
{\n    /// An ngrok tunnel for an HTTP endpoint.\n    HttpTunnel, HttpTunnelBuilder, EndpointConn, endpoint\n}\nmake_tunnel_type! {\n    /// An ngrok tunnel for a TCP endpoint.\n    TcpTunnel, TcpTunnelBuilder, EndpointConn, endpoint\n}\nmake_tunnel_type! {\n    /// An ngrok tunnel for a TLS endpoint.\n    TlsTunnel, TlsTunnelBuilder, EndpointConn, endpoint\n}\nmake_tunnel_type! {\n    /// A labeled ngrok tunnel.\n    LabeledTunnel, LabeledTunnelBuilder, EdgeConn, edge\n}\n"
  },
  {
    "path": "ngrok/src/tunnel_ext.rs",
    "content": "use std::{\n    collections::HashMap,\n    io,\n    sync::Arc,\n};\n#[cfg(feature = \"hyper\")]\nuse std::{\n    convert::Infallible,\n    fmt,\n};\n\nuse async_trait::async_trait;\nuse bitflags::bitflags;\nuse futures::stream::TryStreamExt;\nuse futures_rustls::rustls::{\n    self,\n    pki_types,\n    ClientConfig,\n};\n#[cfg(feature = \"hyper\")]\nuse hyper::{\n    server::conn::http1,\n    service::service_fn,\n    Response,\n    StatusCode,\n};\nuse once_cell::sync::Lazy;\nuse proxy_protocol::ProxyHeader;\nuse tokio::{\n    io::copy_bidirectional,\n    net::TcpStream,\n    task::JoinHandle,\n};\nuse tokio_util::compat::{\n    FuturesAsyncReadCompatExt,\n    TokioAsyncReadCompatExt,\n};\n#[cfg(feature = \"hyper\")]\nuse tracing::debug;\nuse tracing::{\n    field,\n    warn,\n    Instrument,\n    Span,\n};\nuse url::Url;\n\nuse crate::{\n    prelude::*,\n    proxy_proto,\n    session::IoStream,\n    EdgeConn,\n    EndpointConn,\n};\n\n#[allow(deprecated)]\n#[async_trait]\nimpl<T> TunnelExt for T\nwhere\n    T: Tunnel + Send,\n    <T as Tunnel>::Conn: ConnExt,\n{\n    async fn forward(&mut self, url: Url) -> Result<(), io::Error> {\n        forward_tunnel(self, url).await\n    }\n}\n\n/// Extension methods auto-implemented for all tunnel types\n#[async_trait]\n#[deprecated = \"superceded by the `listen_and_forward` builder method\"]\npub trait TunnelExt: Tunnel + Send {\n    /// Forward incoming tunnel connections to the provided url based on its\n    /// scheme.\n    /// This currently supports http, https, tls, and tcp on all platforms, unix\n    /// sockets on unix platforms, and named pipes on Windows via the \"pipe\"\n    /// scheme.\n    ///\n    /// Unix socket URLs can be formatted as `unix://path/to/socket` or\n    /// `unix:path/to/socket` for relative paths or as `unix:///path/to/socket` or\n    /// `unix:/path/to/socket` for absolute paths.\n    ///\n    /// Windows named pipe URLs can be formatted as `pipe:mypipename` or\n    /// 
`pipe://host/mypipename`. If no host is provided, as with\n    /// `pipe:///mypipename` or `pipe:/mypipename`, the leading slash will be\n    /// preserved.\n    async fn forward(&mut self, url: Url) -> Result<(), io::Error>;\n}\n\npub(crate) trait ConnExt {\n    fn forward_to(self, url: &Url) -> JoinHandle<io::Result<()>>;\n}\n\n#[tracing::instrument(skip_all, fields(tunnel_id = tun.id(), url = %url))]\npub(crate) async fn forward_tunnel<T>(tun: &mut T, url: Url) -> Result<(), io::Error>\nwhere\n    T: Tunnel + 'static + ?Sized,\n    <T as Tunnel>::Conn: ConnExt,\n{\n    loop {\n        let tunnel_conn = if let Some(conn) = tun\n            .try_next()\n            .await\n            .map_err(|err| io::Error::new(io::ErrorKind::NotConnected, err))?\n        {\n            conn\n        } else {\n            return Ok(());\n        };\n\n        tunnel_conn.forward_to(&url);\n    }\n}\n\nimpl ConnExt for EdgeConn {\n    fn forward_to(mut self, url: &Url) -> JoinHandle<io::Result<()>> {\n        let url = url.clone();\n        tokio::spawn(async move {\n            let mut upstream = match connect(\n                self.edge_type() == EdgeType::Tls && self.passthrough_tls(),\n                self.inner.info.verify_upstream_tls,\n                self.inner.info.app_protocol.clone(),\n                None, // Edges don't support proxyproto (afaik)\n                &url,\n            )\n            .await\n            {\n                Ok(conn) => conn,\n                Err(error) => {\n                    #[cfg(feature = \"hyper\")]\n                    if self.edge_type() == EdgeType::Https {\n                        serve_gateway_error(format!(\"{error}\"), self);\n                    }\n                    warn!(%error, \"error connecting to upstream\");\n                    return Err(error);\n                }\n            };\n\n            copy_bidirectional(&mut self, &mut upstream).await?;\n            Ok(())\n        })\n    }\n}\n\nimpl ConnExt for 
EndpointConn {\n    fn forward_to(self, url: &Url) -> JoinHandle<Result<(), io::Error>> {\n        let url = url.clone();\n        tokio::spawn(async move {\n            let proxy_proto = self.inner.info.proxy_proto;\n            let proto_tls = self.proto() == \"tls\";\n            #[cfg(feature = \"hyper\")]\n            let proto_http = matches!(self.proto(), \"http\" | \"https\");\n            let passthrough_tls = self.inner.info.passthrough_tls();\n            let app_protocol = self.inner.info.app_protocol.clone();\n            let verify_upstream_tls = self.inner.info.verify_upstream_tls;\n\n            let (mut stream, proxy_header) = match proxy_proto {\n                ProxyProto::None => (crate::proxy_proto::Stream::disabled(self), None),\n                _ => {\n                    let mut stream = crate::proxy_proto::Stream::incoming(self);\n                    let header = stream\n                        .proxy_header()\n                        .await?\n                        .map_err(|e| {\n                            io::Error::new(\n                                io::ErrorKind::InvalidData,\n                                format!(\"invalid proxy-protocol header: {}\", e),\n                            )\n                        })?\n                        .cloned();\n                    (stream, header)\n                }\n            };\n\n            let mut upstream = match connect(\n                proto_tls && passthrough_tls,\n                verify_upstream_tls,\n                app_protocol,\n                proxy_header,\n                &url,\n            )\n            .await\n            {\n                Ok(conn) => conn,\n                Err(error) => {\n                    #[cfg(feature = \"hyper\")]\n                    if proto_http {\n                        serve_gateway_error(format!(\"{error}\"), stream);\n                    }\n                    warn!(%error, \"error connecting to upstream\");\n                    
return Err(error);\n                }\n            };\n\n            copy_bidirectional(&mut stream, &mut upstream).await?;\n            Ok(())\n        })\n    }\n}\n\nbitflags! {\n    struct TlsFlags: u8 {\n        const FLAG_HTTP2       = 0b01;\n        const FLAG_verify_upstream_tls       = 0b10;\n        const FLAG_MAX     = Self::FLAG_HTTP2.bits()\n                           | Self::FLAG_verify_upstream_tls.bits();\n    }\n}\n\nstatic NO_CRYPTO_PROVIDER_ERROR: Lazy<io::Error> = Lazy::new(|| {\n    io::Error::new(\n        io::ErrorKind::NotFound,\n        \"no default CryptoProvider installed\",\n    )\n});\n\nfn tls_config(\n    app_protocol: Option<String>,\n    verify_upstream_tls: bool,\n) -> Result<Arc<ClientConfig>, &'static io::Error> {\n    // A hashmap of tls client configs for different configurations.\n    // There won't need to be a lot of variation among these, and we'll want to\n    // reuse them as much as we can, which is why we initialize them all once\n    // and then pull out the one we need.\n    // Disabling the lint because this is a local static that doesn't escape the\n    // enclosing context. 
It's fine.\n    #[allow(clippy::type_complexity)]\n    static CONFIGS: Lazy<Result<HashMap<u8, Arc<ClientConfig>>, &'static io::Error>> =\n        Lazy::new(|| {\n            std::ops::Range {\n                start: 0,\n                end: TlsFlags::FLAG_MAX.bits() + 1,\n            }\n            .map(|p| {\n                let http2 = (p & TlsFlags::FLAG_HTTP2.bits()) != 0;\n                let verify_upstream_tls = (p & TlsFlags::FLAG_verify_upstream_tls.bits()) != 0;\n                let mut config = crate::session::host_certs_tls_config()?;\n                if !verify_upstream_tls {\n                    let provider = rustls::crypto::CryptoProvider::get_default()\n                        .ok_or(&*NO_CRYPTO_PROVIDER_ERROR)?\n                        .as_ref()\n                        .clone();\n                    config.dangerous().set_certificate_verifier(Arc::new(\n                        danger::NoCertificateVerification::new(provider),\n                    ));\n                }\n\n                if http2 {\n                    config\n                        .alpn_protocols\n                        .extend([\"h2\", \"http/1.1\"].iter().map(|s| s.as_bytes().to_vec()));\n                }\n                Ok((p, Arc::new(config)))\n            })\n            .collect()\n        });\n\n    let configs: &HashMap<u8, Arc<ClientConfig>> = CONFIGS.as_ref().map_err(|e| *e)?;\n    let mut key = 0;\n    if Some(\"http2\").eq(&app_protocol.as_deref()) {\n        key |= TlsFlags::FLAG_HTTP2.bits();\n    }\n    if verify_upstream_tls {\n        key |= TlsFlags::FLAG_verify_upstream_tls.bits();\n    }\n\n    Ok(configs\n        .get(&key)\n        .or_else(|| configs.get(&0))\n        .unwrap()\n        .clone())\n}\n\n// Establish the connection to forward the tunnel stream to.\n// Takes the tunnel and connection to make additional decisions on how to wrap\n// the forwarded connection, i.e. 
reordering tls termination and proxyproto.\n// Note: this additional wrapping logic currently unimplemented.\nasync fn connect(\n    tunnel_tls: bool,\n    verify_upstream_tls: bool,\n    app_protocol: Option<String>,\n    proxy_proto_header: Option<ProxyHeader>,\n    url: &Url,\n) -> Result<Box<dyn IoStream>, io::Error> {\n    let host = url.host_str().unwrap_or(\"localhost\");\n    let mut backend_tls: bool = false;\n    let mut conn: Box<dyn IoStream> = match url.scheme() {\n        \"tcp\" => {\n            let port = url.port().ok_or_else(|| {\n                io::Error::new(\n                    io::ErrorKind::InvalidInput,\n                    format!(\"missing port for tcp forwarding url {url}\"),\n                )\n            })?;\n            let conn = connect_tcp(host, port).in_current_span().await?;\n            Box::new(conn)\n        }\n\n        \"http\" => {\n            let port = url.port().unwrap_or(80);\n            let conn = connect_tcp(host, port).in_current_span().await?;\n            Box::new(conn)\n        }\n\n        \"https\" | \"tls\" => {\n            let port = url.port().unwrap_or(443);\n            let conn = connect_tcp(host, port).in_current_span().await?;\n\n            backend_tls = true;\n            Box::new(conn)\n        }\n\n        #[cfg(not(target_os = \"windows\"))]\n        \"unix\" => {\n            use std::borrow::Cow;\n\n            use tokio::net::UnixStream;\n\n            let mut addr = Cow::Borrowed(url.path());\n            if let Some(host) = url.host_str() {\n                // note: if host exists, there should always be a leading / in\n                // the path, but we should consider it a relative path.\n                addr = Cow::Owned(format!(\"{host}{addr}\"));\n            }\n            Box::new(UnixStream::connect(&*addr).await?)\n        }\n\n        #[cfg(target_os = \"windows\")]\n        \"pipe\" => {\n            use std::time::Duration;\n\n            use 
tokio::net::windows::named_pipe::ClientOptions;\n            use windows_sys::Win32::Foundation::ERROR_PIPE_BUSY;\n\n            let mut pipe_name = url.path();\n            if url.host_str().is_some() {\n                pipe_name = pipe_name.strip_prefix('/').unwrap_or(pipe_name);\n            }\n            if pipe_name.is_empty() {\n                return Err(io::Error::new(\n                    io::ErrorKind::InvalidInput,\n                    format!(\"missing pipe name in forwarding url {url}\"),\n                ));\n            }\n            let host = url\n                .host_str()\n                // Consider localhost to mean \".\" for the pipe name\n                .map(|h| if h == \"localhost\" { \".\" } else { h })\n                .unwrap_or(\".\");\n            // Finally, assemble the full name.\n            let addr = format!(\"\\\\\\\\{host}\\\\pipe\\\\{pipe_name}\");\n            // loop behavior copied from docs\n            // https://docs.rs/tokio/latest/tokio/net/windows/named_pipe/struct.NamedPipeClient.html\n            let local_conn = loop {\n                match ClientOptions::new().open(&addr) {\n                    Ok(client) => break client,\n                    Err(error) if error.raw_os_error() == Some(ERROR_PIPE_BUSY as i32) => (),\n                    Err(error) => return Err(error),\n                }\n\n                tokio::time::sleep(Duration::from_millis(50)).await;\n            };\n            Box::new(local_conn)\n        }\n        _ => {\n            return Err(io::Error::new(\n                io::ErrorKind::InvalidInput,\n                format!(\"unrecognized scheme in forwarding url: {url}\"),\n            ))\n        }\n    };\n\n    // We have to write the proxy header _before_ tls termination\n    if let Some(header) = proxy_proto_header {\n        conn = Box::new(\n            proxy_proto::Stream::outgoing(conn, header)\n                .expect(\"re-serializing proxy header should always succeed\"),\n        
)\n    };\n\n    if backend_tls && !tunnel_tls {\n        let domain = pki_types::ServerName::try_from(host)\n            .map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e))?\n            .to_owned();\n        conn = Box::new(\n            futures_rustls::TlsConnector::from(\n                tls_config(app_protocol, verify_upstream_tls).map_err(|e| e.kind())?,\n            )\n            .connect(domain, conn.compat())\n            .await?\n            .compat(),\n        )\n    }\n\n    // TODO: header rewrites?\n\n    Ok(conn)\n}\n\nasync fn connect_tcp(host: &str, port: u16) -> Result<TcpStream, io::Error> {\n    let conn = TcpStream::connect(&format!(\"{}:{}\", host, port)).await?;\n    if let Ok(addr) = conn.peer_addr() {\n        Span::current().record(\"forward_addr\", field::display(addr));\n    }\n    Ok(conn)\n}\n\n#[cfg(feature = \"hyper\")]\nfn serve_gateway_error(\n    err: impl fmt::Display + Send + 'static,\n    conn: impl hyper::rt::Read + hyper::rt::Write + Unpin + Send + 'static,\n) -> JoinHandle<()> {\n    tokio::spawn(\n        async move {\n            let service = service_fn(move |_req| {\n                debug!(\"serving bad gateway error\");\n                let mut resp = Response::new(format!(\"failed to dial backend: {err}\"));\n                *resp.status_mut() = StatusCode::BAD_GATEWAY;\n                futures::future::ok::<_, Infallible>(resp)\n            });\n\n            let res = http1::Builder::new()\n                .keep_alive(false)\n                .serve_connection(conn, service)\n                .await;\n            debug!(?res, \"connection closed\");\n        }\n        .in_current_span(),\n    )\n}\n\n// https://github.com/rustls/rustls/blob/main/examples/src/bin/tlsclient-mio.rs#L334\nmod danger {\n    use futures_rustls::rustls;\n    use rustls::{\n        client::danger::HandshakeSignatureValid,\n        crypto::{\n            verify_tls12_signature,\n            verify_tls13_signature,\n            
CryptoProvider,\n        },\n        DigitallySignedStruct,\n    };\n\n    use super::pki_types::{\n        CertificateDer,\n        ServerName,\n        UnixTime,\n    };\n\n    #[derive(Debug)]\n    pub struct NoCertificateVerification(CryptoProvider);\n\n    impl NoCertificateVerification {\n        pub fn new(provider: CryptoProvider) -> Self {\n            Self(provider)\n        }\n    }\n\n    impl rustls::client::danger::ServerCertVerifier for NoCertificateVerification {\n        fn verify_server_cert(\n            &self,\n            _end_entity: &CertificateDer<'_>,\n            _intermediates: &[CertificateDer<'_>],\n            _server_name: &ServerName<'_>,\n            _ocsp: &[u8],\n            _now: UnixTime,\n        ) -> Result<rustls::client::danger::ServerCertVerified, rustls::Error> {\n            Ok(rustls::client::danger::ServerCertVerified::assertion())\n        }\n\n        fn verify_tls12_signature(\n            &self,\n            message: &[u8],\n            cert: &CertificateDer<'_>,\n            dss: &DigitallySignedStruct,\n        ) -> Result<HandshakeSignatureValid, rustls::Error> {\n            verify_tls12_signature(\n                message,\n                cert,\n                dss,\n                &self.0.signature_verification_algorithms,\n            )\n        }\n\n        fn verify_tls13_signature(\n            &self,\n            message: &[u8],\n            cert: &CertificateDer<'_>,\n            dss: &DigitallySignedStruct,\n        ) -> Result<HandshakeSignatureValid, rustls::Error> {\n            verify_tls13_signature(\n                message,\n                cert,\n                dss,\n                &self.0.signature_verification_algorithms,\n            )\n        }\n\n        fn supported_verify_schemes(&self) -> Vec<rustls::SignatureScheme> {\n            self.0.signature_verification_algorithms.supported_schemes()\n        }\n    }\n}\n"
  },
  {
    "path": "rustfmt.toml",
    "content": "imports_layout = \"Vertical\"\nimports_granularity = \"Crate\"\ngroup_imports = \"StdExternalCrate\"\nedition = \"2021\"\n"
  }
]