Full Code of marshallrichards/turbodrone for AI

master f1247b582345 cached
102 files
484.0 KB
125.2k tokens
524 symbols
1 requests
Download .txt
Showing preview only (517K chars total). Download the full file or copy to clipboard to get everything.
Repository: marshallrichards/turbodrone
Branch: master
Commit: f1247b582345
Files: 102
Total size: 484.0 KB

Directory structure:
gitextract_1kcujrmi/

├── .gitignore
├── LICENSE
├── NOTICE
├── README.md
├── backend/
│   ├── control/
│   │   └── strategies.py
│   ├── main.py
│   ├── models/
│   │   ├── __init__.py
│   │   ├── base_rc.py
│   │   ├── base_video_model.py
│   │   ├── control_profile.py
│   │   ├── cooingdv_rc.py
│   │   ├── cooingdv_video_model.py
│   │   ├── debug_rc.py
│   │   ├── s2x_rc.py
│   │   ├── s2x_video_model.py
│   │   ├── stick_range.py
│   │   ├── video_frame.py
│   │   ├── wifi_cam_rc.py
│   │   ├── wifi_uav_rc.py
│   │   └── wifi_uav_video_model.py
│   ├── plugins/
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── follow/
│   │   │   ├── __init__.py
│   │   │   ├── follow_controller.py
│   │   │   ├── follow_plugin.py
│   │   │   └── test_yolo.py
│   │   ├── manager.py
│   │   └── test_yaw_plugin.py
│   ├── protocols/
│   │   ├── __init__.py
│   │   ├── base_protocol_adapter.py
│   │   ├── base_video_protocol.py
│   │   ├── cooingdv_jieli_rc_protocol_adapter.py
│   │   ├── cooingdv_jieli_video_protocol.py
│   │   ├── cooingdv_rc_protocol_adapter.py
│   │   ├── cooingdv_video_protocol.py
│   │   ├── debug_rc_protocol_adapter.py
│   │   ├── debug_video_protocol.py
│   │   ├── no_video_protocol.py
│   │   ├── s2x_rc_protocol_adapter.py
│   │   ├── s2x_video_protocol.py
│   │   ├── wifi_cam_rc_protocol_adapter.py
│   │   ├── wifi_cam_video_protocol.py
│   │   ├── wifi_uav_rc_protocol_adapter.py
│   │   └── wifi_uav_video_protocol.py
│   ├── receive_video.py
│   ├── remote_control.py
│   ├── requirements.txt
│   ├── services/
│   │   ├── __init__.py
│   │   ├── flight_controller.py
│   │   └── video_receiver.py
│   ├── tests/
│   │   ├── test_cooingdv_jieli_ctp.py
│   │   ├── test_s2x_protocol.py
│   │   ├── test_s2x_video_protocol.py
│   │   └── test_wifi_cam_protocol.py
│   ├── utils/
│   │   ├── cooingdv_jieli_ctp.py
│   │   ├── dropping_queue.py
│   │   ├── logging_config.py
│   │   ├── wifi_uav_ack_state.py
│   │   ├── wifi_uav_jpeg.py
│   │   ├── wifi_uav_packets.py
│   │   └── wifi_uav_variants.py
│   ├── video_client.py
│   ├── views/
│   │   ├── __init__.py
│   │   ├── base_video_view.py
│   │   ├── cli_rc.py
│   │   └── opencv_video_view.py
│   └── web_server.py
├── docs/
│   └── research/
│       ├── S2x.md
│       ├── cooingdv.md
│       ├── wifi_cam.md
│       └── wifi_uav.md
├── experimental/
│   ├── README.md
│   └── test_e88pro.py
└── frontend/
    ├── .gitignore
    ├── eslint.config.js
    ├── index.html
    ├── package.json
    ├── postcss.config.cjs
    ├── src/
    │   ├── App.css
    │   ├── App.tsx
    │   ├── components/
    │   │   ├── AxisIndicator.tsx
    │   │   ├── CommandButtons.tsx
    │   │   ├── ControlSchemeToggle.tsx
    │   │   ├── ControlsOverlay.tsx
    │   │   ├── DrawingOverlay.tsx
    │   │   ├── PluginControls.tsx
    │   │   ├── SpeedControl.tsx
    │   │   ├── StaticDrawingOverlay.tsx
    │   │   └── VideoFeed.tsx
    │   ├── hooks/
    │   │   ├── useControls.ts
    │   │   ├── useOverlays.ts
    │   │   ├── usePlugins.ts
    │   │   └── useVideoSizing.ts
    │   ├── index.css
    │   ├── main.tsx
    │   ├── pages/
    │   │   └── TestPage.tsx
    │   └── vite-env.d.ts
    ├── tailwind.config.ts
    ├── tsconfig.app.json
    ├── tsconfig.json
    ├── tsconfig.node.json
    └── vite.config.ts

================================================
FILE CONTENTS
================================================

================================================
FILE: .gitignore
================================================
# ------------------------
# Python
# ------------------------

# Byte‐compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# pytorch models
*.pt

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/

# Translations
*.mo
*.pot

# Django (if ever)
local_settings.py
db.sqlite3

# Flask
instance/
.webassets-cache

# Sphinx docs
docs/_build/

# PyBuilder
target/

# Jupyter
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv / poetry
Pipfile.lock
poetry.lock

# PEP 582
__pypackages__/

# Virtual environments
.env
.venv/
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder
.spyderproject
.spyproject

# Rope
.ropeproject

# mkdocs
/site

# mypy
.mypy_cache/

# pytest
.pytest_cache/

# IDEs / editors
.vscode/
.idea/
*.iml

# OS files
.DS_Store
Thumbs.db
Desktop.ini
ehthumbs.db

# ------------------------
# Node / Frontend (React/TS/Tailwind)
# ------------------------

# dependencies
node_modules/

# logs
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*

# lockfiles (if you prefer not to commit them)
# Uncomment if you don't commit lockfiles
# package-lock.json
# yarn.lock
# pnpm-lock.yaml

# build outputs
build/
dist/
.next/
out/
public/

# caches
.cache/
.parcel-cache/

# tailwind
.tailwind-cache/

# TypeScript
*.tsbuildinfo

# environment
.env.local
.env.development.local
.env.test.local
.env.production.local

# framework-specific (if you ever add)
.serverless/
.vuepress/dist/

# assets
backend/plugins/follow/assets/*


================================================
FILE: LICENSE
================================================
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!)  The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright 2025 Marshall Richards

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.


================================================
FILE: NOTICE
================================================
turbodrone — reverse-engineered control stack for cheap toy drones
Copyright © 2025 Marshall Richards

This product includes software developed by Marshall Richards
(turbodrone, https://github.com/marshallrichards/turbodrone).

Portions of this software are based on, or otherwise incorporate, the turbodrone project.

Licensed under the Apache License, Version 2.0. You may obtain a copy of
the license in the LICENSE file or at <https://www.apache.org/licenses/LICENSE-2.0>.

--------------------------------------------------------------------
You may add your own attribution notices below; this block must remain.
--------------------------------------------------------------------

================================================
FILE: README.md
================================================
# Turbodrone
Reverse-engineered API and client for controlling some of the best-selling ~$50 "toy" drones on Amazon from a computer, replacing the closed-source mobile apps they come with.

![S20 Drone Short Clip](docs/images/s20-drone-short-clip-small.gif)

## Introduction
Nowadays, there are incredibly cheap "toy" drones available on Amazon that are basically pared-down clones of the early DJI mavic. Only ~$50 to have a 1080p camera for FPV and recording, tiny downward-facing optical flow sensor for position and altitude hold, and a well-tuned flight profile out-of-the-box. The only problem with drones like this is that they run closed-source firmware and are locked to only being controlled by a custom mobile app. I thought it would be cool to free some of these drones from their "jail" and write an API and client for accessing the video feed and sending control commands down to the drone by reverse-engineering how the mobile apps work. That way you can turn a highly capable $50 "toy" drone into something that can be programmatically controlled and used for all sorts of applications and experiments.

## Hardware
* WiFi Camera Drones (ranked in order of recommendation):

    | Brand      | Model Number(s) | Compatibility | Implementation | Motor      | Purchase Link                                               | Notes |
    |------------|-----------------|---------------|----------------|------------|-------------------------------------------------------------|-------|
    | Generic        | M10           | Tested    | `cooingdv` | Brushless | [Aliexpress](https://www.aliexpress.us/item/3256809708895605.html) | My current favorite. RTSP video stream is very reliable. |
    | Plegble        | PL-515        | Tested | `s2x`      | Brushless | [Amazon](https://www.amazon.com/dp/B0D2B25YQQ) | Same airframe as the S2x models and takes the same batteries, but upgraded to brushless motors. |
    | Karuisrc       | K417          | Tested    | `wifi_uav` | Brushless | [Amazon](https://www.amazon.com/Electric-Adjustable-AIdrones-Quadcopter-Beginners/dp/B0CYPSJ34H/) | First model to be supported with brushless motors. Build quality is great. |
    | Loiley         | S29           | Tested    | `s2x`      | Brushed   | Can't find link anymore | Great build quality, has servo for tilting camera (_not implemented in API yet_) |
    | Hiturbo        | S20           | Tested    | `s2x`      | Brushed   | [Amazon](https://www.amazon.com/dp/B0BBVZ849G), [Alternate Amazon Listing](https://www.amazon.com/Beginners-Foldable-Quadcopter-Gestures-Batteries/dp/B0D8LK1KJ3) | Original test platform, great build quality |
    | FlyVista       | V88           | Tested    | `wifi_uav` | Brushed   | [Amazon](https://www.amazon.com/dp/B0D5CXY6X8) | |
    | ?              | D16/GT3/V66   | Tested    | `wifi_uav` | Brushed   | cheapest on [Aliexpress](https://www.aliexpress.us/item/3256808590663347.html), [Amazon](https://www.amazon.com/AUHIFVAX-Intelligent-Avoidance-Christmas-Thanksgiving/dp/B0FJRVH76T) | 20% smaller DJI Neo clone. Only really good for indoor flight. |
    | Several Brands | E58           | Tested    | Unknown    | Brushed   | [Amazon](https://www.amazon.com/Foldable-Quadcopter-Beginners-Batteries-Waypoints/dp/B09KV8L7WN/) | |
    | Several Brands | E88           | Suspected | `cooingdv` | Brushed   | [Amazon](https://www.amazon.com/Beginners-Foldable-Quadcopter-Real-Time-Rechargable/dp/B0FKNH6Q4T) | |
    | Several Brands | E88 Pro       | Suspected | Unknown    | Brushed   | [Amazon](https://www.amazon.com/Beginners-Foldable-Quadcopter-Real-Time-Rechargable/dp/B0FKNH6Q4T) | |
    | Several Brands | E99           | Suspected | `cooingdv` | Brushed   | [Amazon](https://www.amazon.com/LJN53-Foldable-Drone-Dual-Cameras/dp/B0DRH9C6RF) | |
    | Several Brands | E99 Pro       | Suspected | Unknown    | Brushed   | [Amazon](https://www.amazon.com/LJN53-Foldable-Drone-Dual-Cameras/dp/B0DRH9C6RF) | |
    | Swifsen        | A35           | Suspected | Unknown    | Brushed   | [Amazon](https://a.co/d/bqKvloz) | Very small "toy" drone |
    | Unknown        | LSRC-S1S      | Tested | `wifi_uav` | Brushed   | | mentioned in another reverse-engineering effort for the WiFi UAV app |
    | Velcase        | S101          | TODO      | Unknown      | Brushed   | [Amazon](https://www.amazon.com/Foldable-Beginners-Quadcopter-Carrying-Positioning/dp/B0CH341G5F/) | lower quality build, smaller battery and props than S29 & S20 |
    | Redrie         | X29           | Tested      | `s2x`    | Brushed   | [Amazon](https://www.amazon.com/Adults-1080P-Foldable-Altitude-Auto-Follow-Batteries/dp/B0CZQKNYL5) | Average build quality |

    _**Tested** means the drone has been physically run with turbodrone to ensure its compatibility._

    _**Suspected** means the APK for the drone appears to use the exact same packages and libraries as one of the tested drones._

    _**TODO** means the APK operates with different byte packets and protocols and will have to be added as a new implementation in the API._

* WiFi Dongle ([recommend ALFA Network AWUS036ACM](https://www.amazon.com/Network-AWUS036ACM-Long-Range-Wide-Coverage-High-Sensitivity/dp/B08BJS8FXD) or similar) 
  * Drone broadcasts its own WiFi network so your computer will have to connect to it.
  * Not strictly necessary because you can use your computer's built-in WiFi radio to connect to the drone's network, but nice to have that way you can stay connected to the internet while flying the drone.


## Setup
Move to the `backend` directory
```
cd backend
```

Add venv
```
python -m venv venv
source venv/bin/activate
```

Install the dependencies
```
pip install -r requirements.txt
```

_If_ you are on Windows, you will need to manually install the `curses` library.
```
pip install windows-curses
```

Open a new terminal window and install the dependencies for the frontend.
_Make sure you have Node.js 20+ installed._
```
cd frontend
npm install
```

Make sure WiFi Dongle is plugged in, drone is turned on, connect to the "BRAND-MODEL-XXXXXX" network before proceeding.

Create a `.env` file in the `backend` directory. Add a DRONE_TYPE based on which drone you have:
```
# For "com.vison.macrochip" (s2x) based drones like S20 and S29:
DRONE_TYPE=s2x
# For WiFi UAV-based drones like V88 and D16:
# DRONE_TYPE=wifi_uav
# For cooingdv publisher drone apps like RC UFO and KY UFO:
# DRONE_TYPE=cooingdv
# For KY FPV / Jieli CTP control with first-pass RTP/JPEG video:
# DRONE_TYPE=cooingdv_jieli
# For WiFi_CAM native UDP apps:
# DRONE_TYPE=wifi_cam
```

Launch the backend: 
```
uvicorn web_server:app
```

In a separate terminal, launch the frontend web client:
```
npm run dev
```

Open the web client which will be at `http://localhost:5173` and you should see the drone video feed and be able to control it.

To control via a gaming controller, plug it in and move the sticks around for it to be detected and then push the toggle button to switch between keyboard and controller control.

Make sure to fly in a safe area, preferably outdoors with little wind. And note that the "Land" button _currently_ is more of an E-stop button that will stop the drone motors immediately.


## Status
Reconnection logic was solved recently.

Video feed: solid.

Controls: improved greatly via the web client. The implementation for WiFi UAV-based drones could use some fine-tuning.

Web Client: support for various inputs like keyboard, gamepad controllers, and ThinkPad TrackPoint mouse(lol).

Working on adding support for more drones from [Amazon's best-selling drone list](https://www.amazon.com/best-selling-drones/s?k=best+selling+drones).


## Contribute
To contribute support for a new "toy" drone, download the APK the drone uses on a mirror site and start reverse engineering it by decompiling to Java files with [jadx](https://github.com/skylot/jadx).
From there, look at the `AndroidManifest.xml` and see if you can find the classes that are entry points for the app. Look for port usage or protocol usage explicitly mentioned like TCP or UDP. Most of these apps will do the actual communication and video feed processing in native C++ libraries that will be embedded inside the APK. You can use a tool like Ghidra to decompile the native libraries and see if you can discover anything useful. For video feed processing you want to figure out what format it uses, e.g. JPEG, YUV, etc., and also if it uses compression and what the byte structure looks like when it's reforming an image frame from packets.
Additionally, Wireshark is your friend for understanding the raw data packets being sent and received by the app. Watch this [video](https://x.com/marshallrichrds/status/1923165437698670818) for an overview into the reverse engineering process used for adding support for the Hiturbo S20 drone.

Once you have the protocols and processing for RC and video figured out, make a small test program and add it to the `experimental` directory at that point if you'd like others to be able to try it out.
After that, you can work on an implementation that is compatible with the existing back-end architecture; examples of this are the `s2x` and `wifi_uav` reverse-engineered implementations.


## Experimental Support
For drones and apps with limited support which are not yet fully integrated into Turbodrone, see the `experimental` directory.


================================================
FILE: backend/control/strategies.py
================================================
from abc import ABC, abstractmethod
from typing import Dict

class ControlStrategy(ABC):
    """Translates high-level pilot intent into raw stick values on an RC model.

    Concrete strategies receive a normalised axes mapping each tick and are
    responsible for writing the corresponding raw values into ``model``.
    """

    @abstractmethod
    def update(self, model, dt: float, axes: Dict[str, float]) -> None:
        """Apply one control tick to ``model``.

        ``axes`` maps the axis names throttle, yaw, pitch and roll to
        normalised values in the range -1 … +1.  ``dt`` is the time step
        since the previous update.
        """
        ...

# 1. Same behaviour you already have for the CLI keyboard
class IncrementalStrategy(ControlStrategy):
    """Velocity-style control matching the existing CLI keyboard behaviour.

    Axes arrive as the discrete values -1, 0 or +1 (key presses); the model
    integrates them over the elapsed time via its own incremental helper.
    """

    def update(self, model, dt, axes):
        # The model owns the integration logic — just forward this tick.
        model._update_axes_incremental(dt, axes)

# 2. Direct mapping for joysticks / Gamepad API
class DirectStrategy(ControlStrategy):
    """Absolute mode for joysticks / the Gamepad API.

    Each normalised stick reading is written straight through to the drone's
    raw range, optionally shaped by an expo curve taken from the model.
    """

    def update(self, model, dt, axes):
        # Expo exponent; 0.0 (pure linear pass-through) when the model
        # does not define one.
        expo = getattr(model, "expo_factor", 0.0)
        for name, value in axes.items():
            if expo:
                # Expo softens the stick centre: magnitude is raised to
                # (1 + expo), then the original sign is restored.
                shaped = abs(value) ** (1 + expo)
                value = shaped if value >= 0 else -shaped
            setattr(model, name, model._scale_normalised(value))

================================================
FILE: backend/main.py
================================================
#!/usr/bin/env python3
import logging
import os

from utils.logging_config import bootstrap_runtime, configure_logging

bootstrap_runtime()

import argparse
import threading
import queue
import signal
import sys

from models.s2x_rc import S2xDroneModel
from protocols.s2x_rc_protocol_adapter import S2xRCProtocolAdapter
from protocols.s2x_video_protocol import S2xVideoProtocolAdapter

from models.wifi_uav_rc import WifiUavRcModel
from protocols.wifi_uav_rc_protocol_adapter import WifiUavRcProtocolAdapter
from protocols.wifi_uav_video_protocol import WifiUavVideoProtocolAdapter

from models.cooingdv_rc import CooingdvRcModel
from protocols.cooingdv_rc_protocol_adapter import CooingdvRcProtocolAdapter
from protocols.cooingdv_jieli_rc_protocol_adapter import CooingdvJieliRcProtocolAdapter
from protocols.cooingdv_jieli_video_protocol import CooingdvJieliVideoProtocolAdapter
from protocols.cooingdv_video_protocol import CooingdvVideoProtocolAdapter
from models.wifi_cam_rc import WifiCamRcModel
from protocols.wifi_cam_rc_protocol_adapter import WifiCamRcProtocolAdapter
from protocols.wifi_cam_video_protocol import WifiCamVideoProtocolAdapter

from services.flight_controller import FlightController
from services.video_receiver import VideoReceiverService
from utils.wifi_uav_variants import WIFI_UAV_DRONE_TYPES, resolve_wifi_uav_variant
from views.cli_rc import CLIView
from views.opencv_video_view import OpenCVVideoView

# Drone-type identifiers served by the cooingdv implementation
# (the plain variant and the Jieli CTP variant).
COOINGDV_DRONE_TYPES = {"cooingdv", "cooingdv_jieli"}


# Configure logging once at module import so every module-level logger
# created afterwards inherits the same handlers/format.
configure_logging()
logger = logging.getLogger(__name__)

def main():
    """Parse CLI arguments, assemble the control/video stack for the
    selected drone family, and run the blocking CLI view.

    Shutdown happens in two places: the SIGINT/SIGTERM handler (video
    components first, then the flight controller) and the ``finally``
    block guarding the CLI view.
    """
    parser = argparse.ArgumentParser(description="Drone teleoperation interface")
    parser.add_argument("--drone-type", type=str, default="s2x",
                        choices=["s2x", "wifi_uav", "wifi_uav_fld", "wifi_uav_uav", "cooingdv", "cooingdv_jieli", "wifi_cam"],
                        help="Type of drone to control (s2x, wifi_uav, wifi_uav_fld, wifi_uav_uav, cooingdv, cooingdv_jieli, or wifi_cam, default: s2x)")
    parser.add_argument("--drone-ip", type=str,
                        help="Drone UDP IP address (default: s2x=172.16.10.1, wifi_uav=192.168.169.1, cooingdv=192.168.1.1, wifi_cam=192.168.4.153)")
    parser.add_argument("--control-port", type=int,
                        help="Drone control port (default: s2x=8080, wifi_uav=8800)")
    parser.add_argument("--video-port", type=int,
                        help="Drone video port (default: s2x=8888, wifi_uav=8800)")
    parser.add_argument("--rate", type=float, default=None,
                        help="Control packets per second (default depends on drone type)")
    parser.add_argument("--with-video", action="store_true",
                        help="Enable video feed")
    parser.add_argument("--dump-frames", action="store_true",
                        help="Dump video frames to files")
    parser.add_argument("--dump-packets", action="store_true",
                        help="Dump raw video packets to files")
    args = parser.parse_args()

    # Create model, protocol adapter, and controller.
    # Each branch establishes: connection defaults, the RC model, the RC
    # protocol adapter, and the *class* of the video protocol adapter
    # (instantiated later by VideoReceiverService).
    # NOTE(review): the `x if x else default` pattern treats an explicit
    # 0 (or empty string) as "unset" and falls back to the default for
    # --drone-ip/--control-port/--video-port — confirm this is intended.
    if args.drone_type == "s2x":
        logger.info("[main] Using S2X drone implementation.")
        default_ip = "172.16.10.1"
        default_control_port = 8080
        default_video_port = 8888
        default_rate = 80.0
        
        drone_ip = args.drone_ip if args.drone_ip else default_ip
        control_port = args.control_port if args.control_port else default_control_port
        video_port = args.video_port if args.video_port else default_video_port

        drone_model = S2xDroneModel()
        protocol_adapter = S2xRCProtocolAdapter(drone_ip, control_port)
        video_protocol_adapter_class = S2xVideoProtocolAdapter
    elif args.drone_type in WIFI_UAV_DRONE_TYPES:
        # wifi_uav / wifi_uav_fld / wifi_uav_uav share one implementation;
        # the variant selects packet-format specifics in the adapter.
        wifi_uav_variant = resolve_wifi_uav_variant(args.drone_type)
        logger.info("[main] Using WiFi UAV drone implementation (variant=%s).", wifi_uav_variant)
        default_ip = "192.168.169.1"
        default_control_port = 8800
        default_video_port = 8800 # For WifiUAV, control and video often use the same port
        default_rate = 80.0

        drone_ip = args.drone_ip if args.drone_ip else default_ip
        control_port = args.control_port if args.control_port else default_control_port
        video_port = args.video_port if args.video_port else default_video_port

        drone_model = WifiUavRcModel()
        protocol_adapter = WifiUavRcProtocolAdapter(drone_ip, control_port, variant=wifi_uav_variant)
        video_protocol_adapter_class = WifiUavVideoProtocolAdapter
    elif args.drone_type in COOINGDV_DRONE_TYPES:
        logger.info("[main] Using Cooingdv drone implementation (%s).", args.drone_type)
        # The Jieli variant uses different defaults; its video port default
        # of 0 is later coerced to 6666 when video is enabled (see below).
        if args.drone_type == "cooingdv_jieli":
            default_ip = "192.168.8.15"
            default_control_port = 2228
            default_video_port = 0
        else:
            default_ip = "192.168.1.1"
            default_control_port = 7099
            default_video_port = 7070  # RTSP port for video streaming
        default_rate = 20.0

        drone_ip = args.drone_ip if args.drone_ip else default_ip
        control_port = args.control_port if args.control_port else default_control_port
        video_port = args.video_port if args.video_port else default_video_port

        drone_model = CooingdvRcModel()
        if args.drone_type == "cooingdv_jieli":
            protocol_adapter = CooingdvJieliRcProtocolAdapter(drone_ip, control_port)
            video_protocol_adapter_class = CooingdvJieliVideoProtocolAdapter
        else:
            protocol_adapter = CooingdvRcProtocolAdapter(drone_ip, control_port)
            video_protocol_adapter_class = CooingdvVideoProtocolAdapter
    elif args.drone_type == "wifi_cam":
        logger.info("[main] Using WiFi_CAM native UDP implementation.")
        default_ip = "192.168.4.153"
        default_control_port = 8090
        default_video_port = 8080
        default_rate = 25.0

        drone_ip = args.drone_ip if args.drone_ip else default_ip
        control_port = args.control_port if args.control_port else default_control_port
        video_port = args.video_port if args.video_port else default_video_port

        drone_model = WifiCamRcModel()
        # Command mode can be overridden via the WIFI_CAM_COMMAND_MODE
        # environment variable; "auto" lets the adapter decide.
        protocol_adapter = WifiCamRcProtocolAdapter(
            drone_ip,
            control_port,
            command_mode=os.getenv("WIFI_CAM_COMMAND_MODE", "auto"),
        )
        video_protocol_adapter_class = WifiCamVideoProtocolAdapter
    else:
        # Should not happen due to choices in argparse
        logger.error("[main] Unknown drone type: %s", args.drone_type)
        sys.exit(1)

    # Per-drone default control rate unless overridden on the CLI.
    control_rate = args.rate if args.rate is not None else default_rate
    controller = FlightController(drone_model, protocol_adapter, control_rate)
    

    
    # Start video if requested
    video_view = None
    video_receiver = None
    video_thread = None
    
    if args.with_video:
        # Define the blueprint for the video protocol adapter.
        # The VideoReceiverService will create and manage the instance.
        if args.drone_type == "s2x":
            video_protocol_args = {
                "drone_ip": drone_ip,
                "control_port": control_port,
                "video_port": video_port
            }
        elif args.drone_type in WIFI_UAV_DRONE_TYPES:
            video_protocol_args = {
                "drone_ip": drone_ip,
                "control_port": control_port,
                "video_port": video_port,
                "variant": wifi_uav_variant,
            }
        elif args.drone_type in COOINGDV_DRONE_TYPES:
            video_protocol_args = {
                "drone_ip": drone_ip,
                "control_port": control_port,
                "video_port": video_port
            }
            if args.drone_type == "cooingdv_jieli":
                # Jieli's default video port is 0; fall back to 6666 then.
                video_protocol_args["video_port"] = video_port or 6666
        elif args.drone_type == "wifi_cam":
            video_protocol_args = {
                "drone_ip": drone_ip,
                "control_port": control_port,
                "video_port": video_port,
            }
        
        frame_queue = queue.Queue(maxsize=100)
        # Some adapters need the RC adapter to keep the video link alive
        # (WiFi UAV family and wifi_cam); others receive None.
        video_receiver = VideoReceiverService(
            video_protocol_adapter_class, # The class to instantiate
            video_protocol_args,          # The arguments for it
            frame_queue,
            dump_frames=args.dump_frames,
            dump_packets=args.dump_packets,
            rc_adapter=protocol_adapter if args.drone_type in WIFI_UAV_DRONE_TYPES or args.drone_type == "wifi_cam" else None,
        )
        video_view = OpenCVVideoView(frame_queue)
        
        # Start video receiver service. It now handles the protocol's lifecycle.
        video_receiver.start()
        
        # Run HighGUI in its own, non-daemon thread
        video_thread = threading.Thread(
            target=video_view.run,
            name="OpenCVVideoThread"
        )
        video_thread.start()

    # Start controller
    controller.start()
    
    # Set up signal handler for clean shutdown
    def signal_handler(sig, frame):
        # Stop order matters: video consumers first, then the controller
        # that keeps the drone link alive.
        logger.info("[main] Caught signal, shutting down...")
        
        # First stop video components
        if video_receiver:
            video_receiver.stop()
        if video_view:
            video_view.stop()
        if video_thread:
            video_thread.join(timeout=1.0)
        
        # Then stop controller
        controller.stop()
        
        # Exit more forcefully, but only if threads haven't cleaned up
        if video_thread and video_thread.is_alive():
            logger.warning("[main] Forcing exit due to lingering threads")
            os._exit(0)
        else:
            # Normal exit
            sys.exit(0)
    
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
    
    # Start CLI view (blocks until the user quits or interrupts)
    try:
        view = CLIView(controller)
        view.run()
    except KeyboardInterrupt:
        pass
    finally:
        # Clean up in reverse order of creation
        controller.stop()
        
        # Clean up video components
        if video_view:
            video_view.stop()
        if video_receiver:
            video_receiver.stop()
        if video_thread:
            video_thread.join()          # wait until the window thread exits

if __name__ == "__main__":
    main()

================================================
FILE: backend/models/__init__.py
================================================
from .s2x_rc import S2xDroneModel

__all__ = ['S2xDroneModel']


================================================
FILE: backend/models/base_rc.py
================================================
from __future__ import annotations

from abc import ABC, abstractmethod
from typing import ClassVar, Dict, List, Union, Optional

from models.stick_range import StickRange
from models.control_profile import ControlProfile


class BaseRCModel(ABC):
    """Common logic for every RC model / protocol implementation.

    Holds the raw stick range, the active control profile (accel/decel
    rates, expo, reversal boost), and the four axis values (throttle,
    yaw, pitch, roll) expressed in raw protocol units.
    """

    # Sub-classes **must** override or pass a StickRange explicitly.
    STICK_RANGE: ClassVar[Optional[StickRange]] = None

    # Generic fall-back presets – drones override with their own set
    PRESETS: ClassVar[Dict[str, ControlProfile]] = {
        "normal":     ControlProfile("normal",     1.5,  2.5, 0.5, 0.02),
        "precise":    ControlProfile("precise",    1.0,  3.0, 0.3, 0.01),
        "aggressive": ControlProfile("aggressive", 3.0,  2.0, 1.5, 0.10),
    }
    # Cycle order used by set_sensitivity().
    SENSITIVITY_SEQ: ClassVar[List[str]] = ["normal", "precise", "aggressive"]

    # -----------------------------------------------------------------
    def __init__(
        self,
        stick_range: Optional[StickRange] = None,
        profile: Union[str, ControlProfile] = "normal",
    ) -> None:
        """Resolve the stick range and profile, then centre all axes.

        Raises TypeError if neither ``stick_range`` nor the class-level
        STICK_RANGE is provided, and ValueError for an unknown profile
        name.
        """
        # ----- enforce STICK_RANGE -----------------------------------
        if stick_range is None:
            stick_range = self.__class__.STICK_RANGE
        if stick_range is None:
            raise TypeError(
                f"{self.__class__.__name__} must define STICK_RANGE "
                "or pass stick_range to BaseRCModel.__init__()"
            )
        # -------------------------------------------------------------

        if isinstance(profile, str):
            if profile not in self.PRESETS:
                raise ValueError(f"Unknown profile '{profile}'")
            profile = self.PRESETS[profile]

        # Cache the raw limits as floats for the arithmetic below.
        self.range = stick_range
        self.min_control_value = float(stick_range.min_val)
        self.center_value      = float(stick_range.mid_val)
        self.max_control_value = float(stick_range.max_val)

        self._apply_profile(profile)

        # axes start centred
        self.throttle = self.yaw = self.pitch = self.roll = self.center_value

    # ----- API that concrete models MUST still implement --------------
    @abstractmethod
    def update(self, dt, axes): ...          # advance axis state by dt
    @abstractmethod
    def takeoff(self): ...                   # arm the takeoff action
    @abstractmethod
    def land(self): ...                      # arm the landing action
    @abstractmethod
    def get_control_state(self): ...         # snapshot for the adapter

    # ----- shared helpers ---------------------------------------------
    def set_profile(self, name: str) -> None:
        """Switch to the named preset; raises ValueError if unknown."""
        if name not in self.PRESETS:
            raise ValueError(f"Unknown profile '{name}'")
        self._apply_profile(self.PRESETS[name])

    def set_sensitivity(self, preset: int) -> None:
        """Select a preset by index, wrapping around SENSITIVITY_SEQ."""
        idx = preset % len(self.SENSITIVITY_SEQ)
        self.set_profile(self.SENSITIVITY_SEQ[idx])

    def set_strategy(self, strategy) -> None:
        """Install the control strategy used by update()."""
        self.strategy = strategy

    # -----------------------------------------------------------------
    def _apply_profile(self, profile: ControlProfile) -> None:
        """Convert the profile's range *ratios* into raw-unit rates."""
        half_range = self.max_control_value - self.center_value
        full_range = self.max_control_value - self.min_control_value

        self.profile            = profile
        self.accel_rate         = profile.accel_ratio     * half_range
        self.decel_rate         = profile.decel_ratio     * half_range
        self.expo_factor        = profile.expo_factor
        self.immediate_response = profile.immediate_ratio * full_range

    # existing helper (unchanged)
    def _scale_normalised(self, value: float) -> float:
        """
        Map a normalised [-1 … +1] input to raw protocol units using
        the model's StickRange.

        The two half-ranges are scaled independently, so an asymmetric
        StickRange (centre not midway between min and max) is handled
        correctly.
        """
        if value >= 0:
            return self.center_value + value * (self.max_control_value - self.center_value)
        return self.center_value + value * (self.center_value - self.min_control_value)

    def _update_axes_incremental(self, dt, axes):
        # Unpack the axes dict (missing keys mean "no input") and feed
        # the shared incremental stick logic.
        self.update_axes(
            dt,
            axes.get("throttle", 0),
            axes.get("yaw", 0),
            axes.get("pitch", 0),
            axes.get("roll", 0),
        )

    def update_axes(self, dt, throttle_dir, yaw_dir, pitch_dir, roll_dir):
        """
        Apply the shared incremental stick logic used by keyboard-style controls.

        Pitch and roll get a small immediate boost when the pilot reverses
        direction so the craft feels less sluggish during lateral movement.
        """
        # Direction values: >0 push toward max, <0 toward min, 0 lets the
        # axis decay back to centre.
        for attr, direction, boost_enabled in (
            ("throttle", throttle_dir, False),
            ("yaw", yaw_dir, False),
            ("pitch", pitch_dir, True),
            ("roll", roll_dir, True),
        ):
            cur = getattr(self, attr)
            last_dir_attr = f"last_{attr}_dir"
            last_dir = getattr(self, last_dir_attr, 0)

            if direction > 0:
                # One-shot jump on a direction reversal (pitch/roll only),
                # clamped so we never overshoot the limit.
                if boost_enabled and last_dir <= 0:
                    cur += min(
                        self.max_control_value - cur,
                        self.immediate_response,
                    )
                # Acceleration grows with the remaining distance to the
                # limit (scaled by expo_factor), so the ramp is fast at
                # first and eases off near the stop.
                dist = self.max_control_value - cur
                accel = self.accel_rate * dt * (
                    1 + self.expo_factor * dist
                    / (self.max_control_value - self.center_value)
                )
                new = min(self.max_control_value, cur + accel)

            elif direction < 0:
                # Mirror of the positive branch, toward the lower limit.
                if boost_enabled and last_dir >= 0:
                    cur -= min(
                        cur - self.min_control_value,
                        self.immediate_response,
                    )
                dist = cur - self.min_control_value
                accel = self.accel_rate * dt * (
                    1 + self.expo_factor * dist
                    / (self.center_value - self.min_control_value)
                )
                new = max(self.min_control_value, cur - accel)

            else:
                # No input: decay toward centre, faster when further out
                # (fixed 0.5 distance weighting), clamped at the centre.
                if cur > self.center_value:
                    dist = cur - self.center_value
                    decel = self.decel_rate * dt * (
                        1 + 0.5 * dist
                        / (self.max_control_value - self.center_value)
                    )
                    new = max(self.center_value, cur - decel)
                elif cur < self.center_value:
                    dist = self.center_value - cur
                    decel = self.decel_rate * dt * (
                        1 + 0.5 * dist
                        / (self.center_value - self.min_control_value)
                    )
                    new = min(self.center_value, cur + decel)
                else:
                    new = cur

            setattr(self, attr, new)
            setattr(self, last_dir_attr, direction)

    def _update_axes_direct(self, axes):
        # Direct (gamepad-style) mapping: optionally shape each
        # normalised input with the expo curve, then scale it straight
        # into raw units — no rate limiting.
        expo = getattr(self, "expo_factor", 0.0)
        for attr, value in axes.items():
            if expo:                              # optional expo curve
                sign  = 1 if value >= 0 else -1
                value = sign * (abs(value) ** (1 + expo))
            setattr(self, attr, self._scale_normalised(value))


================================================
FILE: backend/models/base_video_model.py
================================================
from abc import ABC, abstractmethod
from typing import Optional
from models.video_frame import VideoFrame

class BaseVideoModel(ABC):
    """
    Abstract assembler that converts protocol-level *chunks* (a JPEG
    slice, a whole JPEG, an H.264 NALU, …) into finished VideoFrame
    objects.  Implementations keep whatever reassembly state they need.
    """

    @abstractmethod
    def ingest_chunk(
        self,
        *,
        stream_id: int | None = None,
        chunk_id: int | None = None,
        payload: bytes,
    ) -> Optional[VideoFrame]:
        """
        Push one chunk into the assembler.

        ``stream_id`` identifies the logical stream/frame the chunk
        belongs to (e.g. a frame number), ``chunk_id`` orders the chunk
        within that stream (e.g. a slice index), and ``payload`` carries
        the raw codec bytes (JPEG slice, NALU, etc.).

        Returns the completed VideoFrame once enough chunks have
        arrived, or None while more data is still required.
        """
        raise NotImplementedError

================================================
FILE: backend/models/control_profile.py
================================================
from dataclasses import dataclass

@dataclass(frozen=True)
class ControlProfile:
    """Immutable tuning preset expressed as *ratios* of the stick range.

    ``accel_ratio`` and ``decel_ratio`` are fractions of the half-range
    applied per second, ``expo_factor`` is dimensionless, and
    ``immediate_ratio`` is the fraction of the full range used for the
    one-shot reversal boost.
    """
    name: str
    accel_ratio: float
    decel_ratio: float
    expo_factor: float = 0.0
    immediate_ratio: float = 0.0


================================================
FILE: backend/models/cooingdv_rc.py
================================================
"""
RC Model for Cooingdv drones (RC UFO, KY UFO, E88 Pro, etc.)

These drones use the cooingdv publisher's mobile apps and communicate
via UDP on port 7099 with RTSP video on port 7070.

Key features:
- Soft landing (distinct from emergency stop)
- Headless mode
- Flip/somersault capability
- Gyro calibration
"""

from __future__ import annotations

from models.base_rc import BaseRCModel
from models.control_profile import ControlProfile
from models.stick_range import StickRange
from control.strategies import IncrementalStrategy


class CooingdvRcModel(BaseRCModel):
    """
    RC model for drones driven by the cooingdv publisher apps
    (RC UFO, KY UFO, E88 Pro).

    From the decompiled apps: sticks are centred at 128 (0x80) with a
    safe operating range of 50-200, and the stock apps emit control
    packets roughly every 50 ms (about 20 Hz).  The CooingDV family has
    at least two packet variants — the short "TC" and the extended "GL"
    — so this model only tracks high-level intent and leaves the flag
    layout to the protocol adapter.
    """

    # App-enforced stick limits: centre 128, clamped to 50-200
    # (from the decompiled FlyController.java).
    STICK_RANGE = StickRange(50, 128, 200)

    PRESETS = {
        # name         accel   decel  expo  immediate-boost
        "normal":     ControlProfile("normal",     2.0, 4.0, 0.5, 0.02),
        "precise":    ControlProfile("precise",    1.2, 5.0, 0.3, 0.01),
        "aggressive": ControlProfile("aggressive", 4.0, 3.0, 1.2, 0.10),
    }

    def __init__(self, profile: str | ControlProfile = "normal") -> None:
        super().__init__(stick_range=self.STICK_RANGE, profile=profile)

        # Keyboard-style incremental stick movement by default.
        self.strategy = IncrementalStrategy()

        # One-shot / toggle command flags consumed by the adapter.
        self.takeoff_flag = False       # takeoff request
        self.land_flag = False          # Soft landing (0x02)
        self.stop_flag = False          # Emergency stop (0x04)
        self.flip_flag = False          # Flip/somersault (0x08)
        self.headless_flag = False      # Headless mode (0x10) - toggle state
        self.calibration_flag = False   # Gyro calibration (0x80)

        # Last commanded direction per axis (reversal-boost bookkeeping).
        self.last_throttle_dir = 0
        self.last_yaw_dir = 0
        self.last_pitch_dir = 0
        self.last_roll_dir = 0

    # ------------------------------------------------------------------ #
    # BaseRCModel API
    # ------------------------------------------------------------------ #
    def update(self, dt, axes):
        # Delegate axis integration to the active control strategy.
        self.strategy.update(self, dt, axes)

    def takeoff(self):
        """Request the takeoff sequence."""
        self.takeoff_flag = True

    def land(self):
        """Request a gentle, motor-on descent.

        Distinct from emergency_stop(), which cuts the motors at once;
        this lets the drone come down gracefully.
        """
        self.land_flag = True

    def emergency_stop(self):
        """Cut the motors immediately.

        WARNING: the drone will fall out of the sky!  Use land() for a
        normal landing.
        """
        self.stop_flag = True

    def flip(self):
        """Request a 360-degree flip/somersault."""
        self.flip_flag = True

    def toggle_headless(self):
        """Invert the persistent headless-mode state."""
        self.headless_flag = not self.headless_flag

    def calibrate_gyro(self):
        """Start gyro calibration; the drone should sit on a flat surface."""
        self.calibration_flag = True

    def get_control_state(self):
        """Snapshot of the four axes plus the headless toggle."""
        state = {
            "throttle": self.throttle,
            "yaw": self.yaw,
            "pitch": self.pitch,
            "roll": self.roll,
            "headless": self.headless_flag,
        }
        return state

    def set_strategy(self, strategy) -> None:
        self.strategy = strategy



================================================
FILE: backend/models/cooingdv_video_model.py
================================================
"""
Video Model for Cooingdv drones.

This model is intentionally simple because cooingdv drones use RTSP streaming,
which delivers complete frames directly. Unlike the S2x and WiFi UAV protocols
that require assembling frames from UDP chunks, RTSP gives us ready-to-use
frames via OpenCV.

The model exists primarily for API consistency with other implementations.
Most of the heavy lifting is done in the video protocol adapter.
"""

from typing import Optional
from models.base_video_model import BaseVideoModel
from models.video_frame import VideoFrame


class CooingdvVideoModel(BaseVideoModel):
    """
    Pass-through video model for cooingdv drones.

    The transport is RTSP, which already delivers whole frames, so
    there is nothing to reassemble or decode: each payload handed to
    ingest_chunk() is wrapped directly into a VideoFrame.  The class
    exists mainly so the cooingdv stack mirrors the chunk-based drone
    implementations.
    """

    def __init__(self):
        # Monotonic fallback id, used when the caller supplies none.
        self._frame_counter = 0

    def ingest_chunk(
        self,
        *,
        stream_id: int | None = None,
        chunk_id: int | None = None,
        payload: bytes,
    ) -> Optional[VideoFrame]:
        """
        Wrap one complete JPEG frame in a VideoFrame.

        ``chunk_id`` is ignored — RTSP frames arrive whole, never
        sliced.  ``stream_id`` becomes the frame id when provided;
        otherwise the internal counter supplies one.  A VideoFrame is
        always returned.
        """
        self._frame_counter += 1

        if stream_id is None:
            frame_id = self._frame_counter
        else:
            frame_id = stream_id

        return VideoFrame(
            frame_id=frame_id,
            data=payload,
        )

    def reset(self) -> None:
        """Restart the internal frame counter."""
        self._frame_counter = 0



================================================
FILE: backend/models/debug_rc.py
================================================
from __future__ import annotations
import logging
from models.base_rc import BaseRCModel
from models.stick_range import StickRange

log = logging.getLogger(__name__)

class DebugRcModel(BaseRCModel):
    """No-op RC model: stores whatever it is given and logs each call."""

    def __init__(self):
        # Generic centred range; customize if a different span is needed.
        super().__init__(stick_range=StickRange(min_val=0, mid_val=128, max_val=255))
        log.info("Debug RC model initialized.")
        self.throttle = self.yaw = self.pitch = self.roll = 128

    def update(self, dt, axes):
        # Nothing evolves over time here; present only to satisfy the
        # abstract base class.
        pass

    def get_control_state(self):
        """Current control values, as handed to the protocol adapter."""
        return {
            "throttle": self.throttle,
            "yaw": self.yaw,
            "pitch": self.pitch,
            "roll": self.roll,
        }

    def set_throttle(self, value: int):
        self.throttle = value
        log.debug(f"Debug: set_throttle({value})")

    def set_yaw(self, value: int):
        self.yaw = value
        log.debug(f"Debug: set_yaw({value})")

    def set_pitch(self, value: int):
        self.pitch = value
        log.debug(f"Debug: set_pitch({value})")

    def set_roll(self, value: int):
        self.roll = value
        log.debug(f"Debug: set_roll({value})")

    def takeoff(self):
        log.info("Debug: takeoff()")

    def land(self):
        log.info("Debug: land()")

================================================
FILE: backend/models/s2x_rc.py
================================================
# S20 & S29 Controller model

from models.base_rc import BaseRCModel
from models.control_profile import ControlProfile
from control.strategies import IncrementalStrategy
from models.stick_range import StickRange

class S2xDroneModel(BaseRCModel):
    """RC model for the S2x protocol family (S20, S29)."""

    # Raw stick limits for this family — tailorable per drone.
    STICK_RANGE = StickRange(60, 128, 200)

    PRESETS = {
        "normal":     ControlProfile("normal",     2.08, 4.86, 0.5, 0.02),
        "precise":    ControlProfile("precise",    1.39, 5.56, 0.3, 0.01),
        "aggressive": ControlProfile("aggressive", 4.17, 3.89, 1.5, 0.11),
    }

    def __init__(self, profile: str | ControlProfile = "normal"):
        # BaseRCModel validates STICK_RANGE and applies the profile.
        super().__init__(stick_range=self.STICK_RANGE, profile=profile)

        # Keyboard-style incremental stick movement by default.
        self.strategy = IncrementalStrategy()

        # One-shot command flags consumed by the protocol adapter.
        self.takeoff_flag = False
        self.land_flag = False
        self.stop_flag = False
        self.headless_flag = False
        self.calibration_flag = False

        # Misc protocol fields.
        self.speed = 20    # matches 0x14 from dumps
        self.speed_index = 2

        # Last commanded direction per axis (reversal bookkeeping).
        self.last_throttle_dir = 0
        self.last_yaw_dir = 0
        self.last_pitch_dir = 0
        self.last_roll_dir = 0

    def update(self, dt, axes):
        # Hand axis integration to the active control strategy.
        self.strategy.update(self, dt, axes)

    def takeoff(self):
        """Arm the one-shot takeoff flag."""
        self.takeoff_flag = True

    def land(self):
        """
        Request the stock app's land action.

        In the HiTurbo app this shares the same one-shot flight bit as takeoff,
        so the protocol adapter maps the semantic intent rather than a distinct
        dedicated land bit.
        """
        self.land_flag = True

    def emergency_stop(self):
        """Arm the emergency-stop flag."""
        self.stop_flag = True

    def set_speed_index(self, speed_index: int) -> None:
        """Set the Macrochip app speed tier: 0=low, 1=medium, 2=full."""
        self.speed_index = min(2, max(0, int(speed_index)))

    def get_control_state(self):
        """Current axis values plus the selected speed tier."""
        state = {
            "throttle":  self.throttle,
            "yaw":       self.yaw,
            "pitch":     self.pitch,
            "roll":      self.roll,
            "speed_index": self.speed_index,
        }
        return state

    def set_strategy(self, strategy) -> None:
        self.strategy = strategy


================================================
FILE: backend/models/s2x_video_model.py
================================================
from typing import Dict, Optional

from models.video_frame import VideoFrame
from models.base_video_model import BaseVideoModel


class S2xVideoModel(BaseVideoModel):
    """
    Reassembles sliced JPEG frames used by S2x drones.

    • Ignores the unreliable "is-last-slice" flag.
    • Finishes a frame when the frame-id rolls over.

    A frame completes either when every declared slice (total_chunks)
    has arrived, or — as a fallback — when a chunk with a new stream_id
    shows up.
    """

    # JPEG start-of-image / end-of-image markers used to trim the
    # reassembled byte stream down to a valid JPEG.
    SOI_MARKER = b"\xFF\xD8"
    EOI_MARKER = b"\xFF\xD9"

    def __init__(self) -> None:
        # Frame id currently being accumulated (None = nothing buffered).
        self._cur_fid: Optional[int] = None
        # chunk_id -> payload for the frame under assembly.
        self._frags: Dict[int, bytes] = {}

    # ──────────────────────────────────────────────────────────
    # BaseVideoModel interface
    # ──────────────────────────────────────────────────────────
    def ingest_chunk(
        self,
        *,
        stream_id: int | None = None,
        chunk_id: int | None = None,
        total_chunks: int | None = None,
        payload: bytes,
    ) -> Optional[VideoFrame]:
        """
        Feed one slice; returns a completed VideoFrame or None.

        stream_id is the frame number, chunk_id the slice index within
        that frame, and total_chunks (when known) the declared number of
        slices for the frame.
        """

        if stream_id is None or chunk_id is None:
            return None  # S2x packets always carry both ids

        # frame-id changed? -> finish previous frame. Native VNDK emits as soon
        # as all declared chunks arrive, but this fallback preserves older
        # rollover-based behavior for captures without a usable chunk count.
        completed: Optional[VideoFrame] = None
        if self._cur_fid is None:
            self._cur_fid = stream_id
        elif stream_id != self._cur_fid:
            completed = self._assemble_current()      # may be None
            self._reset(stream_id)

        # stash slice (ignore duplicates)
        self._frags.setdefault(chunk_id, payload)

        # A frame finished by rollover takes precedence over checking
        # the just-started frame for completeness.
        if completed is not None:
            return completed

        # Early emit: if the declared slice count is plausible and every
        # declared slice is present, assemble immediately.
        if total_chunks is not None and 0 < total_chunks <= 100:
            expected = set(range(total_chunks))
            if expected.issubset(self._frags):
                return self._assemble_current(expected)

        return completed

    # ──────────────────────────────────────────────────────────
    # helpers
    # ──────────────────────────────────────────────────────────
    def _reset(self, new_fid: Optional[int]) -> None:
        # Drop buffered slices and start accumulating `new_fid`
        # (None means "idle until the next chunk arrives").
        self._cur_fid = new_fid
        self._frags.clear()

    def _assemble_current(self, keys_to_use: set[int] | None = None) -> Optional[VideoFrame]:
        """Join buffered slices into a JPEG VideoFrame, or None on gaps.

        When keys_to_use is None the buffered chunk ids must form a
        contiguous run; otherwise exactly those keys are joined.
        """
        if not self._frags:
            return None

        if keys_to_use is None:
            keys = sorted(self._frags)
            # Contiguity check: any gap means slices were lost in flight.
            complete = len(keys) == keys[-1] - keys[0] + 1
            if not complete:
                missing = (keys[-1] - keys[0] + 1) - len(keys)
                # print(f"[s2x-model] Dropping frame {self._cur_fid}: {missing} slices missing")
                return None
        else:
            keys = sorted(keys_to_use)

        data = b"".join(self._frags[k] for k in keys)

        # Trim to the JPEG payload between the SOI and the last EOI.
        start = data.find(self.SOI_MARKER)
        end   = data.rfind(self.EOI_MARKER)
        if start < 0 or end < 0 or end <= start:
            #print(f"[s2x-model] JPEG markers not found on frame {self._cur_fid}")
            return None

        jpeg = data[start : end + len(self.EOI_MARKER)]
        #print(f"[s2x-model] Frame {self._cur_fid} OK "
        #      f"({len(jpeg)} bytes, {len(keys)} slices)")
        frame = VideoFrame(self._cur_fid, jpeg, "jpeg")

        self._reset(None)          # prepare for next frame
        return frame

================================================
FILE: backend/models/stick_range.py
================================================
from dataclasses import dataclass

@dataclass(frozen=True)
class StickRange:
    """Drone-specific raw protocol limits for one stick axis.

    Immutable (frozen) value object holding the raw byte values a given
    drone protocol accepts for minimum, centred, and maximum stick position.
    """
    min_val: float   # lowest raw value the protocol accepts (e.g. 60)
    mid_val: float   # raw value for a centred stick (e.g. 128)
    max_val: float   # highest raw value the protocol accepts (e.g. 200)

================================================
FILE: backend/models/video_frame.py
================================================
import time

class VideoFrame:
    """Model representing a video frame from the drone.

    Attributes:
        frame_id:  Identifier supplied by the stream re-assembler.
        data:      Raw encoded frame bytes (may be empty or None).
        format:    Encoding name, e.g. "jpeg", "h264", "h265", "yuv".
        timestamp: Capture/arrival time in epoch seconds; defaults to now.
        width/height: Not known at construction time; left as None.
        size:      Byte length of `data` (0 when data is empty or None).
    """

    def __init__(self, frame_id, data, format_type="jpeg", timestamp=None):
        self.frame_id = frame_id
        self.data = data
        self.format = format_type  # jpeg, h264, h265, yuv, etc.
        # Compare against None (not truthiness) so an explicit timestamp
        # of 0 (the epoch) is preserved instead of being replaced by now.
        self.timestamp = timestamp if timestamp is not None else time.time()
        self.width = None
        self.height = None
        self.size = len(data) if data else 0

    def __repr__(self):
        return f"VideoFrame(id={self.frame_id}, format={self.format}, size={self.size})"

================================================
FILE: backend/models/wifi_cam_rc.py
================================================
"""
RC model for WiFi_CAM native UDP drones.

The stock app uses the same centered 0x80 stick semantics as the CooingDV
family, but sends raw command bytes to a separate UDP command port.
"""

from __future__ import annotations

from control.strategies import IncrementalStrategy
from models.base_rc import BaseRCModel
from models.control_profile import ControlProfile
from models.stick_range import StickRange


class WifiCamRcModel(BaseRCModel):
    """High-level control state for WiFi_CAM drones.

    Holds the four stick axes (via BaseRCModel), one-shot action flags,
    and toggle modes; the matching protocol adapter turns this state into
    raw command bytes.
    """

    STICK_RANGE = StickRange(50, 128, 200)

    PRESETS = {
        "normal": ControlProfile("normal", 2.0, 4.0, 0.5, 0.02),
        "precise": ControlProfile("precise", 1.2, 5.0, 0.3, 0.01),
        "aggressive": ControlProfile("aggressive", 4.0, 3.0, 1.2, 0.10),
    }

    def __init__(self, profile: str | ControlProfile = "normal") -> None:
        super().__init__(stick_range=self.STICK_RANGE, profile=profile)
        self.strategy = IncrementalStrategy()

        # One-shot action and mode flags, all initially off.
        for flag_name in (
            "takeoff_flag", "land_flag", "stop_flag", "flip_flag",
            "headless_flag", "altitude_hold_flag", "calibration_flag",
        ):
            setattr(self, flag_name, False)

        # Most recent motion direction per axis.
        self.last_throttle_dir = 0
        self.last_yaw_dir = 0
        self.last_pitch_dir = 0
        self.last_roll_dir = 0

    def update(self, dt, axes):
        """Delegate per-tick axis shaping to the active strategy."""
        self.strategy.update(self, dt, axes)

    def takeoff(self):
        """Arm the one-shot take-off flag."""
        self.takeoff_flag = True

    def land(self):
        """Arm the one-shot land flag."""
        self.land_flag = True

    def emergency_stop(self):
        """Arm the one-shot emergency-stop flag."""
        self.stop_flag = True

    def flip(self):
        """Arm the one-shot flip flag."""
        self.flip_flag = True

    def toggle_headless(self):
        """Flip headless mode on/off."""
        self.headless_flag ^= True

    def toggle_altitude_hold(self):
        """Flip altitude-hold mode on/off."""
        self.altitude_hold_flag ^= True

    def calibrate_gyro(self):
        """Arm the one-shot gyro-calibration flag."""
        self.calibration_flag = True

    def get_control_state(self):
        """Snapshot of the current axes plus mode flags."""
        state = {axis: getattr(self, axis) for axis in ("throttle", "yaw", "pitch", "roll")}
        state["headless"] = self.headless_flag
        state["altitude_hold"] = self.altitude_hold_flag
        return state

    def set_strategy(self, strategy) -> None:
        """Swap the axis-shaping strategy at runtime."""
        self.strategy = strategy


================================================
FILE: backend/models/wifi_uav_rc.py
================================================
from __future__ import annotations

from models.base_rc import BaseRCModel
from models.control_profile import ControlProfile
from models.stick_range import StickRange
from control.strategies import IncrementalStrategy


class WifiUavRcModel(BaseRCModel):
    """
    RC model for toy drones driven by the "WiFi UAV" mobile app (E58, LH-X20, …).

    RC rate needs to be 50 - 80 Hz to work well.

    From packet captures: all four stick axes idle at 0x7F (127) when
    centred, and min / max values hover around 0x3F (63) and 0xBF (191).
    The usable range is tunable per drone via STICK_RANGE.
    """

    #            min, mid, max
    # STICK_RANGE = StickRange(0, 128, 255)
    STICK_RANGE = StickRange(40, 128, 220)

    PRESETS = {
        # name         accel   decel  expo  immediate-boost
        "normal":     ControlProfile("normal",     2.0, 4.0, 0.5, 0.02),
        "precise":    ControlProfile("precise",    1.2, 5.0, 0.3, 0.01),
        "aggressive": ControlProfile("aggressive", 4.0, 3.0, 1.2, 0.10),
    }

    def __init__(self, profile: str | ControlProfile = "normal") -> None:
        super().__init__(stick_range=self.STICK_RANGE, profile=profile)

        self.strategy = IncrementalStrategy()

        # One-shot action flags, all initially off.
        self.takeoff_flag = False
        self.land_flag = False
        self.stop_flag = False
        self.calibration_flag = False
        self.headless_flag = False
        self.flip_flag = False

        # Persistent mode state.
        self.speed_index = 2        # app speed tier (see set_speed_index)
        self.camera_tilt_state = 0  # 0 = neutral

        # Most recent motion direction per axis.
        self.last_throttle_dir = 0
        self.last_yaw_dir = 0
        self.last_pitch_dir = 0
        self.last_roll_dir = 0

    # ------------------------------------------------------------------ #
    # BaseRCModel API
    # ------------------------------------------------------------------ #
    def update(self, dt, axes):          # type: ignore[override]
        """Delegate per-tick axis shaping to the active strategy."""
        self.strategy.update(self, dt, axes)

    def takeoff(self):
        """Arm the one-shot take-off flag."""
        self.takeoff_flag = True

    def land(self):
        """Request the app's normal land / descend action."""
        self.land_flag = True

    def emergency_stop(self):
        """Immediate motor stop, distinct from the normal land action."""
        self.stop_flag = True

    def flip(self):
        """Request a flip / roll action when supported by the active variant."""
        self.flip_flag = True

    def set_speed_index(self, speed_index: int) -> None:
        """Set WiFi-UAV app speed tier: 0=30%, 1=60%, 2=100%, 3=25%."""
        value = int(speed_index)
        if value < 0:
            value = 0
        elif value > 3:
            value = 3
        self.speed_index = value

    def set_camera_tilt_state(self, tilt_state: int) -> None:
        """Set camera tilt command state: 0=neutral, 1/2=opposite tilt directions."""
        value = int(tilt_state)
        if value < 0:
            value = 0
        elif value > 2:
            value = 2
        self.camera_tilt_state = value

    def toggle_record(self):             # type: ignore[override]
        """Recording is unsupported by this protocol – always returns 0."""
        return 0

    def get_control_state(self):
        """Snapshot of the current axes plus mode state."""
        state = {axis: getattr(self, axis) for axis in ("throttle", "yaw", "pitch", "roll")}
        state["headless"] = self.headless_flag
        state["speed_index"] = self.speed_index
        state["camera_tilt"] = self.camera_tilt_state
        return state

    def set_strategy(self, strategy) -> None:
        """Swap the axis-shaping strategy at runtime."""
        self.strategy = strategy


================================================
FILE: backend/models/wifi_uav_video_model.py
================================================
from collections import defaultdict
from typing import Dict, Optional

from models.video_frame import VideoFrame
from utils.wifi_uav_jpeg import generate_jpeg_headers, EOI


class WifiUavVideoModel:
    """
    Re-assembles one JPEG frame from the WiFi-UAV stream.

    The drone sends many UDP packets per frame.  Every packet contains a
    56-byte proprietary header and a JPEG fragment:

        • payload[1] == 0x01 … «JPEG» packet marker
        • payload[2] == 0x38 … *not* last fragment
                     != 0x38 … last fragment
        • payload[16:18]      … little-endian frame counter
        • payload[32:34]      … little-endian fragment index
        • payload[56:]        … actual JPEG bytes
    """

    HEADER_LEN = 56

    def __init__(self, width: int = 640, height: int = 360, num_components: int = 3):
        self._jpeg_header = generate_jpeg_headers(width, height, num_components)

        self._current_frame_id: Optional[int] = None
        self._expected_fragments: Optional[int] = None
        # Plain dict on purpose: a defaultdict(bytes) would silently
        # substitute b"" for missing fragment indices during assembly,
        # producing a corrupt JPEG instead of dropping the frame.
        self._fragments: Dict[int, bytes] = {}

    # --------------------------------------------------------------------- #
    # public API
    # --------------------------------------------------------------------- #
    def ingest_chunk(self, payload: bytes) -> Optional[VideoFrame]:
        """
        Returns a complete `VideoFrame` when all fragments of one JPEG
        have been received; otherwise returns None.
        """

        # 1. Validate basic markers
        if len(payload) <= self.HEADER_LEN or payload[1] != 0x01:
            return None  # not a JPEG packet – ignore

        last_fragment = payload[2] != 0x38
        frame_id = int.from_bytes(payload[16:18], "little")
        fragment_id = int.from_bytes(payload[32:34], "little")
        jpeg_slice = payload[self.HEADER_LEN :]

        # 2. Start a new frame if necessary
        if self._current_frame_id != frame_id:
            self._reset_state(frame_id)

        # 3. Store slice (duplicates simply overwrite the same index)
        self._fragments[fragment_id] = jpeg_slice
        if last_fragment:
            self._expected_fragments = fragment_id + 1

        # 4. Assemble only when every index 0..expected-1 is present.
        #    A bare `len(fragments) == expected` count could be satisfied
        #    by a gappy set such as {0, 2, 3, 5} with expected == 4, which
        #    would then be padded/joined into a corrupt JPEG.  Checking
        #    contiguity also lets a frame complete even when stray
        #    out-of-range fragments were buffered.
        expected = self._expected_fragments
        if expected is not None and all(
            i in self._fragments for i in range(expected)
        ):
            body = b"".join(self._fragments[i] for i in range(expected))
            full_jpeg = self._jpeg_header + body + bytes(EOI)  # ensure immutable bytes

            # Prepare next frame
            self._reset_state(None)

            return VideoFrame(frame_id, full_jpeg)

        return None

    # ------------------------------------------------------------------ #
    # private helpers
    # ------------------------------------------------------------------ #
    def _reset_state(self, new_frame_id: Optional[int]):
        """Forget all buffered fragments and start tracking `new_frame_id`."""
        self._current_frame_id = new_frame_id
        self._expected_fragments = None
        self._fragments.clear()


================================================
FILE: backend/plugins/__init__.py
================================================


================================================
FILE: backend/plugins/base.py
================================================
from abc import ABC, abstractmethod
from typing import Iterator
from services.flight_controller import FlightController
from models.video_frame import VideoFrame

class Plugin(ABC):
    """
    Base class all runtime plug-ins must inherit from.
    `frame_source` is ANY iterator that yields either:
      • backend.models.video_frame.VideoFrame  (format == "jpeg"),
      • or an np.ndarray BGR/RGB image.
    """

    def __init__(self,
                 name: str,
                 flight_controller: FlightController,
                 frame_source: Iterator,
                 overlay_queue = None,
                 **kwargs):
        """Wire the plugin to its flight controller, frame source and overlay queue."""
        self.name   = name
        self.fc     = flight_controller
        self.frames = frame_source
        self.overlays = overlay_queue
        self.running = False
        # Lifecycle guards: make stop() idempotent and ensure cleanup runs
        # even if subclasses flip `running` directly.
        self._started = False
        self._stopped = False
        self.loop_thread = None

    def start(self):
        """Mark the plugin running and invoke `_on_start` (no-op if already running)."""
        if self.running:
            return
        self.running = True
        self._started = True
        self._stopped = False
        self._on_start()

    def stop(self):
        """Stop the plugin; safe to call repeatedly and from the plugin's own thread."""
        # Idempotent stop: allow cleanup to run once even if `running` was
        # already set False by a subclass or background thread.
        if self._stopped:
            return
        self.running = False
        self._stopped = True
        if self._started:
            self._on_stop()

    @abstractmethod
    def _on_start(self):
        """Subclass hook: allocate resources / start worker threads."""
        ...

    def _on_stop(self):
        """Subclass hook: release resources.  Default does nothing."""
        pass

    def send_overlay(self, data: list):
        """Best-effort push of overlay data to the UI queue.

        Drops the update when no queue is attached or the queue is full;
        never lets queue errors propagate into the plugin loop.
        """
        if self.overlays:
            try:
                self.overlays.put_nowait(data)
            except Exception:
                # Narrowed from a bare `except:`, which would also swallow
                # KeyboardInterrupt/SystemExit.  A full queue just means
                # the UI is behind — drop the update.
                pass

================================================
FILE: backend/plugins/follow/__init__.py
================================================


================================================
FILE: backend/plugins/follow/follow_controller.py
================================================
class FollowController:
    """
    Turns a detection box (horizontal position + width) into yaw/pitch
    commands that keep the target centered and at a stable distance.

    The control law is bang-bang: a fixed-magnitude command whenever the
    target sits outside the deadzone, zero otherwise.
    """

    def __init__(
        self,
        yaw_deadzone: float = 0.15,
        pitch_deadzone: float = 0.02,
        min_box_width: float = 0.30,
        max_box_width: float = 0.80,
        invert_yaw: bool = False,
        invert_pitch: bool = False,
        yaw_speed: float = 20.0,
        pitch_speed: float = 20.0,
    ):
        self.yaw_deadzone = yaw_deadzone
        self.pitch_deadzone = pitch_deadzone
        self.min_box_width = min_box_width
        self.max_box_width = max_box_width
        self.invert_yaw = invert_yaw
        self.invert_pitch = invert_pitch
        # Clamp command magnitudes into the valid 0..100 range.
        self.yaw_speed = max(0.0, min(100.0, yaw_speed))
        self.pitch_speed = max(0.0, min(100.0, pitch_speed))

    def compute(self, box_center_x: float, box_width: float) -> tuple[float, float]:
        """
        Compute yaw and pitch commands.

        Args:
            box_center_x: Normalized x position of box center (0.0 = left, 1.0 = right)
            box_width: Normalized width of box (0.0 to 1.0)

        Returns:
            (yaw, pitch) commands in range -100 to 100
        """
        # Yaw: rotate toward the target when it drifts off-center.
        offset = box_center_x - 0.5
        if abs(offset) <= self.yaw_deadzone:
            yaw = 0.0
        else:
            yaw = self.yaw_speed if offset > 0 else -self.yaw_speed
            if self.invert_yaw:
                yaw = -yaw

        # Pitch: fly forward when the box is too small, back when too big.
        if box_width < self.min_box_width - self.pitch_deadzone:
            pitch = self.pitch_speed
        elif box_width > self.max_box_width + self.pitch_deadzone:
            pitch = -self.pitch_speed
        else:
            pitch = 0.0
        if self.invert_pitch:
            pitch = -pitch

        return yaw, pitch


================================================
FILE: backend/plugins/follow/follow_plugin.py
================================================
import threading
import cv2
import numpy as np
from ultralytics import YOLO
import time
import os
import logging

from ..base import Plugin
from .follow_controller import FollowController
from control.strategies import DirectStrategy

logger = logging.getLogger(__name__)


class FollowPlugin(Plugin):
    """
    Person-following plugin using YOLOv10 for detection.
    Detects people in video frames and sends yaw/pitch commands to keep them centered.
    """

    def _on_start(self):
        """Cap CPU thread usage, load config and YOLO weights, switch the RC
        model to DirectStrategy, and launch the detection/control worker thread."""
        # ---- Thread caps for better CPU behavior ----
        try:
            import torch
            torch.set_num_threads(int(os.getenv("TORCH_NUM_THREADS", "2")))
            torch.set_num_interop_threads(1)
        except Exception:
            pass

        try:
            cv2.setNumThreads(1)
        except Exception:
            pass

        os.environ.setdefault("OMP_NUM_THREADS", "2")
        os.environ.setdefault("MKL_NUM_THREADS", "2")

        # ---- Configuration ----
        self.frame_rate = int(os.getenv("FOLLOW_FPS", "20"))
        self.img_size = int(os.getenv("YOLO_IMG_SIZE", "320"))
        self.confidence = float(os.getenv("YOLO_CONFIDENCE", "0.65"))
        self.log_interval = float(os.getenv("FOLLOW_LOG_INTERVAL", "2.0"))

        # ---- Load YOLO model ----
        # Resolution order: $YOLO_WEIGHTS if it points at an existing file,
        # then yolov10n.pt two directories above this file, else the bare
        # name "yolov10n.pt" (left to YOLO itself to resolve).
        weights_env = os.getenv("YOLO_WEIGHTS")
        if weights_env and os.path.exists(weights_env):
            weights_path = weights_env
        else:
            repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
            default_weights = os.path.join(repo_root, "yolov10n.pt")
            weights_path = default_weights if os.path.exists(default_weights) else "yolov10n.pt"

        self.model = YOLO(weights_path)

        # ---- Follow controller ----
        self.ctrl = FollowController(
            yaw_deadzone=float(os.getenv("FOLLOW_CENTER_DEADZONE", "0.15")),
            pitch_deadzone=float(os.getenv("FOLLOW_PITCH_DEADZONE", "0.02")),
            min_box_width=float(os.getenv("FOLLOW_MIN_BOX_WIDTH", "0.30")),
            max_box_width=float(os.getenv("FOLLOW_MAX_BOX_WIDTH", "0.80")),
            invert_yaw=os.getenv("FOLLOW_INVERT_YAW", "false").lower() in ("1", "true", "yes"),
            invert_pitch=os.getenv("FOLLOW_INVERT_PITCH", "false").lower() in ("1", "true", "yes"),
            yaw_speed=float(os.getenv("FOLLOW_YAW_SPEED", "20.0")),
            pitch_speed=float(os.getenv("FOLLOW_PITCH_SPEED", "20.0")),
        )

        # ---- Set DirectStrategy for follow control ----
        # Previous strategy/expo are saved so _on_stop() can restore them.
        self._prev_strategy = getattr(self.fc.model, "strategy", None)
        self._prev_expo = getattr(self.fc.model, "expo_factor", None)

        try:
            self.fc.model.set_strategy(DirectStrategy())
            self.fc.model.expo_factor = 0.0
            logger.info("[FollowPlugin] Started with DirectStrategy, expo=0")
        except Exception as e:
            logger.warning("[FollowPlugin] Warning: %s", e)

        self.loop_thread = threading.Thread(target=self._loop, daemon=True)
        self.loop_thread.start()

    def _on_stop(self):
        """Restore the previous control strategy/expo and join the worker thread."""
        # Restore previous strategy
        try:
            if self._prev_strategy is not None:
                self.fc.model.set_strategy(self._prev_strategy)
            if self._prev_expo is not None:
                self.fc.model.expo_factor = self._prev_expo
        except Exception:
            pass

        if self.loop_thread:
            self.loop_thread.join(timeout=1.0)

    def _loop(self):
        """Worker loop: decode frames, detect the largest person, steer toward it."""
        logger.info("[FollowPlugin] Loop started. Waiting for frames...")
        frame_interval = 1.0 / self.frame_rate
        last_frame_time = 0
        last_log_time = 0

        for frame in self.frames:
            # Rate limit
            # NOTE(review): frames arriving above the target rate are skipped
            # here *before* the `running` check below, so a stop request is
            # only observed when a frame is processed or the source ends.
            now = time.time()
            if now - last_frame_time < frame_interval:
                continue
            last_frame_time = now

            if not self.running:
                break

            # Decode frame (accepts JPEG VideoFrame objects or raw ndarrays)
            if hasattr(frame, "format") and frame.format == "jpeg":
                img = cv2.imdecode(np.frombuffer(frame.data, np.uint8), cv2.IMREAD_COLOR)
                if img is None:
                    continue
            elif isinstance(frame, np.ndarray):
                img = frame
            else:
                continue

            # Run YOLO detection
            persons = []
            try:
                results = self.model(
                    img,
                    stream=True,
                    verbose=False,
                    classes=[0],  # person class only
                    imgsz=self.img_size,
                    conf=self.confidence,
                )
                for r in results:
                    for box in r.boxes or []:
                        xyxy = box.xyxy[0].tolist()
                        persons.append(xyxy)
            except Exception:
                continue

            if not persons:
                # No detection - stop moving and clear overlay
                self.fc.set_axes(throttle=0, yaw=0, pitch=0, roll=0)
                self.send_overlay([])
                continue

            # Pick largest person (by area)
            x1, y1, x2, y2 = max(persons, key=lambda b: (b[2] - b[0]) * (b[3] - b[1]))
            box_w, box_h = x2 - x1, y2 - y1
            frame_h, frame_w = img.shape[:2]

            # Send overlay (coordinates normalized to 0..1 by frame size)
            overlay = [
                {
                    "type": "rect",
                    "coords": [x1 / frame_w, y1 / frame_h, x2 / frame_w, y2 / frame_h],
                    "color": "lime",
                }
            ]
            self.send_overlay(overlay)

            # Calculate and send commands
            # Controller outputs -100..100; the /100 suggests set_axes_from
            # expects -1..1 — TODO confirm against FlightController.
            yaw, pitch = self.ctrl.compute(
                box_center_x=(x1 + box_w / 2) / frame_w,
                box_width=box_w / frame_w,
            )
            self.fc.set_axes_from("follow", throttle=0, yaw=yaw / 100.0, pitch=pitch / 100.0, roll=0)

            # Periodic logging
            if now - last_log_time >= self.log_interval:
                center_offset = ((x1 + box_w / 2) / frame_w - 0.5) * 100
                logger.debug(
                    "[FollowPlugin] offset=%+.1f%% box=%.1f%% yaw=%.0f pitch=%.0f",
                    center_offset,
                    box_w / frame_w * 100,
                    yaw,
                    pitch,
                )
                last_log_time = now



================================================
FILE: backend/plugins/follow/test_yolo.py
================================================
import cv2
from ultralytics import YOLO

def main():
    """Webcam smoke test: run YOLOv10 person detection and draw boxes live."""
    model = YOLO("yolov10n.pt")

    # Default webcam (device index 0)
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        print("Error: Could not open webcam.")
        return

    while True:
        ret, frame = cap.read()
        if not ret:
            print("Error: Can't receive frame (stream end?). Exiting ...")
            break

        # Run inference and annotate every detected person
        for r in model(frame, verbose=False):
            for box in r.boxes:
                if box.cls == 0:  # class 0 == 'person'
                    x1, y1, x2, y2 = map(int, box.xyxy[0])
                    confidence = box.conf[0]

                    cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
                    label = f"Person: {confidence:.2f}"
                    cv2.putText(frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

        cv2.imshow('YOLOv10 Webcam Test', frame)

        # 'q' quits
        if cv2.waitKey(1) == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()

if __name__ == '__main__':
    main()


================================================
FILE: backend/plugins/manager.py
================================================
import importlib
import inspect
import logging
import pkgutil
import queue
import threading
from typing import Dict, Iterator, Type
from services.flight_controller import FlightController
from .base import Plugin

logger = logging.getLogger(__name__)

class PluginManager:
    """Discovers, starts and stops runtime plugins, feeding each one a
    stoppable iterator over the shared frame queue."""

    def __init__(self,
                 flight_controller: FlightController,
                 frame_queue: queue.Queue,
                 overlay_queue: queue.Queue):
        self._fc      = flight_controller
        self._frames_q  = frame_queue
        self._overlay_q = overlay_queue
        self._registry: Dict[str, Type[Plugin]] = {}
        self._pool: Dict[str, Plugin] = {}
        self._frame_stop_events: Dict[str, threading.Event] = {}
        self._discover_plugins()

    def available(self) -> list[str]:
        """Names of all discovered plugin classes."""
        return list(self._registry.keys())

    def running(self) -> list[str]:
        """Names of plugins currently started."""
        return list(self._pool.keys())

    def clear_overlays(self) -> None:
        """
        Clears any currently displayed overlays (frontend will render none).
        """
        if not self._overlay_q:
            return
        try:
            self._overlay_q.put_nowait([])
        except Exception:
            pass

    def start(self, name: str) -> bool:
        """
        Starts a plugin by name.

        Returns True if started, False if already running.
        Raises ValueError if the plugin is unknown.
        """
        if name not in self._registry:
            raise ValueError(f"Unknown plugin: {name}")
        if name in self._pool:
            return False

        logger.info("[PluginManager] Starting plugin: %s", name)
        cls = self._registry[name]

        stop_event = threading.Event()
        self._frame_stop_events[name] = stop_event

        def frame_iterator() -> Iterator:
            """
            Yield frames from the shared queue, but remain stoppable.

            Important: do NOT block indefinitely on Queue.get() or plugin threads
            consuming this iterator can hang forever during shutdown.
            """
            while not stop_event.is_set():
                try:
                    # Keep the block bounded so we can observe stop_event.
                    yield self._frames_q.get(timeout=0.2)
                except queue.Empty:
                    continue

        try:
            # Pass a new, unique generator instance and the overlay queue to the plugin
            inst = cls(name=name,
                       flight_controller=self._fc,
                       frame_source=frame_iterator(),
                       overlay_queue=self._overlay_q)
            inst.start()
            self._pool[name] = inst
            return True
        except Exception as e:
            logger.exception("[PluginManager] Error starting plugin %s: %s", name, e)
            # Ensure we don't leak stop events on failed startup.
            self._frame_stop_events.pop(name, None)
            raise

    def stop(self, name: str) -> bool:
        """
        Stops a plugin by name.

        Returns True if stopped, False if it wasn't running.
        Raises ValueError if the plugin is unknown.
        """
        if name not in self._registry:
            raise ValueError(f"Unknown plugin: {name}")

        inst = self._pool.pop(name, None)
        if not inst:
            return False

        logger.info("[PluginManager] Stopping plugin: %s", name)

        # Unblock any plugin thread currently waiting on the frame iterator.
        stop_evt = self._frame_stop_events.pop(name, None)
        if stop_evt:
            stop_evt.set()

        inst.stop()

        # If we just stopped the last plugin, clear overlays so stale UI doesn't linger.
        if not self._pool:
            self.clear_overlays()
        return True

    def stop_all(self):
        """Stops every running plugin."""
        for name in list(self._pool.keys()):
            self.stop(name)

    def _discover_plugins(self):
        """Finds all concrete Plugin subclasses in the 'plugins' package.

        Robustness over the naive scan:
        • a module that fails to import (missing optional dependency,
          syntax error) is skipped with a warning instead of aborting
          manager construction;
        • abstract subclasses are not registered, since start() could
          never instantiate them.
        """
        import plugins

        plugin_pkg_path = plugins.__path__
        plugin_pkg_name = plugins.__name__

        for _, mod_name, _ in pkgutil.walk_packages(path=plugin_pkg_path, prefix=f"{plugin_pkg_name}."):
            try:
                module = importlib.import_module(mod_name)
            except Exception as exc:
                logger.warning("[PluginManager] Skipping %s (import failed): %s", mod_name, exc)
                continue
            for _, obj in inspect.getmembers(module, inspect.isclass):
                # Ensure it's a concrete subclass of Plugin and not Plugin itself
                if issubclass(obj, Plugin) and obj is not Plugin and not inspect.isabstract(obj):
                    self._registry[obj.__name__] = obj
                    logger.debug("[PluginManager] Discovered plugin: %s", obj.__name__)

================================================
FILE: backend/plugins/test_yaw_plugin.py
================================================
import threading
import time
import os

from .base import Plugin
from control.strategies import DirectStrategy


class TestYawPlugin(Plugin):
    """
    Simple plugin to apply a small absolute yaw deflection using DirectStrategy
    for a short, fixed duration. Intended for isolating yaw behaviour on S2x.

    Behaviour:
    - Switch model to DirectStrategy (do NOT change expo)
    - Apply yaw = TEST_YAW_PCT (default +0.30), others 0
    - Maintain for TEST_YAW_DURATION_S seconds (default 5s) or until stopped
    - Then stop sending and restore previous strategy
    """

    def _on_start(self):
        """Read env tunables, force DirectStrategy, and start the ~30 Hz sender thread."""
        # Read tunables from environment
        try:
            self._yaw_pct = float(os.getenv("TEST_YAW_PCT", "0.30"))
        except Exception:
            self._yaw_pct = 0.30
        try:
            self._duration_s = float(os.getenv("TEST_YAW_DURATION_S", "5.0"))
        except Exception:
            self._duration_s = 5.0

        # Remember previous strategy to restore on stop
        self._prev_strategy = getattr(self.fc.model, "strategy", None)

        # Force DirectStrategy for absolute mapping; keep expo as-is
        try:
            self.fc.model.set_strategy(DirectStrategy())
            print(f"[TestYawPlugin] Using DirectStrategy, yaw={self._yaw_pct:+.2f} for {self._duration_s:.1f}s")
        except Exception as e:
            print(f"[TestYawPlugin] Failed to set DirectStrategy: {e}")

        # Start worker thread
        self._thread = threading.Thread(target=self._run, daemon=True)
        self._thread.start()

    def _on_stop(self):
        """Join the worker (unless called from it), zero the axes once, and
        restore whatever strategy was active before the test."""
        # Stop worker
        if hasattr(self, "_thread") and self._thread:
            # Let the run loop exit naturally by checking self.running.
            # If stop() is called from inside the worker thread (auto-stop),
            # do NOT join ourselves.
            try:
                if threading.current_thread() is not self._thread:
                    self._thread.join(timeout=1.0)
            except RuntimeError:
                # Defensive: joining current thread raises at runtime.
                pass

        # Send zeros once
        try:
            self.fc.set_axes(throttle=0, yaw=0, pitch=0, roll=0)
        except Exception:
            pass

        # Restore previous strategy
        try:
            if hasattr(self, "_prev_strategy") and self._prev_strategy is not None:
                self.fc.model.set_strategy(self._prev_strategy)
        except Exception:
            pass

    def _run(self):
        """Worker loop: send the fixed yaw deflection at ~30 Hz until the
        configured duration elapses or stop() is requested."""
        start = time.time()
        # Send commands at ~30 Hz; FC loop will sample the latest values
        interval = 1.0 / 30.0

        while self.running:
            now = time.time()
            elapsed = now - start
            if elapsed >= self._duration_s:
                # Auto-stop after duration
                try:
                    print("[TestYawPlugin] Duration elapsed; stopping")
                finally:
                    # Route through stop() so _on_stop() runs (zeros + restore strategy)
                    self.stop()
                    return

            # Apply yaw deflection only
            try:
                self.fc.set_axes_from("test_yaw", throttle=0.0, yaw=self._yaw_pct, pitch=0.0, roll=0.0)
            except Exception:
                pass

            time.sleep(interval)




================================================
FILE: backend/protocols/__init__.py
================================================
from .s2x_rc_protocol_adapter import S2xRCProtocolAdapter

__all__ = ['S2xRCProtocolAdapter']


================================================
FILE: backend/protocols/base_protocol_adapter.py
================================================
from abc import ABC, abstractmethod

class BaseProtocolAdapter(ABC):
    """Abstract contract every drone RC protocol adapter implements."""

    @abstractmethod
    def build_control_packet(self, drone_model):
        """Build a control packet for the specific drone protocol."""
        ...

    @abstractmethod
    def send_control_packet(self, packet):
        """Send the control packet to the drone."""
        ...

    @abstractmethod
    def toggle_debug(self):
        """Toggle debug packet logging."""
        ...

================================================
FILE: backend/protocols/base_video_protocol.py
================================================
from abc import ABC, abstractmethod
import socket
import threading
from typing import Optional

from models.video_frame import VideoFrame


class BaseVideoProtocolAdapter(ABC):
    """
    Transport-owning base class for video protocol adapters.

    Concrete subclasses own the UDP/TCP socket plus keep-alives and turn
    raw transport payloads into VideoFrame objects via an inner VideoModel.
    """

    def __init__(self, drone_ip: str, control_port: int, video_port: int):
        self.drone_ip = drone_ip
        self.control_port = control_port
        self.video_port = video_port
        # Lazily started by start_keepalive(); None until then.
        self._keepalive_thread: Optional[threading.Thread] = None

    # ────────── keep-alive helpers ────────── #
    def start_keepalive(self, interval: float = 1.0) -> None:
        """Spawn the daemon keep-alive thread (no-op if one is already alive)."""
        worker = self._keepalive_thread
        if worker is not None and worker.is_alive():
            return

        self._stop_evt = threading.Event()
        worker = threading.Thread(
            target=self._keepalive_loop,
            args=(interval,),
            daemon=True,
        )
        self._keepalive_thread = worker
        worker.start()

    def stop_keepalive(self) -> None:
        """Signal the keep-alive loop to exit and wait for its thread to finish."""
        evt = getattr(self, "_stop_evt", None)
        if evt is not None:
            evt.set()
        if self._keepalive_thread:
            self._keepalive_thread.join()

    def _keepalive_loop(self, interval: float) -> None:
        # wait() doubles as both the pacing delay and the shutdown check.
        while not self._stop_evt.is_set():
            self.send_start_command()
            self._stop_evt.wait(interval)

    # ────────── transport helpers ────────── #
    def recv_from_socket(self, sock) -> Optional[bytes]:
        """
        Read one payload chunk from `sock`.

        The default implementation is datagram-oriented (UDP); TCP subclasses
        should override. Returns None when the receive times out.
        """
        try:
            data, _addr = sock.recvfrom(4096)
        except socket.timeout:
            return None
        return data

    # ────────── abstract API ────────── #
    @abstractmethod
    def send_start_command(self) -> None:
        """Ask the drone to start (or keep) sending video."""
        raise NotImplementedError

    @abstractmethod
    def create_receiver_socket(self) -> socket.socket:
        """Return a socket configured and ready for recv()."""
        raise NotImplementedError

    @abstractmethod
    def handle_payload(self, payload: bytes) -> Optional[VideoFrame]:
        """
        Convert one transport payload into a VideoFrame, or return None
        while the frame is still incomplete.
        """
        raise NotImplementedError

================================================
FILE: backend/protocols/cooingdv_jieli_rc_protocol_adapter.py
================================================
"""
RC protocol adapter for the CooingDV/Jieli CTP backend.

This is the backend used by KY FPV's Jieli DeviceClient path. It keeps the
familiar CooingDV 8-byte TC stick payload, but sends it as decimal BYTE0..BYTE7
fields inside a CTP JSON packet over UDP instead of sending the raw 0x03/0x66
datagram used by the classic CooingDV apps.
"""

from __future__ import annotations

import logging
import socket
import threading
from typing import Final, Optional

from models.cooingdv_rc import CooingdvRcModel
from protocols.base_protocol_adapter import BaseProtocolAdapter
from utils.cooingdv_jieli_ctp import build_ctp_packet

logger = logging.getLogger(__name__)


class CooingdvJieliRcProtocolAdapter(BaseProtocolAdapter):
    """CooingDV Jieli/CTP UDP adapter used by KY FPV Jieli devices."""

    # Default network endpoint of the Jieli CTP control plane.
    DEFAULT_DRONE_IP: Final = "192.168.8.15"
    DEFAULT_PORT: Final = 2228
    # Seconds between CTP_KEEP_ALIVE packets.
    HEARTBEAT_INTERVAL: Final = 1.0

    # Framing bytes of the 8-byte TC stick payload (BYTE0 / BYTE7).
    START_MARKER: Final = 0x66
    END_MARKER: Final = 0x99

    # CTP topics sent by this adapter.
    TOPIC_CONTROL_MODE: Final = "CONTROL_MODE"
    TOPIC_FLYING_CTRL: Final = "FLYING_CTRL"
    TOPIC_KEEP_ALIVE: Final = "CTP_KEEP_ALIVE"

    OP_PUT: Final = "PUT"

    # Bit positions of the flags byte (BYTE5) in the stick payload.
    # NOTE(review): FLAG_UNLOCK_OR_RETURN and FLAG_LIGHT are declared but
    # never set by _build_flags below.
    FLAG_FAST_FLY: Final = 0x01
    FLAG_FAST_DROP: Final = 0x02
    FLAG_EMERGENCY_STOP: Final = 0x04
    FLAG_CIRCLE_TURN_END: Final = 0x08
    FLAG_HEADLESS: Final = 0x10
    FLAG_UNLOCK_OR_RETURN: Final = 0x20
    FLAG_LIGHT: Final = 0x40
    FLAG_GYRO: Final = 0x80

    def __init__(
        self,
        drone_ip: str = DEFAULT_DRONE_IP,
        control_port: int = DEFAULT_PORT,
        *,
        bind_port: Optional[int] = DEFAULT_PORT,
    ) -> None:
        """Open the UDP socket, start the heartbeat, and enter control mode.

        Args:
            drone_ip: Destination IP of the drone's CTP endpoint.
            control_port: Destination UDP port for all CTP packets.
            bind_port: Preferred local UDP port; None binds an ephemeral port.
        """
        self.drone_ip = drone_ip
        self.control_port = control_port
        self.debug_packets = False          # toggled via toggle_debug()
        self._pkt_counter = 0               # count of packets logged in debug mode
        self._heartbeat_running = False
        self._heartbeat_stop = threading.Event()
        self._heartbeat_thread: Optional[threading.Thread] = None

        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        # The Android UDPClientImpl binds DatagramSocket(2228). If that port is
        # busy on a development machine, fall back to an ephemeral local port so
        # packet sending still works.
        if bind_port is not None:
            try:
                self.sock.bind(("", bind_port))
            except OSError as exc:
                logger.warning(
                    "[cooingdv-jieli] Could not bind local UDP port %s (%s); using ephemeral port.",
                    bind_port,
                    exc,
                )
                self.sock.bind(("", 0))
        else:
            self.sock.bind(("", 0))

        # Begin keep-alives and tell the drone we are taking control.
        self.start_heartbeat()
        self._send_control_mode(True)
        logger.info(
            "[cooingdv-jieli] RC adapter initialized for %s:%s (local %s)",
            drone_ip,
            control_port,
            self.sock.getsockname()[1],
        )

    def build_control_packet(self, drone_model: CooingdvRcModel) -> bytes:
        """Build a CTP FLYING_CTRL packet from the shared CooingDV model."""
        payload = self._build_flying_payload(drone_model)
        # One-shot flags (takeoff/land/stop/flip/calibrate) are consumed here,
        # before the packet is sent, so they fire for exactly one packet.
        self._clear_one_shot_flags(drone_model)
        # Each payload byte travels as a decimal-string BYTE0..BYTE7 CTP param.
        params = {f"BYTE{i}": str(value) for i, value in enumerate(payload)}
        return build_ctp_packet(self.TOPIC_FLYING_CTRL, params)

    def send_control_packet(self, packet: bytes) -> None:
        """Send a control packet over UDP; socket errors are swallowed (best-effort)."""
        try:
            self.sock.sendto(packet, (self.drone_ip, self.control_port))
        except OSError:
            return

        if self.debug_packets:
            self._pkt_counter += 1
            logger.debug(
                "[cooingdv-jieli] #%05d %s",
                self._pkt_counter,
                packet.hex(" "),
            )

    def toggle_debug(self) -> bool:
        """Flip packet-hex logging and return the new state."""
        self.debug_packets = not self.debug_packets
        logger.info("[cooingdv-jieli] debug %s", "ON" if self.debug_packets else "OFF")
        return self.debug_packets

    def stop(self) -> None:
        """Shut down: stop the heartbeat, leave control mode, close the socket."""
        self.stop_heartbeat()
        self._send_control_mode(False)
        try:
            self.sock.close()
        except OSError:
            pass

    def start_heartbeat(self) -> None:
        """Start the keep-alive thread (no-op if one is already running)."""
        if self._heartbeat_thread and self._heartbeat_thread.is_alive():
            return

        self._heartbeat_stop.clear()
        self._heartbeat_running = True
        self._heartbeat_thread = threading.Thread(
            target=self._heartbeat_loop,
            daemon=True,
            name="CooingdvJieliHeartbeat",
        )
        self._heartbeat_thread.start()

    def stop_heartbeat(self) -> None:
        """Stop the keep-alive thread, waiting up to 2 s for it to exit."""
        self._heartbeat_running = False
        self._heartbeat_stop.set()
        if self._heartbeat_thread:
            self._heartbeat_thread.join(timeout=2.0)
            self._heartbeat_thread = None

    def _heartbeat_loop(self) -> None:
        # Send CTP_KEEP_ALIVE every HEARTBEAT_INTERVAL seconds until stopped;
        # wait() doubles as both the pacing delay and the shutdown check.
        while self._heartbeat_running and not self._heartbeat_stop.is_set():
            self._send_ctp(self.TOPIC_KEEP_ALIVE, {})
            self._heartbeat_stop.wait(self.HEARTBEAT_INTERVAL)

    def _send_control_mode(self, enabled: bool) -> None:
        # CONTROL_MODE state "1" claims control, "0" releases it.
        self._send_ctp(self.TOPIC_CONTROL_MODE, {"state": "1" if enabled else "0"})

    def _send_ctp(self, topic: str, params: dict[str, str]) -> None:
        # Best-effort CTP send; network failures are intentionally ignored.
        packet = build_ctp_packet(topic, params)
        try:
            self.sock.sendto(packet, (self.drone_ip, self.control_port))
        except OSError:
            pass

    def _build_flying_payload(self, model: CooingdvRcModel) -> list[int]:
        """Encode the model's axes/flags into the 8-byte TC stick payload."""
        b1 = self._clamp_axis(model.roll)
        b2 = self._clamp_axis(model.pitch)
        acc = self._clamp_axis(model.throttle)
        # NOTE(review): a throttle byte of exactly 1 is remapped to 0 —
        # presumably mirroring an app quirk; confirm against the Jieli client.
        if acc == 1:
            acc = 0
        turn = self._clamp_axis(model.yaw)
        flags = self._build_flags(model)
        # Checksum is the XOR of the four axis bytes and the flags byte.
        checksum = (b1 ^ b2 ^ acc ^ turn ^ flags) & 0xFF
        return [self.START_MARKER, b1, b2, acc, turn, flags, checksum, self.END_MARKER]

    def _build_flags(self, model: CooingdvRcModel) -> int:
        """Fold the model's boolean command flags into the BYTE5 bitmask."""
        flags = 0
        if model.takeoff_flag:
            flags |= self.FLAG_FAST_FLY
        if model.land_flag:
            flags |= self.FLAG_FAST_DROP
        if model.stop_flag:
            flags |= self.FLAG_EMERGENCY_STOP
        if model.flip_flag:
            flags |= self.FLAG_CIRCLE_TURN_END
        if model.headless_flag:
            flags |= self.FLAG_HEADLESS
        if model.calibration_flag:
            flags |= self.FLAG_GYRO
        return flags & 0xFF

    def _clear_one_shot_flags(self, model: CooingdvRcModel) -> None:
        # headless_flag is deliberately NOT cleared here — it behaves as a
        # persistent toggle rather than a one-shot command.
        model.takeoff_flag = False
        model.land_flag = False
        model.stop_flag = False
        model.flip_flag = False
        model.calibration_flag = False

    def _clamp_axis(self, value: float) -> int:
        # Clamp into 1..255 (0 is avoided except via the throttle quirk above).
        return max(1, min(255, int(value))) & 0xFF


================================================
FILE: backend/protocols/cooingdv_jieli_video_protocol.py
================================================
"""
First-pass video adapter for the CooingDV/Jieli backend.

KY FPV's Jieli path requests video with CTP JSON commands and receives an RTP
stream on local UDP ports. This adapter implements the JPEG RTP path, which is
the safest initial target because the frontend expects JPEG frames.
"""

from __future__ import annotations

import logging
import queue
import socket
import threading
import time
from typing import Final, Optional

from models.video_frame import VideoFrame
from protocols.base_video_protocol import BaseVideoProtocolAdapter
from utils.cooingdv_jieli_ctp import build_ctp_packet
from utils.wifi_uav_jpeg import EOI, generate_jpeg_headers

logger = logging.getLogger(__name__)


class CooingdvJieliVideoProtocolAdapter(BaseVideoProtocolAdapter):
    """Receives the Jieli RTP/JPEG stream and serves reassembled JPEG frames."""

    # Default endpoints used by the KY FPV Jieli devices.
    DEFAULT_DRONE_IP: Final = "192.168.8.15"
    DEFAULT_CONTROL_PORT: Final = 2228
    DEFAULT_VIDEO_PORT: Final = 6666
    DEFAULT_AUDIO_PORT: Final = 1234
    DEFAULT_SDP_PORT: Final = 6789

    # CTP topics that open/close the real-time stream.
    TOPIC_OPEN_FRONT_RTS: Final = "OPEN_RT_STREAM"
    TOPIC_CLOSE_FRONT_RTS: Final = "CLOSE_RT_STREAM"

    # "format" parameter values for OPEN_RT_STREAM.
    FORMAT_JPEG: Final = 0
    FORMAT_H264: Final = 1

    def __init__(
        self,
        drone_ip: str = DEFAULT_DRONE_IP,
        control_port: int = DEFAULT_CONTROL_PORT,
        video_port: int = DEFAULT_VIDEO_PORT,
        *,
        audio_port: int = DEFAULT_AUDIO_PORT,
        sdp_port: int = DEFAULT_SDP_PORT,
        width: int = 640,
        height: int = 360,
        fps: int = 30,
        debug: bool = False,
    ) -> None:
        """Configure ports and stream parameters; no sockets are opened until start()."""
        super().__init__(drone_ip, control_port, video_port)
        self.audio_port = audio_port
        self.sdp_port = sdp_port
        self.width = width      # fallback frame width when RTP/JPEG omits it
        self.height = height    # fallback frame height when RTP/JPEG omits it
        self.fps = fps
        self.debug = debug

        self._running = threading.Event()
        self._rx_thread: Optional[threading.Thread] = None
        self._sdp_thread: Optional[threading.Thread] = None
        self._rx_sock: Optional[socket.socket] = None
        self._sdp_sock: Optional[socket.socket] = None
        self._control_sock: Optional[socket.socket] = None
        # Bounded queue: at most 2 frames buffered; oldest is dropped on overflow.
        self._frame_q: "queue.Queue[VideoFrame]" = queue.Queue(maxsize=2)
        self._frame_id = 0
        # Partial JPEG scans keyed by RTP timestamp, flushed on the marker bit.
        # NOTE(review): entries for frames whose marker packet is lost are never
        # evicted, so this dict can grow on a lossy link — consider pruning.
        self._rtp_jpeg_buffers: dict[int, bytearray] = {}

    def start(self) -> None:
        """Open the control socket, start the SDP server and RTP receive thread."""
        if self.is_running():
            return
        self._running.set()
        self._control_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self._control_sock.bind(("", 0))
        self._start_sdp_server()
        self._send_open_stream()
        self._rx_thread = threading.Thread(
            target=self._rx_loop,
            daemon=True,
            name="CooingdvJieliVideoRx",
        )
        self._rx_thread.start()
        logger.info(
            "[cooingdv-jieli-video] listening for RTP/JPEG on UDP %s; SDP on TCP %s",
            self.video_port,
            self.sdp_port,
        )

    def stop(self) -> None:
        """Ask the drone to close the stream, close all sockets, join worker threads."""
        self._running.clear()
        self._send_close_stream()
        # Closing the sockets unblocks any recv/accept the worker threads are in.
        for sock in (self._rx_sock, self._sdp_sock, self._control_sock):
            if sock:
                try:
                    sock.close()
                except OSError:
                    pass
        if self._rx_thread:
            self._rx_thread.join(timeout=1.0)
        if self._sdp_thread:
            self._sdp_thread.join(timeout=1.0)

    def is_running(self) -> bool:
        """True while the receive thread is alive and stop() has not been called."""
        return self._running.is_set() and self._rx_thread is not None and self._rx_thread.is_alive()

    def get_frame(self, timeout: float = 1.0) -> Optional[VideoFrame]:
        """Pop the next reassembled frame, or None if nothing arrives within `timeout`."""
        try:
            return self._frame_q.get(timeout=timeout)
        except queue.Empty:
            return None

    def get_packets(self) -> list[bytes]:
        # Raw-packet access is not supported by this adapter.
        return []

    def send_start_command(self) -> None:
        """Re-issue the OPEN_RT_STREAM request (used as the keep-alive action)."""
        self._send_open_stream()

    def create_receiver_socket(self):
        # This adapter manages its own receive socket in _rx_loop, so the
        # base-class receiver-socket hook is unused and returns None.
        return None

    def handle_payload(self, payload: bytes) -> Optional[VideoFrame]:
        """Wrap one complete JPEG byte string in a VideoFrame with a rolling 16-bit id."""
        self._frame_id = (self._frame_id + 1) & 0xFFFF
        return VideoFrame(self._frame_id, payload, format_type="jpeg")

    def _send_open_stream(self) -> None:
        # Request a JPEG stream at the configured geometry and frame rate.
        self._send_ctp(
            self.TOPIC_OPEN_FRONT_RTS,
            {
                "format": str(self.FORMAT_JPEG),
                "w": str(self.width),
                "h": str(self.height),
                "fps": str(self.fps),
            },
        )

    def _send_close_stream(self) -> None:
        self._send_ctp(self.TOPIC_CLOSE_FRONT_RTS, {"status": "1"})

    def _send_ctp(self, topic: str, params: dict[str, str]) -> None:
        # Best-effort CTP send over the control socket; no-op before start().
        sock = self._control_sock
        if sock is None:
            return
        packet = build_ctp_packet(topic, params)
        try:
            sock.sendto(packet, (self.drone_ip, self.control_port))
        except OSError:
            pass

    def _start_sdp_server(self) -> None:
        # Serve a static SDP description over TCP for clients that want one.
        self._sdp_thread = threading.Thread(
            target=self._sdp_loop,
            daemon=True,
            name="CooingdvJieliSdpServer",
        )
        self._sdp_thread.start()

    def _sdp_loop(self) -> None:
        # Minimal SDP describing the audio (L16) and video (RTP/JPEG) streams.
        sdp = (
            "c=IN IP4 127.0.0.1\n"
            f"m=audio {self.audio_port} RTP/AVP 97\n"
            "a=rtpmap:97 L16/8000/1\n"
            "a=ptime:20\n"
            f"m=video {self.video_port} RTP/AVP 26\n"
            "a=rtpmap:26 JPEG/90000\n"
            f"a=framerate:{self.fps}"
        ).encode("utf-8")

        try:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.bind(("", self.sdp_port))
            sock.listen(1)
            # Short accept timeout so the loop notices _running being cleared.
            sock.settimeout(0.5)
            self._sdp_sock = sock
        except OSError as exc:
            logger.warning("[cooingdv-jieli-video] SDP server unavailable: %s", exc)
            return

        while self._running.is_set():
            try:
                conn, _ = sock.accept()
            except socket.timeout:
                continue
            except OSError:
                break
            with conn:
                try:
                    conn.sendall(sdp)
                except OSError:
                    pass

    def _rx_loop(self) -> None:
        """Receive RTP datagrams, reassemble frames, and publish them to the queue."""
        try:
            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.bind(("", self.video_port))
            sock.settimeout(0.5)
            self._rx_sock = sock
        except OSError as exc:
            logger.warning("[cooingdv-jieli-video] RTP socket unavailable: %s", exc)
            self._running.clear()
            return

        while self._running.is_set():
            try:
                packet, _ = sock.recvfrom(65535)
            except socket.timeout:
                continue
            except OSError:
                break

            frame = self._handle_rtp_packet(packet)
            if frame is None:
                continue
            try:
                self._frame_q.put(frame, timeout=0.1)
            except queue.Full:
                # Queue full: drop the oldest frame to make room for the newest.
                try:
                    self._frame_q.get_nowait()
                    self._frame_q.put(frame, timeout=0.1)
                except (queue.Empty, queue.Full):
                    pass

    def _handle_rtp_packet(self, packet: bytes) -> Optional[VideoFrame]:
        """Parse an RTP header; dispatch JPEG payloads, else probe for raw JPEG."""
        # Anything too short or not RTP version 2 is treated as possibly-raw JPEG.
        if len(packet) < 12 or (packet[0] >> 6) != 2:
            return self._handle_possible_raw_jpeg(packet)

        csrc_count = packet[0] & 0x0F
        marker = bool(packet[1] & 0x80)
        payload_type = packet[1] & 0x7F
        timestamp = int.from_bytes(packet[4:8], "big")
        offset = 12 + (csrc_count * 4)
        # Skip the optional RTP header extension if the X bit is set.
        if packet[0] & 0x10:
            if len(packet) < offset + 4:
                return None
            ext_len = int.from_bytes(packet[offset + 2 : offset + 4], "big") * 4
            offset += 4 + ext_len
        # NOTE(review): the RTP padding bit (packet[0] & 0x20) is not handled,
        # so padded packets would carry trailing pad bytes into the payload.
        payload = packet[offset:]

        if payload_type == 26:
            # Payload type 26 = RTP/JPEG (RFC 2435).
            return self._handle_rtp_jpeg(timestamp, payload, marker)
        return self._handle_possible_raw_jpeg(payload)

    def _handle_rtp_jpeg(self, timestamp: int, payload: bytes, marker: bool) -> Optional[VideoFrame]:
        """Accumulate RFC 2435 JPEG fragments; return a frame on the marker packet."""
        if len(payload) < 8:
            return None
        # RFC 2435 main header: type-specific(1), fragment offset(3), type(1),
        # Q(1), width/8(1), height/8(1). Type and Q bytes are not inspected here.
        # NOTE(review): Q >= 128 means in-band quantization tables follow the
        # header (RFC 2435 §3.1.8); those would currently be treated as scan data.
        fragment_offset = int.from_bytes(payload[1:4], "big")
        width = payload[6] * 8 or self.width
        height = payload[7] * 8 or self.height
        scan = payload[8:]

        # First fragment: synthesize JPEG headers for the advertised geometry.
        if fragment_offset == 0:
            self._rtp_jpeg_buffers[timestamp] = bytearray(generate_jpeg_headers(width, height))

        buf = self._rtp_jpeg_buffers.setdefault(timestamp, bytearray())
        buf.extend(scan)
        # The RTP marker bit flags the final fragment of the frame.
        if not marker:
            return None

        data = bytes(buf)
        self._rtp_jpeg_buffers.pop(timestamp, None)
        # Ensure the stream is terminated with an EOI marker for the decoder.
        if not data.endswith(EOI):
            data += bytes(EOI)
        return self.handle_payload(data)

    def _handle_possible_raw_jpeg(self, payload: bytes) -> Optional[VideoFrame]:
        """Extract a complete SOI..EOI JPEG from an arbitrary payload, if present."""
        start = payload.find(b"\xff\xd8")
        end = payload.rfind(b"\xff\xd9")
        if start >= 0 and end > start:
            return self.handle_payload(payload[start : end + 2])
        if self.debug and payload:
            logger.debug("[cooingdv-jieli-video] ignored payload %sB", len(payload))
        return None


================================================
FILE: backend/protocols/cooingdv_rc_protocol_adapter.py
================================================
"""
RC Protocol Adapter for CooingDV drones.

The CooingDV apps share a UDP control plane on port 7099, but they do not all
use the same control packet layout. The apps listen for UDP telemetry, classify
the drone as either a short "TC" packet variant or an extended "GL" packet
variant, and then switch the encoder accordingly.

Shared behavior observed in the apps:
  - Heartbeat {0x01, 0x01} every second
  - RTSP preview at rtsp://192.168.1.1:7070/webcam (handled elsewhere)
  - UDP telemetry on the same socket used for control

Important: {0x08, 0x01} is used by the apps when leaving control mode. It is
not a startup "init" packet.
"""

import os
import socket
import threading
import logging
from typing import Final, Optional

from models.cooingdv_rc import CooingdvRcModel
from protocols.base_protocol_adapter import BaseProtocolAdapter

logger = logging.getLogger(__name__)


class CooingdvRcProtocolAdapter(BaseProtocolAdapter):
    """Protocol adapter for the CooingDV drone family."""

    DEFAULT_DRONE_IP: Final = "192.168.1.1"
    DEFAULT_PORT: Final = 7099
    HEARTBEAT_INTERVAL: Final = 1.0
    RECV_TIMEOUT: Final = 0.25
    RECV_BUFFER_SIZE: Final = 256

    # Packet markers
    PREFIX: Final = 0x03
    START_MARKER: Final = 0x66
    EXTENDED_MARKER: Final = 0x14
    END_MARKER: Final = 0x99

    # Discrete UDP commands
    HEARTBEAT_COMMAND: Final = bytes([0x01, 0x01])
    STOP_COMMAND: Final = bytes([0x08, 0x01])

    # Variant names
    DEVICE_UNKNOWN: Final = "unknown"
    DEVICE_TC: Final = "tc"
    DEVICE_GL: Final = "gl"

    # TC packet flag bits
    FLAG_TAKEOFF: Final = 0x01
    FLAG_LAND: Final = 0x02
    FLAG_STOP: Final = 0x04
    FLAG_FLIP: Final = 0x08
    FLAG_HEADLESS: Final = 0x10
    FLAG_CALIBRATE: Final = 0x80

    # GL packet flag groups
    GL_FLAG_ONE_KEY_ACTION: Final = 0x01
    GL_FLAG_STOP: Final = 0x02
    GL_FLAG_CALIBRATE: Final = 0x04
    GL_FLAG_FLIP: Final = 0x08
    GL_FLAG_HEADLESS: Final = 0x01

    # Resolution / capability IDs seen in the CooingDV apps. The apps use the
    # first byte of received UDP telemetry to decide whether the drone belongs
    # to the GL or TC family.
    GL_RESOLUTION_IDS: Final = frozenset(set(range(90, 102)) | {82, 85, 103, 105})
    KNOWN_RESOLUTION_IDS: Final = frozenset({
        3, 5, 9, 11, 12, 19, 20, 21, 23, 24, 26, 27, 29, 30, 31,
        41, 43, 44, 45, 51, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72,
        80, 81, 82, 83, 84, 85, 86, 87, 90, 91, 92, 93, 94, 95, 96,
        97, 98, 99, 100, 101, 103, 105,
    })

    def __init__(
        self,
        drone_ip: str = DEFAULT_DRONE_IP,
        control_port: int = DEFAULT_PORT,
        variant: Optional[str] = None,
    ) -> None:
        self.drone_ip = drone_ip
        self.control_port = control_port

        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        # Match Java DatagramSocket() semantics by binding to an ephemeral port
        # so replies return to the same socket we use for telemetry detection.
        self.sock.bind(("", 0))
        self.sock.settimeout(self.RECV_TIMEOUT)

        self.debug_packets = False
        self._pkt_counter = 0

        env_variant = os.getenv("COOINGDV_VARIANT")
        self._variant_override = self._normalise_variant(variant or env_variant)
        self._detected_variant: Optional[str] = None
        self.detected_resolution_id: Optional[int] = None
        self.last_rx_packet: bytes = b""
        self.last_rx_addr: Optional[tuple[str, int]] = None
        self._override_mismatch_reported = False

        # Heartbeat thread
        self._heartbeat_running = False
        self._heartbeat_thread: Optional[threading.Thread] = None
        self._heartbeat_stop = threading.Event()

        # Receive thread
        self._receiver_running = False
        self._receiver_thread: Optional[threading.Thread] = None
        self._receiver_stop = threading.Event()

        self.start_receiver()
        self.start_heartbeat()

        active_variant = self._active_variant()
        active_label = active_variant.upper()
        if self._variant_override:
            logger.info(
                "[cooingdv] RC adapter initialized for %s:%s (forced %s, local %s)",
                drone_ip,
                control_port,
                active_label,
                self.sock.getsockname()[1],
            )
        else:
            logger.info(
                "[cooingdv] RC adapter initialized for %s:%s (auto-detect, TC fallback, local %s)",
                drone_ip,
                control_port,
                self.sock.getsockname()[1],
            )

    def start_heartbeat(self) -> None:
        """Start the heartbeat thread to keep the UDP session alive."""
        if self._heartbeat_thread and self._heartbeat_thread.is_alive():
            return

        self._heartbeat_stop.clear()
        self._heartbeat_running = True
        self._heartbeat_thread = threading.Thread(
            target=self._heartbeat_loop,
            daemon=True,
            name="CooingdvHeartbeat",
        )
        self._heartbeat_thread.start()

    def stop_heartbeat(self) -> None:
        """Stop the heartbeat thread."""
        self._heartbeat_running = False
        self._heartbeat_stop.set()
        if self._heartbeat_thread:
            self._heartbeat_thread.join(timeout=2.0)
            self._heartbeat_thread = None

    def _heartbeat_loop(self) -> None:
        """Send heartbeat packets every HEARTBEAT_INTERVAL seconds."""
        while self._heartbeat_running and not self._heartbeat_stop.is_set():
            try:
                self.sock.sendto(self.HEARTBEAT_COMMAND, (self.drone_ip, self.control_port))
                if self.debug_packets:
                    logger.debug("[cooingdv] heartbeat sent: %s", self.HEARTBEAT_COMMAND.hex())
            except OSError:
                pass
            self._heartbeat_stop.wait(self.HEARTBEAT_INTERVAL)

    def start_receiver(self) -> None:
        """Start the telemetry receiver used for TC/GL auto-detection."""
        if self._receiver_thread and self._receiver_thread.is_alive():
            return

        self._receiver_stop.clear()
        self._receiver_running = True
        self._receiver_thread = threading.Thread(
            target=self._receiver_loop,
            daemon=True,
            name="CooingdvUdpRx",
        )
        self._receiver_thread.start()

    def stop_receiver(self) -> None:
        """Stop the telemetry receiver thread."""
        self._receiver_running = False
        self._receiver_stop.set()
        if self._receiver_thread:
            self._receiver_thread.join(timeout=2.0)
            self._receiver_thread = None

    def _receiver_loop(self) -> None:
        while self._receiver_running and not self._receiver_stop.is_set():
            try:
                packet, addr = self.sock.recvfrom(self.RECV_BUFFER_SIZE)
            except socket.timeout:
                continue
            except OSError:
                if self._receiver_stop.is_set():
                    break
                continue

            if not packet:
                continue

            self.last_rx_packet = packet
            self.last_rx_addr = addr
            self._process_received_packet(packet, addr)

    def _process_received_packet(self, packet: bytes, addr: tuple[str, int]) -> None:
        """Track UDP telemetry and auto-detect the control variant."""
        if self.debug_packets:
            logger.debug(
                "[cooingdv] udp recv %sB from %s:%s: %s",
                len(packet),
                addr[0],
                addr[1],
                packet.hex(),
            )

        variant = self._detect_variant(packet)
        if variant is None:
            return

        resolution_id = packet[0]
        if resolution_id in self.KNOWN_RESOLUTION_IDS:
            self.detected_resolution_id = resolution_id

        if self._variant_override:
            if variant != self._variant_override and not self._override_mismatch_reported:
                logger.warning(
                    "[cooingdv] Ignoring auto-detected %s telemetry; using override %s.",
                    variant.upper(),
                    self._variant_override.upper(),
                )
                self._override_mismatch_reported = True
            return

        if variant == self._detected_variant:
            return

        self._detected_variant = variant
        resolution_text = (
            f" resolution-id={self.detected_resolution_id}"
            if self.detected_resolution_id is not None
            else ""
        )
        logger.info("[cooingdv] Auto-detected %s control variant.%s", variant.upper(), resolution_text)

    def stop(self) -> None:
        """Clean shutdown of the adapter."""
        self.stop_heartbeat()
        try:
            self.sock.sendto(self.STOP_COMMAND, (self.drone_ip, self.control_port))
        except OSError:
            pass
        self.stop_receiver()
        try:
            self.sock.close()
        except Exception:
            pass

    # ------------------------------------------------------------------ #
    # BaseProtocolAdapter interface
    # ------------------------------------------------------------------ #
    def build_control_packet(self, drone_model: CooingdvRcModel) -> bytes:
        """Build a control packet for the detected CooingDV variant."""
        variant = self._active_variant()
        if variant == self.DEVICE_GL:
            packet = self._build_gl_control_packet(drone_model)
        else:
            packet = self._build_tc_control_packet(drone_model)

        self._clear_one_shot_flags(drone_model)
        return packet

    def send_control_packet(self, packet: bytes) -> None:
        """Send a control packet to the drone via UDP."""
        try:
            self.sock.sendto(packet, (self.drone_ip, self.control_port))
        except OSError:
            return

        if self.debug_packets:
            self._pkt_counter += 1
            variant = self._active_variant().upper()
            hex_dump = " ".join(f"{b:02x}" for b in packet)
            logger.debug("[cooingdv] #%05d [%s] %s", self._pkt_counter, variant, hex_dump)

            if len(packet) == 9:
                roll, pitch, throttle, yaw = packet[2], packet[3], packet[4], packet[5]
                flags = packet[6]
                logger.debug("  Controls: R:%s P:%s T:%s Y:%s", roll, pitch, throttle, yaw)
                flag_names = self._decode_tc_flags(flags)
                if flag_names:
                    logger.debug("  Flags: %s", ", ".join(flag_names))
            elif len(packet) == 21:
                roll, pitch, throttle, yaw = packet[3], packet[4], packet[5], packet[6]
                flags1, flags2 = packet[7], packet[8]
                logger.debug("  Controls: R:%s P:%s T:%s Y:%s", roll, pitch, throttle, yaw)
                flag_names = self._decode_gl_flags(flags1, flags2)
                if flag_names:
                    logger.debug("  Flags: %s", ", ".join(flag_names))

    def toggle_debug(self) -> bool:
        """Toggle debug packet logging on/off."""
        self.debug_packets = not self.debug_packets
        state = "ON" if self.debug_packets else "OFF"
        logger.info("[cooingdv] debug %s", state)
        return self.debug_packets

    # ------------------------------------------------------------------ #
    # Helper methods
    # ------------------------------------------------------------------ #
    def _active_variant(self) -> str:
        """Return the currently active control variant."""
        if self._variant_override:
            return self._variant_override
        if self._detected_variant:
            return self._detected_variant
        return self.DEVICE_TC

    def _normalise_variant(self, value: Optional[str]) -> Optional[str]:
        if value is None:
            return None

        raw = value.strip().lower()
        if raw in ("", "auto", "detect", "autodetect"):
            return None
        if raw in ("tc", "e88", "short"):
            return self.DEVICE_TC
        if raw in ("gl", "flow", "extended"):
            return self.DEVICE_GL

        logger.warning("[cooingdv] Unknown variant override '%s', using auto-detect.", value)
        return None

    def _detect_variant(self, packet: bytes) -> Optional[str]:
        """Infer the control family from the first telemetry byte."""
        first_byte = packet[0]
        if first_byte in self.GL_RESOLUTION_IDS:
            return self.DEVICE_GL
        if first_byte in self.KNOWN_RESOLUTION_IDS:
            return self.DEVICE_TC
        return None

    def _build_tc_control_packet(self, model: CooingdvRcModel) -> bytes:
        pkt = bytearray(9)
        pkt[0] = self.PREFIX
        pkt[1] = self.START_MARKER
        pkt[2] = self._clamp_axis(model.roll)
        pkt[3] = self._clamp_axis(model.pitch)
        pkt[4] = self._clamp_axis(model.throttle)
        pkt[5] = self._clamp_axis(model.yaw)
        pkt[6] = self._build_tc_flags(model)
        pkt[7] = self._calculate_checksum(pkt[2:7])
        pkt[8] = self.END_MARKER
        return bytes(pkt)

    def _build_gl_control_packet(self, model: CooingdvRcModel) -> bytes:
        """Assemble the 21-byte extended (GL) control frame.

        A prefix byte is followed by a 20-byte inner frame whose bytes 8-17
        are reserved (zero). The checksum covers only the four axes and the
        two flag bytes, mirroring the stock app.
        """
        axes = (
            self._clamp_axis(model.roll),
            self._clamp_axis(model.pitch),
            self._clamp_axis(model.throttle),
            self._clamp_axis(model.yaw),
        )
        flags1, flags2 = self._build_gl_flags(model)
        checked_region = bytes([*axes, flags1, flags2])

        frame = bytearray(20)
        frame[0] = self.START_MARKER
        frame[1] = self.EXTENDED_MARKER
        frame[2:8] = checked_region
        frame[18] = self._calculate_checksum(checked_region)
        frame[19] = self.END_MARKER

        return bytes([self.PREFIX]) + bytes(frame)

    def _clamp_axis(self, value: float) -> int:
        """Clamp axis values into the byte range used by the apps."""
        clamped = max(1, min(255, int(value)))
        return clamped & 0xFF

    def _build_tc_flags(self, model: CooingdvRcModel) -> int:
        """Pack the model's button state into the single TC flag byte."""
        bit_map = (
            (model.takeoff_flag, self.FLAG_TAKEOFF),
            (model.land_flag, self.FLAG_LAND),
            (model.stop_flag, self.FLAG_STOP),
            (model.flip_flag, self.FLAG_FLIP),
            (model.headless_flag, self.FLAG_HEADLESS),
            (model.calibration_flag, self.FLAG_CALIBRATE),
        )
        result = 0
        for active, bit in bit_map:
            if active:
                result |= bit
        return result & 0xFF

    def _build_gl_flags(self, model: CooingdvRcModel) -> tuple[int, int]:
        """
        Pack the model's button state into the two GL flag bytes.

        The decompiled GL apps expose no separate takeoff/land bits; a single
        one-key action bit covers either button, which we replicate here.
        """
        flags1 = 0
        for active, bit in (
            (model.takeoff_flag or model.land_flag, self.GL_FLAG_ONE_KEY_ACTION),
            (model.stop_flag, self.GL_FLAG_STOP),
            (model.calibration_flag, self.GL_FLAG_CALIBRATE),
            (model.flip_flag, self.GL_FLAG_FLIP),
        ):
            if active:
                flags1 |= bit

        flags2 = self.GL_FLAG_HEADLESS if model.headless_flag else 0
        return flags1 & 0xFF, flags2 & 0xFF

    def _clear_one_shot_flags(self, model: CooingdvRcModel) -> None:
        """Reset momentary button flags after they have been sent once.

        ``headless_flag`` is deliberately left untouched — it is not a
        one-shot action.
        """
        for attr in ("takeoff_flag", "land_flag", "stop_flag", "flip_flag", "calibration_flag"):
            setattr(model, attr, False)

    def _decode_tc_flags(self, flags: int) -> list[str]:
        names: list[str] = []
        if flags & self.FLAG_TAKEOFF:
            names.append("TAKEOFF")
        if flags & self.FLAG_LAND:
            names.append("LAND")
        if flags & self.FLAG_STOP:
            names.append("STOP")
        if flags & self.FLAG_FLIP:
            names.append("FLIP")
        if flags & self.FLAG_HEADLESS:
            names.append("HEADLESS")
        if flags & self.FLAG_CALIBRATE:
            names.append("CALIBRATE")
        return names

    def _decode_gl_flags(self, flags1: int, flags2: int) -> list[str]:
        names: list[str] = []
        if flags1 & self.GL_FLAG_ONE_KEY_ACTION:
            names.append("ONE_KEY")
        if flags1 & self.GL_FLAG_STOP:
            names.append("STOP")
        if flags1 & self.GL_FLAG_CALIBRATE:
            names.append("CALIBRATE")
        if flags1 & self.GL_FLAG_FLIP:
            names.append("FLIP")
        if flags2 & self.GL_FLAG_HEADLESS:
            names.append("HEADLESS")
        return names

    def _calculate_checksum(self, data: bytes) -> int:
        checksum = 0
        for b in data:
            checksum ^= b
        return checksum & 0xFF



================================================
FILE: backend/protocols/cooingdv_video_protocol.py
================================================
"""
Video Protocol Adapter for Cooingdv drones.

Uses RTSP streaming instead of custom UDP protocol - much simpler than 
the S2x and WiFi UAV implementations.

The drone broadcasts video at: rtsp://192.168.1.1:7070/webcam

This adapter uses OpenCV's VideoCapture to connect to the RTSP stream
and provides frames through the standard turbodrone interface.
"""

import logging
import cv2
import queue
import threading
import time
from typing import Final, Optional, List

from models.cooingdv_video_model import CooingdvVideoModel
from models.video_frame import VideoFrame
from protocols.base_video_protocol import BaseVideoProtocolAdapter

logger = logging.getLogger(__name__)


class CooingdvVideoProtocolAdapter(BaseVideoProtocolAdapter):
    """
    Video protocol adapter for cooingdv drones using RTSP streaming.

    Unlike the UDP-based adapters for S2x and WiFi UAV drones, this adapter
    connects to a standard RTSP stream via OpenCV, making it much simpler
    and more reliable.

    Features:
    - Automatic reconnection on stream failure or frame timeout
    - Bounded, thread-safe frame queue (oldest frame dropped when full)
    """

    DEFAULT_DRONE_IP: Final = "192.168.1.1"
    DEFAULT_RTSP_PORT: Final = 7070
    DEFAULT_CONTROL_PORT: Final = 7099

    RTSP_PATH: Final = "/webcam"

    # Reconnection settings
    RECONNECT_DELAY: Final = 2.0  # seconds between reconnect attempts
    MAX_RECONNECT_ATTEMPTS: Final = 10  # attempts per _reconnect() call

    # Frame capture settings
    FRAME_TIMEOUT: Final = 5.0  # seconds without frame triggers reconnect
    READ_FAILURE_BACKOFF: Final = 0.05  # seconds between failed reads

    # Quality used when re-encoding decoded RTSP frames as JPEG.
    JPEG_QUALITY: Final = 85

    def __init__(
        self,
        drone_ip: str = DEFAULT_DRONE_IP,
        control_port: int = DEFAULT_CONTROL_PORT,
        video_port: int = DEFAULT_RTSP_PORT,
        *,
        debug: bool = False,
    ):
        """
        Args:
            drone_ip: Address of the drone's RTSP server.
            control_port: Kept for interface parity; RTSP needs no control channel.
            video_port: RTSP port on the drone.
            debug: Force verbose per-frame logging even if the logger level is higher.
        """
        super().__init__(drone_ip, control_port, video_port)
        self.model = CooingdvVideoModel()

        self.debug = debug or logger.isEnabledFor(logging.DEBUG)
        self._dbg = logger.debug if self.debug else (lambda *a, **k: None)

        # Build RTSP URL
        self.rtsp_url = f"rtsp://{drone_ip}:{video_port}{self.RTSP_PATH}"
        self._dbg(f"[cooingdv-video] RTSP URL: {self.rtsp_url}")

        # OpenCV capture object, guarded by a lock because both the receiver
        # thread and stop() may open/release it.
        self._cap: Optional[cv2.VideoCapture] = None
        self._cap_lock = threading.Lock()

        # Threading
        self._running = False
        self._rx_thread: Optional[threading.Thread] = None
        self._frame_q: "queue.Queue[VideoFrame]" = queue.Queue(maxsize=2)

        # Packet buffer kept only for interface compatibility — RTSP exposes
        # no raw datagrams, so get_packets() always returns an empty list.
        self._pkt_lock = threading.Lock()
        self._pkt_buffer: List[bytes] = []

        # Statistics
        self.frames_ok = 0
        self.frames_dropped = 0
        self.reconnect_count = 0
        self._last_frame_time = time.time()

    # ------------------------------------------------------------------ #
    # RTSP Connection Management
    # ------------------------------------------------------------------ #
    def _open_stream(self) -> bool:
        """
        Open the RTSP stream. Returns True on success.

        Uses specific OpenCV settings optimized for RTSP:
        - CAP_FFMPEG backend for better RTSP support
        - Low buffer size to reduce latency
        """
        with self._cap_lock:
            if self._cap is not None:
                self._cap.release()

            self._dbg(f"[cooingdv-video] Opening RTSP stream: {self.rtsp_url}")

            # Create capture with FFMPEG backend for better RTSP handling
            self._cap = cv2.VideoCapture(self.rtsp_url, cv2.CAP_FFMPEG)

            # Configure for low latency
            self._cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)

            if self._cap.isOpened():
                self._dbg("[cooingdv-video] Stream opened successfully")
                self._last_frame_time = time.time()
                return True
            else:
                self._dbg("[cooingdv-video] Failed to open stream")
                self._cap = None
                return False

    def _close_stream(self) -> None:
        """Close the RTSP stream."""
        with self._cap_lock:
            if self._cap is not None:
                self._cap.release()
                self._cap = None
                self._dbg("[cooingdv-video] Stream closed")

    def _reconnect(self) -> bool:
        """Close the stream, then retry opening it up to MAX_RECONNECT_ATTEMPTS times."""
        self.reconnect_count += 1
        self._dbg(f"[cooingdv-video] Reconnection attempt #{self.reconnect_count}")

        self._close_stream()
        time.sleep(self.RECONNECT_DELAY)

        for attempt in range(self.MAX_RECONNECT_ATTEMPTS):
            if not self._running:
                return False

            if self._open_stream():
                return True

            self._dbg(f"[cooingdv-video] Reconnect attempt {attempt + 1}/{self.MAX_RECONNECT_ATTEMPTS} failed")
            time.sleep(self.RECONNECT_DELAY)

        return False

    # ------------------------------------------------------------------ #
    # BaseVideoProtocolAdapter interface
    # ------------------------------------------------------------------ #
    def send_start_command(self) -> None:
        """
        For RTSP, we don't need to send a start command.
        The stream starts when we connect.
        """
        pass

    def create_receiver_socket(self):
        """
        Not used for RTSP - OpenCV handles the connection.
        Returns None for compatibility.
        """
        return None

    def handle_payload(self, payload: bytes) -> Optional[VideoFrame]:
        """
        Wrap one encoded RTSP frame using the shared video-model interface.
        """
        return self.model.ingest_chunk(payload=payload)

    # ------------------------------------------------------------------ #
    # Receiver Thread API (matches existing interface)
    # ------------------------------------------------------------------ #
    def start(self) -> None:
        """Start the video receiver thread (no-op if already running)."""
        if self._rx_thread and self._rx_thread.is_alive():
            return

        self._running = True
        self._frame_q = queue.Queue(maxsize=2)

        # Open initial connection. Failure here is not fatal: the receiver
        # loop reconnects on its own, but the user should see a warning
        # even when debug logging is off.
        if not self._open_stream():
            logger.warning("[cooingdv-video] Could not open stream on start; receiver will retry")

        # Start receiver thread
        self._rx_thread = threading.Thread(
            target=self._rx_loop,
            daemon=True,
            name="CooingdvVideoRx",
        )
        self._rx_thread.start()
        self._dbg("[cooingdv-video] Receiver thread started")

    def _rx_loop(self) -> None:
        """
        Main receiver loop: read frames from the RTSP stream, re-encode them
        as JPEG, and push them onto the bounded frame queue.
        """
        while self._running:
            # Snapshot the capture handle under the lock.
            with self._cap_lock:
                cap = self._cap

            if cap is None or not cap.isOpened():
                if not self._reconnect():
                    self._dbg("[cooingdv-video] Failed to reconnect, retrying...")
                    time.sleep(self.RECONNECT_DELAY)
                continue

            # Try to read a frame
            try:
                ret, frame = cap.read()

                if not ret or frame is None:
                    # Reconnect after a sustained silence; otherwise back off
                    # briefly and retry the read.
                    if time.time() - self._last_frame_time > self.FRAME_TIMEOUT:
                        self._dbg("[cooingdv-video] Frame timeout, reconnecting...")
                        self._reconnect()
                    else:
                        time.sleep(self.READ_FAILURE_BACKOFF)
                    continue

                self._last_frame_time = time.time()

                # Encode as JPEG (OpenCV handles BGR conversion internally).
                # Check the encoder's success flag instead of silently
                # forwarding a potentially empty buffer.
                encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), self.JPEG_QUALITY]
                ok, jpeg_data = cv2.imencode('.jpg', frame, encode_param)
                if not ok:
                    self._dbg("[cooingdv-video] JPEG encode failed, dropping frame")
                    continue

                video_frame = self.handle_payload(jpeg_data.tobytes())
                if video_frame is None:
                    continue

                self.frames_ok += 1

                # Add to queue; when full, drop the oldest frame so the
                # consumer always sees the freshest video.
                try:
                    self._frame_q.put(video_frame, timeout=0.1)
                except queue.Full:
                    self.frames_dropped += 1
                    try:
                        self._frame_q.get_nowait()
                        self._frame_q.put(video_frame, timeout=0.1)
                    except (queue.Empty, queue.Full):
                        pass

            except cv2.error as e:
                self._dbg(f"[cooingdv-video] OpenCV error: {e}")
                self._reconnect()
            except Exception as e:
                self._dbg(f"[cooingdv-video] Unexpected error: {e}")
                time.sleep(0.1)

    def stop(self) -> None:
        """Stop the video receiver and clean up."""
        self._dbg("[cooingdv-video] Stopping...")
        self._running = False

        # Wait for thread to finish
        if self._rx_thread and self._rx_thread.is_alive():
            self._rx_thread.join(timeout=2.0)

        # Close stream
        self._close_stream()

        self._dbg(
            f"[cooingdv-video] Stopped. Stats: "
            f"frames_ok={self.frames_ok}, "
            f"frames_dropped={self.frames_dropped}, "
            f"reconnects={self.reconnect_count}"
        )

    def is_running(self) -> bool:
        """Check if the receiver is running."""
        return self._running and self._rx_thread is not None and self._rx_thread.is_alive()

    def get_frame(self, timeout: float = 1.0) -> Optional[VideoFrame]:
        """Get the next available frame, or None after `timeout` seconds."""
        try:
            return self._frame_q.get(timeout=timeout)
        except queue.Empty:
            return None

    def get_packets(self) -> List[bytes]:
        """
        Get raw packets - not really applicable for RTSP.
        Returns empty list for compatibility.
        """
        return []

    # ------------------------------------------------------------------ #
    # Keep-alive (not needed for RTSP, but required by base class)
    # ------------------------------------------------------------------ #
    def start_keepalive(self, interval: float = 1.0) -> None:
        """RTSP doesn't need keep-alive packets."""
        pass

    def stop_keepalive(self) -> None:
        """RTSP doesn't need keep-alive packets."""
        pass



================================================
FILE: backend/protocols/debug_rc_protocol_adapter.py
================================================
import logging

log = logging.getLogger(__name__)

class DebugRcProtocolAdapter:
    """No-op RC protocol adapter that only logs outgoing control data."""

    def __init__(self):
        log.info("Debug RC protocol adapter initialized.")

    def send_control_data(self, data: bytes):
        # Hex-encode byte payloads so the log stays readable.
        rendered = data.hex() if isinstance(data, bytes) else data
        log.debug(f"Debug: send_control_data({rendered})")

================================================
FILE: backend/protocols/debug_video_protocol.py
================================================
import cv2
import logging
import time
import threading
import queue
from typing import Optional, List

from models.video_frame import VideoFrame
from protocols.base_video_protocol import BaseVideoProtocolAdapter
from utils.dropping_queue import DroppingQueue

log = logging.getLogger(__name__)


class DebugVideoProtocolAdapter(BaseVideoProtocolAdapter):
    """
    Drop-in video protocol adapter that serves JPEG frames from the local
    webcam, letting the rest of the stack run without a drone or network.
    """
    def __init__(self, camera_index: int = 0, debug: bool = False, max_queue_size=100):
        super().__init__(drone_ip="localhost", control_port=0, video_port=0)

        self.camera_index = camera_index
        self.debug = debug
        self._cap = None
        self._frame_id = 0
        self._running = threading.Event()
        self._thread: Optional[threading.Thread] = None
        self.frame_queue = DroppingQueue(maxsize=max_queue_size)

    def start(self):
        """Open the webcam and launch the background capture thread."""
        self._cap = cv2.VideoCapture(self.camera_index)
        if not self._cap.isOpened():
            raise RuntimeError(f"Cannot open local camera #{self.camera_index}")

        self._running.set()
        self._thread = threading.Thread(target=self._capture_loop, daemon=True)
        self._thread.start()
        log.info(f"[debug-video] webcam #{self.camera_index} opened")

    def stop(self):
        """Ask the capture thread to exit, then release the camera."""
        if not self._running.is_set():
            return

        self._running.clear()
        if self._thread:
            self._thread.join(timeout=1.0)

        if self._cap and self._cap.isOpened():
            self._cap.release()

        log.info("[debug-video] webcam released")

    def is_running(self) -> bool:
        """True while the capture loop is meant to be active."""
        return self._running.is_set()

    def get_frame(self, timeout: float = 1.0) -> Optional[VideoFrame]:
        """Return the next frame, or None if nothing arrives within `timeout`."""
        try:
            return self.frame_queue.get(timeout=timeout)
        except queue.Empty:
            return None

    def get_packets(self) -> List[bytes]:
        # A webcam produces no raw network packets; the service still calls this.
        return []

    def _capture_loop(self):
        """Grab, JPEG-encode, and enqueue frames until told to stop."""
        while self._running.is_set():
            if not self._cap or not self._cap.isOpened():
                log.error("[debug-video] camera is not open.")
                self._running.clear()
                break

            grabbed, bgr = self._cap.read()
            if not grabbed:
                log.warning("[debug-video] failed to grab frame")
                time.sleep(0.1)
                continue

            encoded, jpg = cv2.imencode(".jpg", bgr, [int(cv2.IMWRITE_JPEG_QUALITY), 85])
            if not encoded:
                log.warning("[debug-video] JPEG encode failed")
                continue

            # Frame ids wrap at 16 bits, mimicking a real stream counter.
            self._frame_id = (self._frame_id + 1) & 0xFFFF
            try:
                self.frame_queue.put(
                    VideoFrame(
                        frame_id=self._frame_id,
                        data=jpg.tobytes(),
                        format_type="jpeg",
                    )
                )
            except queue.Full:
                # Expected whenever the consumer is slower than the camera.
                pass

        log.info("[debug-video] capture loop stopped.")

    # --- Stubs for methods that are not used by this adapter ---
    def create_receiver_socket(self):
        return None

    def send_start_command(self):
        pass

    def handle_payload(self, payload: bytes) -> Optional[VideoFrame]:
        return None


================================================
FILE: backend/protocols/no_video_protocol.py
================================================
import logging
import queue
import threading
import time
from typing import Optional

from models.video_frame import VideoFrame
from protocols.base_video_protocol import BaseVideoProtocolAdapter

logger = logging.getLogger(__name__)


class NoVideoProtocolAdapter(BaseVideoProtocolAdapter):
    """
    Stand-in video adapter for RC backends whose video transport has not
    been implemented yet.

    It lets the web service start normally, without pretending to decode a
    stream; the startup log makes clear the selected drone type is RC-only.
    """

    def __init__(
        self,
        drone_ip: str = "0.0.0.0",
        control_port: int = 0,
        video_port: int = 0,
        *,
        reason: str = "video transport not implemented",
    ) -> None:
        super().__init__(drone_ip, control_port, video_port)
        self.reason = reason
        self._running = threading.Event()

    def start(self) -> None:
        """Mark the adapter active and announce why no video will appear."""
        logger.warning("[no-video] %s", self.reason)
        self._running.set()

    def stop(self) -> None:
        """Mark the adapter inactive."""
        self._running.clear()

    def is_running(self) -> bool:
        """True between start() and stop()."""
        return self._running.is_set()

    def get_frame(self, timeout: float = 1.0) -> Optional[VideoFrame]:
        """Block for `timeout` while active, then signal an empty queue."""
        if self._running.is_set():
            time.sleep(timeout)
        raise queue.Empty

    def get_packets(self) -> list[bytes]:
        """There is no transport, so there are never any raw packets."""
        return []

    def send_start_command(self) -> None:
        pass

    def create_receiver_socket(self):
        return None

    def handle_payload(self, payload: bytes) -> Optional[VideoFrame]:
        return None


================================================
FILE: backend/protocols/s2x_rc_protocol_adapter.py
================================================
from protocols.base_protocol_adapter import BaseProtocolAdapter
import socket

class S2xRCProtocolAdapter(BaseProtocolAdapter):
    """Protocol adapter for S2x drones (S20, S29)."""

    def __init__(self, drone_ip, control_port=8080):
        self.drone_ip = drone_ip
        self.control_port = control_port
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.debug_packets = False
        self.packet_counter = 0
        self.swap_yaw_roll = False
        # Stock Macrochip HY packets scale roll/pitch for app speed tiers.
        # Keep full-scale as the default so existing S2X behavior is unchanged.
        self.speed_scale_by_index = {
            0: 0.7,  # PL FPV uses 0.7; older HiTurbo builds use 0.6.
            1: 0.8,
            2: 1.0,
        }

    def build_control_packet(self, drone_model):
        """Build a 20-byte S2x control packet from the model's current state."""
        roll, yaw = drone_model.roll, drone_model.yaw
        if self.swap_yaw_roll:
            roll, yaw = yaw, roll

        tier = getattr(drone_model, "speed_index", 2)
        speed_scale = self.speed_scale_by_index.get(tier, 1.0)

        pkt = bytearray(20)
        pkt[0] = 0x66
        pkt[1] = drone_model.speed & 0xFF

        # Remap from the constrained range to the full 0-255 wire range.
        # Macrochip HY control only speed-scales roll/pitch, not throttle/yaw.
        pkt[2] = int(self._remap_to_full_range(self._scale_axis(roll, drone_model, speed_scale), drone_model)) & 0xFF
        pkt[3] = int(self._remap_to_full_range(self._scale_axis(drone_model.pitch, drone_model, speed_scale), drone_model)) & 0xFF
        pkt[4] = int(self._remap_to_full_range(drone_model.throttle, drone_model)) & 0xFF
        pkt[5] = int(self._remap_to_full_range(yaw, drone_model)) & 0xFF

        # Byte 6: one-shot command bits. The stock HiTurbo app uses the same
        # bit for takeoff and land; emergency stop is a separate bit.
        flags = 0x00
        if drone_model.takeoff_flag or drone_model.land_flag:
            flags |= 0x01
        if drone_model.stop_flag:
            flags |= 0x02
        if drone_model.calibration_flag:
            flags |= 0x04
        pkt[6] = flags

        # Byte 7 - historical TurboDrone base mode bits plus optional headless
        # flag. Stock apps commonly send 0x02, but 0x0a is proven on hardware.
        pkt[7] = 0x0a | (0x01 if drone_model.headless_flag else 0x00)

        # Bytes 8-17 stay zero-filled; byte 18 is an XOR checksum over 2-17.
        chk = 0
        for value in pkt[2:18]:
            chk ^= value
        pkt[18] = chk & 0xFF
        pkt[19] = 0x99

        # One-shot flags are consumed once they have been encoded.
        for attr in ("takeoff_flag", "land_flag", "stop_flag", "calibration_flag"):
            setattr(drone_model, attr, False)

        return bytes(pkt)

    def send_control_packet(self, packet):
        """Send one control packet to the drone, optionally dumping it."""
        self.sock.sendto(packet, (self.drone_ip, self.control_port))

        if not self.debug_packets:
            return

        self.packet_counter += 1

        hex_dump = ' '.join(f'{b:02x}' for b in packet)
        print(f"Packet #{self.packet_counter}: {hex_dump}")
        print(f"  Controls: R:{packet[2]} P:{packet[3]} T:{packet[4]} Y:{packet[5]}")

        flags6, flags7 = packet[6], packet[7]
        bits = (
            (flags6 & 0x01, "FLY_OR_LAND"),
            (flags6 & 0x02, "STOP"),
            (flags6 & 0x04, "CALIBRATE"),
            (flags7 & 0x01, "HEADLESS"),
        )
        flags_desc = [name for bit, name in bits if bit]

        print(f"  Flags: {flags_desc}")
        print(f"  Checksum: 0x{packet[18]:02x}")
        print()

    def toggle_debug(self):
        """Flip debug packet logging on/off and return the new state."""
        self.debug_packets = not self.debug_packets
        return self.debug_packets

    def _remap_to_full_range(self, value, model):
        """Remap from the model's constrained range to the 0-255 wire range."""
        center = model.center_value
        if value >= center:
            # Map center...max_control to 128...255
            return 128.0 + (value - center) * (255.0 - 128.0) / (model.max_control_value - center)
        # Map min_control...center to 0...128
        return (value - model.min_control_value) * 128.0 / (center - model.min_control_value)

    def _scale_axis(self, value, model, scale):
        """Scale a raw axis around center using the stock app speed tier."""
        if scale >= 1.0:
            return value
        center = model.center_value
        return center + (value - center) * scale

================================================
FILE: backend/protocols/s2x_video_protocol.py
================================================
import ipaddress
import socket
import threading
import queue
from typing import Optional, List

from models.s2x_video_model import S2xVideoModel
from models.video_frame import VideoFrame
from protocols.base_video_protocol import BaseVideoProtocolAdapter


class S2xVideoProtocolAdapter(BaseVideoProtocolAdapter):
    """Transport + header parser for S2x JPEG stream"""

    SYNC_BYTES = b"\x40\x40"  # every valid datagram starts with these two bytes
    EOS_MARKER = b"\x23\x23"  # optional 2-byte trailer stripped before reassembly
    HEADER_LEN = 8        # S2x packets always use an 8-byte header
    LINK_DEAD_TIMEOUT = 8.0   # camera can stay silent for ~5 s on boot

    def __init__(
        self,
        drone_ip: str = "172.16.10.1",
        control_port: int = 8080,
        video_port: int = 8888,
        debug: bool = False,
    ):
        """Bind the video socket immediately; threads are launched later by start().

        Args:
            drone_ip: Camera/drone address the start command is sent to.
            control_port: UDP port the start/keepalive command targets.
            video_port: Local UDP port the camera streams JPEG slices to.
            debug: When True, print which local port the socket bound to.
        """
        super().__init__(drone_ip, control_port, video_port)
        self.model = S2xVideoModel()  # reassembles slices into whole JPEG frames
        self.local_ip = self._discover_local_ip()  # advertised in the start command payload
        self._sock_lock = threading.Lock()
        self._sock = self.create_receiver_socket()
        self._keepalive_thread: Optional[threading.Thread] = None
        self._keepalive_stop: Optional[threading.Event] = None
        if debug:
            print(f"[s2x] Video socket on *:{self._sock.getsockname()[1]}")
        self._running = threading.Event()
        self._rx_thread: Optional[threading.Thread] = None
        # Small bounded queue: stale frames are dropped rather than buffered.
        self._frame_q: "queue.Queue[VideoFrame]" = queue.Queue(maxsize=2)
        self._pkt_lock = threading.Lock()
        self._pkt_buffer: List[bytes] = []  # raw datagrams handed out via get_packets()

    # ────────── BaseVideoProtocolAdapter ────────── #
    def send_start_command(self) -> None:
        """Ask the camera to stream to us: 0x08 followed by our IPv4 bytes."""
        payload = b"\x08" + ipaddress.IPv4Address(self.local_ip).packed
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
            sock.sendto(payload, (self.drone_ip, self.control_port))
        print(f"[video] Start command sent ({payload.hex(' ')})")

    def start_keepalive(self, interval: float = 2.0) -> None:
        """Starts a thread to periodically send the start command."""
        if self._keepalive_thread is None:
            self._keepalive_stop = threading.Event()
            self._keepalive_thread = threading.Thread(
                target=self._keepalive_loop,
                args=(interval, self._keepalive_stop),
                daemon=True,
                name="S2xVideoKeepAlive",
            )
            self._keepalive_thread.start()

    def stop_keepalive(self) -> None:
        """Stops the keepalive thread."""
        if self._keepalive_stop:
            self._keepalive_stop.set()
        if self._keepalive_thread:
            self._keepalive_thread.join(timeout=1.0)
            self._keepalive_thread = None

    def create_receiver_socket(self) -> socket.socket:
        """UDP socket bound to the drone's video port."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind(("0.0.0.0", self.video_port))
        # 1 s timeout so the receiver loop can notice shutdown requests.
        sock.settimeout(1.0)
        return sock

    def get_receiver_socket(self) -> socket.socket:
        """Returns the main data socket, required by the new VideoReceiverService."""
        with self._sock_lock:
            return self._sock

    def recv_from_socket(self, sock: socket.socket) -> Optional[bytes]:
        """Receives from the socket and handles timeouts."""
        try:
            return sock.recv(4096)  # Use a reasonable buffer size
        except socket.timeout:
            # Timeout is routine — callers treat None as "no data yet".
            return None

    def handle_payload(self, payload: bytes) -> Optional[VideoFrame]:
        """
        1. Validate & strip the native 8-byte S2x header
        2. Forward the slice payload to the model

        PL FPV's native `libvison_main.so` parser treats the header as:
        0-1 sync `0x40 0x40`, 2-3 little-endian frame id, 4 total chunks,
        5 chunk id, 6-7 little-endian datagram length.

        Returns a complete VideoFrame when this slice finishes a frame,
        otherwise None (invalid packets are silently discarded).
        """
        if len(payload) <= self.HEADER_LEN or payload[:2] != self.SYNC_BYTES:
            return None

        frame_id = int.from_bytes(payload[2:4], "little")
        total_chunks = payload[4]
        slice_id_raw = payload[5]
        declared_len = int.from_bytes(payload[6:8], "little")

        # Sanity-check the header fields before trusting the payload.
        if total_chunks == 0 or total_chunks > 100:
            return None
        if slice_id_raw >= total_chunks:
            return None
        if declared_len != len(payload):
            return None

        # Native 872 parsing copies declared_len - 10 bytes from offset 8,
        # effectively dropping the 2-byte datagram trailer.
        body_end = declared_len
        if declared_len >= self.HEADER_LEN + len(self.EOS_MARKER) and payload[
            declared_len - len(self.EOS_MARKER) : declared_len
        ] == self.EOS_MARKER:
            body_end -= len(self.EOS_MARKER)

        body = payload[self.HEADER_LEN:body_end]

        return self.model.ingest_chunk(
            stream_id=frame_id,
            chunk_id=slice_id_raw,
            total_chunks=total_chunks,
            payload=body,
        )

    def stop(self) -> None:
        """Shuts down the adapter, required by the new VideoReceiverService."""
        print("[s2x] Stopping protocol adapter.")
        self.stop_keepalive()
        self._running.clear()
        if self._rx_thread and self._rx_thread.is_alive():
            self._rx_thread.join(timeout=1.0)
        # Closing the socket also unblocks a receiver stuck in recv().
        try:
            self._sock.close()
        except Exception:
            pass

    # ────────── helpers ────────── #
    def _keepalive_loop(self, interval: float, stop_event: threading.Event) -> None:
        """Resend the start command every `interval` seconds until stopped."""
        while not stop_event.is_set():
            self.send_start_command()
            stop_event.wait(interval)

    # ────────── Receiver thread API (service expects) ────────── #
    def start(self) -> None:
        """Start the keepalive sender and the receive loop (idempotent)."""
        if self._rx_thread and self._rx_thread.is_alive():
            return
        self._running.set()
        self.start_keepalive(2.0)

        def _rx_loop() -> None:
            # Receive datagrams, stash the raw bytes for get_packets(), and
            # enqueue any completed frames for get_frame().
            sock = self.get_receiver_socket()
            while self._running.is_set():
                try:
                    payload = self.recv_from_socket(sock)
                    if not payload:
                        continue
                    with self._pkt_lock:
                        self._pkt_buffer.append(payload)
                    frame = self.handle_payload(payload)
                    if frame is not None:
                        try:
                            self._frame_q.put(frame, timeout=0.2)
                        except queue.Full:
                            # Consumer is behind — drop this frame.
                            pass
                except OSError:
                    # Socket closed (e.g. by stop()) — exit the loop.
                    break
                except Exception:
                    continue

        self._rx_thread = threading.Thread(target=_rx_loop, daemon=True, name="S2xVideoRx")
        self._rx_thread.start()

    def is_running(self) -> bool:
        """True while the receive loop is active and its thread is alive."""
        return self._running.is_set() and self._rx_thread is not None and self._rx_thread.is_alive()

    def get_frame(self, timeout: float = 1.0) -> Optional[VideoFrame]:
        """Return the next assembled frame, or None after `timeout` seconds."""
        try:
            return self._frame_q.get(timeout=timeout)
        except queue.Empty:
            return None

    def get_packets(self) -> List[bytes]:
        """Drain and return the raw datagrams received since the last call."""
        with self._pkt_lock:
            packets = self._pkt_buffer
            self._pkt_buffer = []
            return packets

    def _discover_local_ip(self) -> str:
        """Find the local IP that routes to the drone.

        UDP connect() sends no packets; it only makes the OS choose the
        outgoing interface, whose address we then read back.
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            s.connect((self.drone_ip, 1))
            return s.getsockname()[0]
        finally:
            s.close()


================================================
FILE: backend/protocols/wifi_cam_rc_protocol_adapter.py
================================================
"""RC protocol adapter for WiFi_CAM native UDP drones."""

from __future__ import annotations

import logging
import socket
from typing import Final, Literal

from models.wifi_cam_rc import WifiCamRcModel
from protocols.base_protocol_adapter import BaseProtocolAdapter

logger = logging.getLogger(__name__)

CommandMode = Literal["auto", "short", "extended"]


class WifiCamRcProtocolAdapter(BaseProtocolAdapter):
    """Send raw WiFi_CAM stick packets to the native command socket.

    Two wire layouts exist: an 8-byte "short" packet and a 20-byte
    "extended" packet.  Which one is used follows ``command_mode`` and,
    in ``auto`` mode, the camera type reported by the video adapter.
    """

    DEFAULT_DRONE_IP: Final = "192.168.4.153"
    DEFAULT_COMMAND_PORT: Final = 8090

    # Framing bytes shared by both packet layouts.
    START_MARKER: Final = 0x66
    END_MARKER: Final = 0x99
    EXTENDED_LENGTH: Final = 0x14

    # Short-layout flag bits (single flag byte).
    FLAG_TAKEOFF: Final = 0x01
    FLAG_LAND: Final = 0x02
    FLAG_EMERGENCY: Final = 0x04
    FLAG_ROTATE: Final = 0x08
    FLAG_HEADLESS: Final = 0x10
    FLAG_CALIBRATE: Final = 0x80

    # Extended-layout flag bits (first and second flag bytes).
    EXT_FLAG_TAKEOFF_OR_LAND: Final = 0x01
    EXT_FLAG_EMERGENCY: Final = 0x02
    EXT_FLAG_CALIBRATE: Final = 0x04
    EXT_FLAG_ROTATE: Final = 0x08
    EXT_FLAG_HEADLESS: Final = 0x01
    EXT_FLAG_ALTITUDE_HOLD: Final = 0x02

    def __init__(
        self,
        drone_ip: str = DEFAULT_DRONE_IP,
        command_port: int = DEFAULT_COMMAND_PORT,
        *,
        command_mode: CommandMode = "auto",
    ) -> None:
        self.drone_ip = drone_ip
        self.command_port = command_port
        self.command_mode = self._normalize_mode(command_mode)
        self.camera_type = 0
        self.debug_packets = False
        self.packet_counter = 0
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.sock.bind(("", 0))  # ephemeral local port

    def build_control_packet(self, drone_model: WifiCamRcModel) -> bytes:
        """Encode the model's current state, then clear one-shot flags."""
        builder = (
            self._build_extended_packet
            if self._uses_extended_mode()
            else self._build_short_packet
        )
        encoded = builder(drone_model)
        self._clear_one_shot_flags(drone_model)
        return encoded

    def send_control_packet(self, packet: bytes) -> None:
        """Transmit one packet; send failures are silently ignored."""
        try:
            self.sock.sendto(packet, (self.drone_ip, self.command_port))
        except OSError:
            return

        if not self.debug_packets:
            return
        self.packet_counter += 1
        logger.debug("[wifi-cam-rc] #%05d %s", self.packet_counter, packet.hex(" "))

    def set_camera_type(self, camera_type: int) -> None:
        """Record the detected camera type (drives layout choice in auto mode)."""
        self.camera_type = int(camera_type)
        if self.command_mode != "auto":
            return
        mode_name = "extended" if self._uses_extended_mode() else "short"
        logger.info(
            "[wifi-cam-rc] camera type %s selected %s command mode",
            camera_type,
            mode_name,
        )

    def stop(self) -> None:
        """Close the command socket, ignoring close errors."""
        try:
            self.sock.close()
        except OSError:
            pass

    def toggle_debug(self) -> bool:
        """Flip packet-hex debug logging and return the new state."""
        self.debug_packets = not self.debug_packets
        state = "ON" if self.debug_packets else "OFF"
        logger.info("[wifi-cam-rc] debug %s", state)
        return self.debug_packets

    def _build_short_packet(self, model: WifiCamRcModel) -> bytes:
        """Assemble the 8-byte layout: 66 | axes×4 | flags | xor | 99."""
        axes = [
            self._right_data(self._axis(channel))
            for channel in (model.roll, model.pitch, model.throttle, model.yaw)
        ]
        body = bytes(axes) + bytes([self._build_short_flags(model)])
        checksum = self._right_data(self._xor(body))
        return bytes([self.START_MARKER]) + body + bytes([checksum, self.END_MARKER])

    def _build_extended_packet(self, model: WifiCamRcModel) -> bytes:
        """Assemble the 20-byte layout: 66 14 | axes×4 | flags×2 | pad | xor | 99."""
        flags1, flags2 = self._build_extended_flags(model)
        pkt = bytearray(20)
        pkt[0] = self.START_MARKER
        pkt[1] = self.EXTENDED_LENGTH
        for offset, channel in enumerate(
            (model.roll, model.pitch, model.throttle, model.yaw), start=2
        ):
            pkt[offset] = self._right_data(self._axis(channel))
        pkt[6] = flags1
        pkt[7] = flags2
        # Bytes 8..17 stay zero; the checksum covers bytes 2..17.
        pkt[18] = self._right_data(self._xor(pkt[2:18]))
        pkt[19] = self.END_MARKER
        return bytes(pkt)

    def _build_short_flags(self, model: WifiCamRcModel) -> int:
        """OR together the short-layout flag bits for active model flags."""
        flags = 0
        for active, bit in (
            (model.takeoff_flag, self.FLAG_TAKEOFF),
            (model.land_flag, self.FLAG_LAND),
            (model.stop_flag, self.FLAG_EMERGENCY),
            (model.flip_flag, self.FLAG_ROTATE),
            (model.headless_flag, self.FLAG_HEADLESS),
            (model.calibration_flag, self.FLAG_CALIBRATE),
        ):
            if active:
                flags |= bit
        return flags & 0xFF

    def _build_extended_flags(self, model: WifiCamRcModel) -> tuple[int, int]:
        """Build both extended-layout flag bytes."""
        # The decompiled app maps both one-key fly and land to the same bit.
        byte_one = 0
        for active, bit in (
            (model.takeoff_flag or model.land_flag, self.EXT_FLAG_TAKEOFF_OR_LAND),
            (model.stop_flag, self.EXT_FLAG_EMERGENCY),
            (model.calibration_flag, self.EXT_FLAG_CALIBRATE),
            (model.flip_flag, self.EXT_FLAG_ROTATE),
        ):
            if active:
                byte_one |= bit
        byte_two = 0
        for active, bit in (
            (model.headless_flag, self.EXT_FLAG_HEADLESS),
            (model.altitude_hold_flag, self.EXT_FLAG_ALTITUDE_HOLD),
        ):
            if active:
                byte_two |= bit
        return byte_one & 0xFF, byte_two & 0xFF

    def _clear_one_shot_flags(self, model: WifiCamRcModel) -> None:
        """Reset momentary flags; headless/altitude-hold are left untouched."""
        for attr in ("takeoff_flag", "land_flag", "stop_flag",
                     "flip_flag", "calibration_flag"):
            setattr(model, attr, False)

    def _uses_extended_mode(self) -> bool:
        """Decide the packet layout; in auto mode camera type 2 means extended."""
        mode = self.command_mode
        if mode == "auto":
            return self.camera_type == 2
        return mode == "extended"

    def _normalize_mode(self, command_mode: str) -> CommandMode:
        """Sanitize the configured mode string, falling back to auto."""
        candidate = (command_mode or "auto").strip().lower()
        if candidate in ("auto", "short", "extended"):
            return candidate  # type: ignore[return-value]
        logger.warning("[wifi-cam-rc] Unknown command mode %r; using auto.", command_mode)
        return "auto"

    def _axis(self, value: float) -> int:
        """Clamp a stick value into the 0..255 byte range."""
        clamped = min(255, max(0, int(value)))
        return clamped & 0xFF

    def _right_data(self, value: int) -> int:
        """Escape bytes that collide with the frame markers by adding one."""
        escaped = value & 0xFF
        if escaped == self.START_MARKER or escaped == self.END_MARKER:
            escaped = (escaped + 1) & 0xFF
        return escaped

    def _xor(self, values: bytes | bytearray) -> int:
        """XOR-fold a byte sequence into a single checksum byte."""
        acc = 0
        for byte in values:
            acc ^= byte
        return acc & 0xFF


================================================
FILE: backend/protocols/wifi_cam_video_protocol.py
================================================
"""Video protocol adapter for WiFi_CAM native UDP drones."""

from __future__ import annotations

import logging
import queue
import socket
import threading
from typing import Final, Optional

from models.video_frame import VideoFrame
from protocols.base_video_protocol import BaseVideoProtocolAdapter

logger = logging.getLogger(__name__)


class WifiCamVideoProtocolAdapter(BaseVideoProtocolAdapter):
    """Start the WiFi_CAM MJPEG stream and reassemble native JPEG chunks.

    The drone replies on the same ephemeral local UDP port that sent the
    start opcode (see ``create_receiver_socket``).  Every video datagram
    begins with an 8-byte header followed by a JPEG chunk; chunks sharing
    a frame id are concatenated until the datagram flagged as final
    completes the picture (``handle_payload`` / ``_finish_frame``).
    """

    DEFAULT_DRONE_IP: Final = "192.168.4.153"
    DEFAULT_COMMAND_PORT: Final = 8090
    DEFAULT_VIDEO_PORT: Final = 8080

    # Two-byte opcodes sent to the drone's video port.
    START_STREAM: Final = b"\x42\x76"
    STOP_STREAM: Final = b"\x42\x77"
    ROTATE: Final = b"\x42\x78"
    CAMERA_SWITCH: Final = b"\x42\x79"

    NATIVE_PACKET_SIZE: Final = 0x5C0  # recvfrom() buffer size per datagram
    HEADER_SIZE: Final = 8             # proprietary header preceding each chunk
    CHUNK_SIZE: Final = 0x5B8          # payload bytes assumed per full chunk
    SOI: Final = b"\xff\xd8"           # JPEG start-of-image marker
    EOI: Final = b"\xff\xd9"           # JPEG end-of-image marker

    def __init__(
        self,
        drone_ip: str = DEFAULT_DRONE_IP,
        control_port: int = DEFAULT_COMMAND_PORT,
        video_port: int = DEFAULT_VIDEO_PORT,
        *,
        debug: bool = False,
    ) -> None:
        super().__init__(drone_ip, control_port, video_port)
        self.debug = debug
        self._running = threading.Event()
        self._rx_thread: Optional[threading.Thread] = None
        self._sock: Optional[socket.socket] = None
        # Small queue: _rx_loop drops the oldest frame when it is full.
        self._frame_q: "queue.Queue[VideoFrame]" = queue.Queue(maxsize=2)
        self._pkt_lock = threading.Lock()
        self._pkt_buffer: list[bytes] = []
        # RC adapter notified when a camera type is detected (set_rc_adapter).
        self._rc_adapter = None

        # Reassembly state for the frame currently in flight.
        self._current_frame_id: Optional[int] = None
        self._current_chunk_index = 0
        self._frame_buffer = bytearray()
        self._frame_counter = 0
        self.camera_type = 0

    def set_rc_adapter(self, rc_adapter) -> None:
        """Register the RC adapter so detected camera types propagate to it."""
        self._rc_adapter = rc_adapter
        if self.camera_type and hasattr(rc_adapter, "set_camera_type"):
            rc_adapter.set_camera_type(self.camera_type)

    def start(self) -> None:
        """Open the receive socket, send the start opcode, spawn the rx thread."""
        if self.is_running():
            return
        self._sock = self.create_receiver_socket()
        self._running.set()
        self.send_start_command()
        self._rx_thread = threading.Thread(
            target=self._rx_loop,
            daemon=True,
            name="WifiCamVideoRx",
        )
        self._rx_thread.start()
        logger.info("[wifi-cam-video] listening on local UDP %s", self._sock.getsockname()[1])

    def stop(self) -> None:
        """Stop streaming: signal the loop, send the stop opcode, close up."""
        self._running.clear()
        self._send_command(self.STOP_STREAM)
        sock = self._sock
        if sock:
            try:
                # Closing unblocks the rx thread's recvfrom with OSError.
                sock.close()
            except OSError:
                pass
        if self._rx_thread:
            self._rx_thread.join(timeout=1.0)
        self._rx_thread = None
        self._sock = None

    def is_running(self) -> bool:
        """Report whether the receive loop is currently active."""
        return self._running.is_set() and self._rx_thread is not None and self._rx_thread.is_alive()

    def get_frame(self, timeout: float = 1.0) -> Optional[VideoFrame]:
        """Pop the next completed frame, or None after *timeout* seconds."""
        try:
            return self._frame_q.get(timeout=timeout)
        except queue.Empty:
            return None

    def get_packets(self) -> list[bytes]:
        """Atomically drain and return every buffered raw packet."""
        with self._pkt_lock:
            packets = self._pkt_buffer
            self._pkt_buffer = []
            return packets

    def send_start_command(self) -> None:
        """Send the stream-start opcode (42 76) to the video port."""
        self._send_command(self.START_STREAM)

    def create_receiver_socket(self) -> socket.socket:
        """Create the UDP socket used for both commands and video replies."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # The Android app does not bind to 8080; it receives replies on the
        # same ephemeral local UDP port used to send 42 76 to the drone.
        sock.bind(("", 0))
        sock.settimeout(0.5)
        return sock

    def handle_payload(self, payload: bytes) -> Optional[VideoFrame]:
        """Consume one datagram; return a completed frame or None.

        8-byte camera-type announcements (0x55 … 0x99) only update state.
        Video datagrams: byte 0 = frame id, byte 1 = final-chunk marker,
        byte 2 = total chunk count, byte 3 = resolution code, byte 7 is
        forwarded as ``retain`` (semantics not established here — TODO
        confirm against the app), bytes 8+ are JPEG chunk data.
        """
        camera_type = self._parse_camera_type(payload)
        if camera_type is not None:
            self._set_camera_type(camera_type)
            return None

        if len(payload) < self.HEADER_SIZE:
            return None

        frame_id = payload[0]
        final_marker = payload[1]
        total_chunks = payload[2]
        resolution = payload[3]
        retain = payload[7]
        chunk = payload[self.HEADER_SIZE :]

        # A new frame id discards any partial frame and starts over.
        if self._current_frame_id != frame_id:
            self._current_frame_id = frame_id
            self._current_chunk_index = 0
            self._frame_buffer = bytearray()

        chunk_index = self._current_chunk_index
        self._frame_buffer.extend(chunk)

        frame = None
        # Final only when the marker is set AND the expected chunk count
        # was seen; a lost chunk leaves the count short and the partial
        # frame is dropped when the next frame id arrives.
        is_final = final_marker == 1 and total_chunks > 0 and chunk_index + 1 == total_chunks
        if is_final:
            frame = self._finish_frame(chunk, chunk_index, resolution, retain)
            self._current_chunk_index = 0
            self._current_frame_id = None
            self._frame_buffer = bytearray()
        else:
            self._current_chunk_index += 1

        return frame

    def switch_camera(self) -> None:
        """Send the camera-switch opcode (42 79)."""
        self._send_command(self.CAMERA_SWITCH)

    def rotate(self) -> None:
        """Send the image-rotate opcode (42 78)."""
        self._send_command(self.ROTATE)

    def _rx_loop(self) -> None:
        """Receive datagrams until stopped; enqueue frames, newest wins."""
        sock = self._sock
        if sock is None:
            return
        while self._running.is_set():
            try:
                packet, _ = sock.recvfrom(self.NATIVE_PACKET_SIZE)
            except socket.timeout:
                # 0.5 s socket timeout so _running is re-checked regularly.
                continue
            except OSError:
                # Socket closed by stop().
                break

            with self._pkt_lock:
                self._pkt_buffer.append(packet)

            frame = self.handle_payload(packet)
            if frame is None:
                continue
            try:
                self._frame_q.put(frame, timeout=0.1)
            except queue.Full:
                # Drop the oldest queued frame to make room for this one.
                try:
                    self._frame_q.get_nowait()
                    self._frame_q.put(frame, timeout=0.1)
                except (queue.Empty, queue.Full):
                    pass

    def _send_command(self, command: bytes) -> None:
        """Best-effort send of a two-byte opcode to the drone's video port."""
        sock = self._sock
        if sock is None:
            return
        try:
            sock.sendto(command, (self.drone_ip, self.video_port))
        except OSError:
            pass

    def _parse_camera_type(self, payload: bytes) -> Optional[int]:
        """Return the announced camera type (1, 2, or 0 for any other
        0x55…0x99 status packet), or None if not an announcement."""
        if len(payload) != 8 or payload[0] != 0x55 or payload[7] != 0x99:
            return None
        if payload == b"\x55\x00\x01\x00\x00\x00\x01\x99":
            return 1
        if payload == b"\x55\x00\x02\x00\x00\x00\x02\x99":
            return 2
        return 0

    def _set_camera_type(self, camera_type: int) -> None:
        """Record a newly detected camera type and inform the RC adapter."""
        if camera_type == self.camera_type:
            return
        self.camera_type = camera_type
        logger.info("[wifi-cam-video] detected camera type %s", camera_type)
        if self._rc_adapter is not None and hasattr(self._rc_adapter, "set_camera_type"):
            self._rc_adapter.set_camera_type(camera_type)

    def _finish_frame(
        self,
        final_chunk: bytes,
        final_chunk_index: int,
        resolution: int,
        retain: int,
    ) -> Optional[VideoFrame]:
        """Trim the reassembly buffer at the JPEG EOI marker and wrap it.

        Assumes every non-final chunk carried exactly CHUNK_SIZE payload
        bytes; returns None when no EOI is found or the result is not a
        well-formed SOI…EOI JPEG.
        """
        eoi = final_chunk.find(self.EOI)
        if eoi < 0:
            return None

        frame_len = final_chunk_index * self.CHUNK_SIZE + eoi + len(self.EOI)
        data = bytes(self._frame_buffer[:frame_len])
        if not data.startswith(self.SOI) or not data.endswith(self.EOI):
            if self.debug:
                logger.debug("[wifi-cam-video] rejected malformed JPEG frame")
            return None

        self._frame_counter = (self._frame_counter + 1) & 0xFFFF
        frame = VideoFrame(self._frame_counter, data, format_type="jpeg")
        # Raw header bytes forwarded for downstream consumers.
        frame.resolution = resolution  # type: ignore[attr-defined]
        frame.retain = retain  # type: ignore[attr-defined]
        return frame


================================================
FILE: backend/protocols/wifi_uav_rc_protocol_adapter.py
================================================
import socket
from typing import Final, List, Optional

from protocols.base_protocol_adapter import BaseProtocolAdapter
from models.wifi_uav_rc import WifiUavRcModel
from utils.wifi_uav_variants import get_wifi_uav_capabilities


class WifiUavRcProtocolAdapter(BaseProtocolAdapter):
    """
    Builds and transmits control packets for the WiFi-UAV family.
    Packet layout derived from reverse-engineered Android app traces.

    Turbodrone uses the extended WiFi-UAV command layout (`66 14 ...`). In
    that layout the app maps takeoff and land onto the same one-key action bit,
    while emergency stop is a separate bit.
    """

    DEFAULT_DRONE_IP: Final = "192.168.169.1"
    DEFAULT_PORT:     Final = 8800

    # ──────────────────────────────────────────────────────────
    # Static parts (taken 1:1 from packet dumps)
    # ──────────────────────────────────────────────────────────
    _HEADER         = bytes([0xef, 0x02, 0x7c, 0x00, 0x02, 0x02,
                             0x00, 0x01, 0x02, 0x00, 0x00, 0x00])

    _COUNTER1_SUFFIX = bytes([0x00, 0x00, 0x14, 0x00, 0x66, 0x14])
    _CONTROL_SUFFIX  = bytes(10)                            # 10 × 0x00

    _CHECKSUM_SUFFIX = bytes([0x99]) + bytes(44) + bytes([0x32, 0x4b, 0x14, 0x2d, 0x00, 0x00])

    _COUNTER2_SUFFIX = bytes([
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00,
        0x00, 0x00, 0x14, 0x00, 0x00, 0x00,
        0xff, 0xff, 0xff, 0xff
    ])

    _COUNTER3_SUFFIX = bytes([
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x03, 0x00, 0x00, 0x00, 0x10, 0x00,
        0x00, 0x00
    ])

    # Command-byte flag bits.
    FLAG_TAKEOFF_OR_LAND = 0x01
    FLAG_STOP = 0x02
    FLAG_CALIBRATION = 0x04
    FLAG_FLIP = 0x08

    # Axis scale applied per speed_index 0..3 (mirrors the app's speed table).
    _SPEED_SCALES: Final[tuple[float, ...]] = (0.30, 0.60, 1.00, 0.25)

    # ------------------------------------------------------------------ #
    def __init__(self,
                 drone_ip: str = DEFAULT_DRONE_IP,
                 control_port: int = DEFAULT_PORT,
                 shared_sock: Optional[socket.socket] = None,
                 variant: str = "auto") -> None:
        """Create the adapter.

        Args:
            drone_ip:     drone address (default 192.168.169.1).
            control_port: base UDP port; dual-port variants also use port+1.
            shared_sock:  externally managed socket; never closed by stop().
            variant:      WiFi-UAV variant name, or "auto".
        """
        self.drone_ip = drone_ip
        self.control_port = control_port
        self.variant = (variant or "auto").strip().lower()
        self.capabilities = get_wifi_uav_capabilities(self.variant)
        self._target_ports = self._resolve_target_ports(control_port)

        self.sock = shared_sock or socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self._is_shared_sock = shared_sock is not None
        self.debug_packets = False
        self._pkt_counter = 0

        # rolling 16-bit counters found in the original protocol
        self._ctr1 = 0x0000
        self._ctr2 = 0x0001
        self._ctr3 = 0x0002

        # Debug snapshot of the most recent packet, read by
        # send_control_packet when debug output is enabled.  Initialised
        # here so the attributes always exist (previously they were
        # created lazily inside build_control_packet).
        self._last_controls: tuple = (None, None, None, None)
        self._last_command: Optional[int] = None
        self._last_headless: Optional[int] = None
        self._last_command_intent: Optional[str] = None

    def set_socket(self, sock: socket.socket) -> None:
        """Use an externally managed socket instead of the internal one."""
        # Don't leak the old socket if it was created here.
        if self.sock and not self._is_shared_sock:
            self.sock.close()

        self.sock = sock
        self._is_shared_sock = True

    def stop(self) -> None:
        """Close the socket if it's not shared."""
        if self.sock and not self._is_shared_sock:
            try:
                self.sock.close()
            except OSError:
                pass  # Ignore errors on shutdown

    # ------------------------------------------------------------------ #
    # BaseProtocolAdapter
    # ------------------------------------------------------------------ #
    def build_control_packet(self, drone_model: WifiUavRcModel) -> bytes:  # type: ignore[override]
        """Encode the model state into one wire packet and clear one-shot flags.

        Each call also advances the three rolling 16-bit counters embedded
        in the packet.
        """
        # ----- counters -------------------------------------------------
        c1 = self._ctr1.to_bytes(2, "little")
        c2 = self._ctr2.to_bytes(2, "little")
        c3 = self._ctr3.to_bytes(2, "little")

        # advance for next call
        self._ctr1 = (self._ctr1 + 1) & 0xFFFF
        self._ctr2 = (self._ctr2 + 1) & 0xFFFF
        self._ctr3 = (self._ctr3 + 1) & 0xFFFF

        # ----- command / headless --------------------------------------
        command = 0x00
        if drone_model.takeoff_flag or drone_model.land_flag:
            command |= self.FLAG_TAKEOFF_OR_LAND
        # Remember which action produced the one-key bit for debug output.
        if drone_model.land_flag:
            self._last_command_intent = "LAND"
        elif drone_model.takeoff_flag:
            self._last_command_intent = "TAKEOFF"
        elif drone_model.stop_flag:
            self._last_command_intent = "STOP"
        elif drone_model.calibration_flag:
            self._last_command_intent = "CALIBRATE"
        else:
            self._last_command_intent = None
        if drone_model.stop_flag:
            command |= self.FLAG_STOP
        if drone_model.calibration_flag:
            command |= self.FLAG_CALIBRATION
        if getattr(drone_model, "flip_flag", False):
            command |= self.FLAG_FLIP
        # Camera tilt occupies the top two bits of the command byte.
        camera_tilt_state = max(0, min(2, int(getattr(drone_model, "camera_tilt_state", 0))))
        command |= (camera_tilt_state & 0x03) << 6

        headless = 0x03 if drone_model.headless_flag else 0x02

        # ----- controls -------------------------------------------------
        speed_index = max(0, min(3, int(getattr(drone_model, "speed_index", 2))))
        controls: List[int] = [
            self._apply_speed_scale(drone_model.roll, speed_index),
            self._apply_speed_scale(drone_model.pitch, speed_index),
            self._apply_speed_scale(drone_model.throttle, speed_index),
            self._apply_speed_scale(drone_model.yaw, speed_index),
            command & 0xFF,
            headless & 0xFF,
        ]

        # Stash for debug printing at send time (roll, pitch, throttle, yaw).
        self._last_controls = tuple(controls[:4])
        self._last_command = command & 0xFF
        self._last_headless = headless & 0xFF

        # XOR checksum covers only the six control bytes.
        checksum = 0
        for b in controls:
            checksum ^= b

        # ----- assemble -------------------------------------------------
        pkt = bytearray()
        pkt += self._HEADER
        pkt += c1 + self._COUNTER1_SUFFIX
        pkt += bytes(controls)
        pkt += self._CONTROL_SUFFIX
        pkt.append(checksum)
        pkt += self._CHECKSUM_SUFFIX
        pkt += c2 + self._COUNTER2_SUFFIX
        pkt += c3 + self._COUNTER3_SUFFIX

        # one-shot flags → clear
        drone_model.takeoff_flag = False
        drone_model.land_flag = False
        drone_model.stop_flag = False
        drone_model.calibration_flag = False
        drone_model.flip_flag = False
        drone_model.camera_tilt_state = 0

        return bytes(pkt)

    def send_control_packet(self, packet: bytes):  # type: ignore[override]
        """
        Transmit one RC packet.
        If the video layer has just torn the shared socket down, the send
        will raise OSError(EBADF).  Swallow it and wait until the receiver
        hands us the fresh socket.
        """
        try:
            for port in self._target_ports:
                self.sock.sendto(packet, (self.drone_ip, port))
        except OSError:
            # Socket was closed during video-reconnect window.
            # Wait for VideoReceiverService to call set_socket(…) with the
            # new descriptor.  Until then we just skip transmitting.
            return

        if self.debug_packets:
            self._pkt_counter += 1
            print(f"[wifi-uav] #{self._pkt_counter:05d}   "
                  f"{' '.join(f'{b:02x}' for b in packet[:40])} …")
            r, p, t, y = self._last_controls
            if None not in (r, p, t, y):
                print(f"[wifi-uav] controls R:{r} P:{p} T:{t} Y:{y}")
            command = self._last_command
            if command is not None:
                flags = []
                if command & self.FLAG_TAKEOFF_OR_LAND:
                    flags.append(self._last_command_intent or "TAKEOFF_OR_LAND")
                if command & self.FLAG_STOP:
                    flags.append("STOP")
                if command & self.FLAG_CALIBRATION:
                    flags.append("CALIBRATE")
                if command & self.FLAG_FLIP:
                    flags.append("FLIP")
                tilt = (command >> 6) & 0x03
                if tilt:
                    flags.append(f"TILT:{tilt}")
                if flags:
                    print(f"[wifi-uav] command flags: {', '.join(flags)}")

    def toggle_debug(self) -> bool:                # type: ignore[override]
        """Flip packet-dump debugging and return the new state."""
        self.debug_packets = not self.debug_packets
        state = "ON" if self.debug_packets else "OFF"
        print(f"[wifi-uav] debug {state}")
        return self.debug_packets

    def _resolve_target_ports(self, control_port: int) -> tuple[int, ...]:
        """Return the UDP ports to transmit to (dual-port variants use two)."""
        if self.capabilities.transport == "uav_dual_port":
            return (control_port, control_port + 1)
        return (control_port,)

    def _apply_speed_scale(self, value: float, speed_index: int) -> int:
        """Scale a raw model axis around its center using the app's speed table."""
        scale = self._SPEED_SCALES[speed_index]
        centered = float(value) - 128.0
        return max(0, min(255, round(128.0 + centered * scale)))


================================================
FILE: backend/protocols/wifi_uav_video_protocol.py
================================================
import logging
import socket
import queue
import threading
import time
from typing import Optional

from models.video_frame import VideoFrame
from protocols.base_video_protocol import BaseVideoProtocolAdapter
from utils.wifi_uav_ack_state import WifiUavAckState
from utils.wifi_uav_packets import (
    START_STREAM,
    build_native_ack_packet,
)
from utils.wifi_uav_jpeg import generate_jpeg_headers, EOI
from utils.wifi_uav_variants import get_wifi_uav_capabilities

logger = logging.getLogger(__name__)


class WifiUavVideoProtocolAdapter(BaseVideoProtocolAdapter):
    """
    Protocol adapter for the inexpensive "WiFi UAV" drones.

    Differences to the S2x family:
      • A single duplex UDP socket is used for tx/rx.
      • The drone stops streaming unless it receives two custom
        *frame-request* packets (REQUEST_A / REQUEST_B) for every JPEG.
      • Each UDP datagram has a 56-byte proprietary header that must be
        stripped; the JPEG SOI/APPx headers are completely absent and are
        generated on the client.
    """

    DEFAULT_DRONE_IP = "192.168.169.1"

    REQUEST_A_OFFSETS = (12, 13)          # two-byte LE frame counter
    REQUEST_B_OFFSETS = (12, 13, 88, 89, 107, 108)

    FRAME_TIMEOUT = 0.08          # 80 ms without a full frame → retry sooner
    MAX_RETRIES = 3              # allow one more retry for first-frame reliability
    WATCHDOG_SLEEP = 0.05          # 50 ms between watchdog checks
    INITIAL_STREAM_TIMEOUT = 8.0  # give warmup some time before forcing restart
    LINK_STALL_TIMEOUT = 4.0      # no packets for this long -> recreate adapter

    # ------------------------------------------------------------------ #
    # life-cycle helpers
    # ------------------------------------------------------------------ #
    def __init__(
        self,
        drone_ip: str = DEFAULT_DRONE_IP,
        control_port: int = 8800,
        video_port: int = 8800,
        jpeg_width: int = 640,
        jpeg_height: int = 360,
        components: int = 3,
        *,
        variant: str = "auto",
        debug: bool = False,
    ):
        """Create the duplex UDP transport and start the frame watchdog.

        jpeg_width/jpeg_height/components parameterize the client-side
        JPEG header since the drone omits it from the stream.
        """
        super().__init__(drone_ip, control_port, video_port)

        self.variant = (variant or "auto").strip().lower()
        self.capabilities = get_wifi_uav_capabilities(self.variant)
        self.debug = debug or logger.isEnabledFor(logging.DEBUG)
        self._dbg = logger.debug if self.debug else (lambda *a, **k: None)
        self._sock_lock = threading.Lock()
        self._pkt_lock = threading.Lock()
        self._pkt_buffer: list[bytes] = []
        self._warmup_stop = threading.Event()
        self._warmup_thread: Optional[threading.Thread] = None
        self._rx_thread: Optional[threading.Thread] = None
        self._target_ports = self._resolve_target_ports(control_port)

        self._sock = self._create_duplex_socket()

        # Pre-built JPEG header (SOI + quant tables + SOF0 + …)
        self._jpeg_header = generate_jpeg_headers(jpeg_width, jpeg_height, components)

        # State for the current frame being assembled
        # If I send 0 it sends 1, starting with 1 is more reliable.
        self._current_fid: int = 1
        self._ack_state = WifiUavAckState()
        self._last_req_ts = time.time()
        self._last_rx_ts = time.time()
        self._stream_started = False
        self._started_once = False

        # Stats
        self.frames_ok = 0
        self.frames_dropped = 0

        # Retry bookkeeping.  This MUST be initialised before the watchdog
        # thread is started below, otherwise the watchdog can run before
        # these attributes exist and crash with AttributeError.
        self._retry_cnt       = 0          # retries for *current* frame
        self._had_retry       = False      # did we already retry this frame?
        self.retry_attempts   = 0          # global counter
        self.retry_successes  = 0          # global counter

        self._dbg(f"[init] adapter ready (control:{control_port}  video:{video_port})")
        if self.capabilities.transport == "uav_dual_port":
            logger.info("[wifi-uav] UAV/FLOW variant selected; probing UDP ports %s", self._target_ports)

        # Watchdog for per-frame timeouts
        self._running = True
        self._watchdog = threading.Thread(
            target=self._watchdog_loop, daemon=True, name="FrameWatchdog"
        )
        self._watchdog.start()

        self._dbg(f"Main UDP socket created, listening on *:{self._sock.getsockname()[1]}")

    # ------------------------------------------------------------------ #
    # disable keep-alive – one start command is enough for this drone
    # ------------------------------------------------------------------ #
    def start_keepalive(self, interval: float = 1.0) -> None:  # type: ignore[override]
        """No-op: this drone keeps streaming after a single start command."""
        return None

    def stop_keepalive(self) -> None:  # type: ignore[override]
        """No-op counterpart of start_keepalive (no keep-alive timer exists)."""
        return None

    # ------------------------------------------------------------------ #
    # Base-class hooks
    # ------------------------------------------------------------------ #
    def create_receiver_socket(self) -> socket.socket:
        """Return the single duplex socket used for both tx and rx."""
        return self._sock

    def send_start_command(self) -> None:
        """Ask the drone to begin streaming on every candidate port."""
        destinations = [(self.drone_ip, port) for port in self._target_ports]
        for destination in destinations:
            self._sock.sendto(START_STREAM, destination)
        self._dbg("[wifi-uav] START_STREAM sent to %s", self._target_ports)

    def handle_payload(self, payload: bytes) -> Optional[VideoFrame]:
        """
        Collect slices belonging to the requested frame.

        Returns a complete ``VideoFrame`` once every fragment up to the
        announced tail has arrived, otherwise ``None``.

        Native packet layout:
        byte 0        : 0x93
        byte 1        : message type; 0x01 means JPEG fragment
        bytes 2..3    : total packet length
        bytes 8..15   : image sequence
        bytes 32..35  : fragment index
        bytes 36..39  : fragment count
        bytes 56+     : JPEG payload
        """
        parsed = self._parse_fragment_header(payload)
        if parsed is None:
            return None

        frame_id, frag_id, fragment_total, frame_body_len, quality, jpeg_payload = parsed
        # Any valid fragment proves the link is alive; reset retry state.
        self._stream_started = True
        self._last_rx_ts = time.time()
        self._retry_cnt = 0

        # resynchronise if the drone skipped ahead
        if frame_id != self._current_fid:
            self.frames_dropped += 1
            self._dbg(f"⚠ skip   expected {self._current_fid:04x} "
                      f"got {frame_id:04x}")
            self._current_fid = frame_id

        # Hand the fragment to the ack tracker; a non-None slot means the
        # frame is now complete.
        slot = self._ack_state.ingest_fragment(
            frame_id,
            frag_id,
            fragment_total,
            jpeg_payload,
            frame_body_len=frame_body_len,
            quality=quality,
        )
        self._dbg(f"← FID:{frame_id:04x} FRAG:{frag_id:04x}")

        if slot is None:
            # NOTE(review): reaches into the tracker's private _slot_for_seq
            # purely for diagnostics.
            received = len(self._ack_state._slot_for_seq(frame_id).received_fragments)
            if fragment_total > 0:
                missing = fragment_total - received
                self._dbg(
                    f"⚠ incomplete FID:{frame_id:04x} "
                    f"missing {missing} fragment(s); waiting for retry"
                )
            else:
                self._dbg(
                    f"⚠ incomplete FID:{frame_id:04x} "
                    f"received {received} fragment(s); waiting for tail"
                )
            return None

        # Only emit a frame once every fragment up to the announced tail exists.
        # The drone omits JPEG headers, so prepend the pre-built ones and
        # terminate with the EOI marker.
        jpeg = self._jpeg_header + slot.ordered_payload() + EOI
        frame = VideoFrame(frame_id=frame_id, data=jpeg)

        self.frames_ok += 1

        # ── was this frame finished thanks to a retry? ───────────
        if self._had_retry:
            self.retry_successes += 1
            self._dbg(f"✓ recovery! {frame_id:04x}  "
                      f"SUC:{self.retry_successes}  "
                      f"ATT:{self.retry_attempts}")
            self._had_retry = False
        # ──────────────────────────────────────────────────────────────

        self._dbg(f"✓ {frame_id:04x} ({slot.fragment_total} frags)  "
                  f"OK:{self.frames_ok}  DROP:{self.frames_dropped}")

        # prepare next iteration
        self._ack_state.mark_delivered(frame_id)
        self._send_frame_request(frame_id)            # ask for next
        self._current_fid = frame_id + 1
        self._last_rx_ts = self._last_req_ts = time.time()

        return frame

    # ------------------------------------------------------------------ #
    # helpers
    # ------------------------------------------------------------------ #
    def _warmup_loop(self) -> None:
        """Keep nudging the drone until the first frame shows up.

        Every 200 ms this re-sends START_STREAM together with a frame
        request, looping until either ``_first_frame`` is cleared (first
        frame observed) or ``_warmup_stop`` is set.
        """
        while not self._warmup_stop.is_set():
            if not getattr(self, "_first_frame", False):
                break
            try:
                self.send_start_command()
                # Requesting the previous frame id prompts the drone to
                # reply with the next one.
                previous_fid = max(0, self._current_fid - 1)
                self._send_frame_request(previous_fid)
            except Exception:
                # Best effort: transient socket errors during warmup are ignored.
                pass
            self._warmup_stop.wait(0.2)
    def _send_frame_request(self, frame_id: int) -> None:
        """Ask the drone for (the frame after) ``frame_id``.

        Sends two native ACK packets to every target port: one with an
        empty slot list and one carrying the current ack slots, then
        stamps ``_last_req_ts``.
        """
        command_seq = frame_id & 0xFFFFFFFF

        rqst_a = build_native_ack_packet(command_seq, [])
        rqst_b = build_native_ack_packet(command_seq, self._build_ack_slots(frame_id))

        for port in self._target_ports:
            self._sock.sendto(rqst_a, (self.drone_ip, port))
            self._sock.sendto(rqst_b, (self.drone_ip, port))
        self._last_req_ts = time.time()
        # Fix: _dbg is called with a single pre-formatted message at every
        # other call site; the old printf-style call passed extra positional
        # args that a single-message _dbg would reject.
        self._dbg(f"→ REQ {frame_id:04x} to {self._target_ports}")

    def _build_ack_slots(self, seq: int) -> list[bytes]:
        return self._ack_state.build_ack_slots(seq)

    def _parse_fragment_header(self, payload: bytes) -> Optional[tuple[int, int, int, int, int, bytes]]:
        if len(payload) < 56 or payload[0] != 0x93 or payload[1] != 0x01:
            return None

        declared_len = int.from_bytes(payload[2:4], "little")
        native_layout = declared_len == len(payload)
        if native_layout:
            frame_id = int.from_bytes(payload[8:16], "little")
            frag_id = int.from_bytes(payload[32:36], "little")
            fragment_total = int.from_bytes(payload[36:40], "little")
            frame_body_len = int.from_bytes(payload[40:44], "little")
            quality = payload[48]
            if fragment_total > 0 and frag_id < fragment_total:
                return frame_id, frag_id, fragment_total, frame_body_len, quality, payload[56:]

        # Compatibility fallback for older captures/comments that only used
        # 16-bit counters and inferred the last fragment from packet length.
        frame_id = int.from_bytes(payload[16:18], "little")
        frag_id = int.from_bytes(payload[32:34], "little")
        # Legacy packets only reveal the total when the tail fragment arrives.
        fragment_total = frag_id + 1 if payload[2] != 0x38 else 0
        return frame_id, frag_id, fragment_total, 0, 0, payload[56:]

    def _resolve_target_ports(self, control_port: int) -> tuple[int, ...]:
        """
        The native UAVSDK starts the legacy backend on 8800 and the BL618
        backend on 8801. For the explicit UAV/FLOW variant, probe both just as
        nativeStart() does. The FLD/legacy path keeps the single-port behavior.
        """
        if self.capabilities.transport == "uav_dual_port":
            return (control_port, control_port + 1)
        return (control_port,)

    def _create_duplex_socket(self) -> socket.socket:
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind(("", 0))          # let OS choose a free local port
        sock.settimeout(1.0)
        self._dbg(f"Main UDP socket created, listening on *:{sock.getsockname()[1]}")
        return sock

    def get_receiver_socket(self) -> socket.socket:
        """Returns the main socket for the receiver thread to use."""
        with self._sock_lock:
            return self._sock

    def set_rc_adapter(self, rc_adapter) -> None:
        """Share our UDP socket with the RC adapter (best effort).

        Adapters that are not ready, or that lack ``set_socket``, are
        silently skipped — the receiver loop works fine without them.
        """
        shared_sock = self._sock
        try:
            rc_adapter.set_socket(shared_sock)
            self._dbg("[wifi-uav] RC adapter socket shared")
        except Exception:
            # Deliberately swallowed: missing socket injection is not fatal.
            pass

    # ------------------------------------------------------------------ #
    # Receiver thread API expected by VideoReceiverService
    # ------------------------------------------------------------------ #
    def start(self) -> None:
        """Spawn the RX thread and, on first call, kick off the stream.

        Idempotent: returns immediately if a receiver thread is already
        alive.  Start-of-stream commands are sent only AFTER the RX loop
        is running so the first frame burst is not lost (see comment
        below), and a warmup thread keeps re-requesting until the first
        frame arrives.
        """
        # Already running? Nothing to do.
        if hasattr(self, "_rx_thread") and self._rx_thread and self._rx_thread.is_alive():
            return
        # Small frame buffer; upstream will drop if slow
        self._frame_q: "queue.Queue[VideoFrame]" = queue.Queue(maxsize=2)
        self._last_rx_ts = time.time()
        # Reset the raw-packet capture buffer used by get_packets()/dumping.
        with self._pkt_lock:
            self._pkt_buffer = []

        def _rx_loop() -> None:
            # Drain the shared UDP socket until self._running goes False
            # or the socket is closed by stop().
            sock = self.get_receiver_socket()
            while self._running:
                try:
                    payload = self.recv_from_socket(sock)
                    if not payload:
                        continue
                    # Collect raw packet bytes for optional dumping
                    with self._pkt_lock:
                        self._pkt_buffer.append(payload)
                    # Try to assemble a frame
                    frame = self.handle_payload(payload)
                    if frame is not None:
                        try:
                            self._frame_q.put(frame, timeout=0.2)
                        except queue.Full:
                            # Drop frame if consumer is slow
                            pass
                except OSError:
                    # Socket likely closed during stop(); exit loop
                    break
                except Exception as e:
                    # Any other per-packet error: log and keep receiving.
                    self._dbg(f"[wifi-uav] rx error: {e}")
                    continue

        self._rx_thread = threading.Thread(target=_rx_loop, daemon=True, name="WifiUavVideoRx")
        self._rx_thread.start()

        if not self._started_once:
            self._started_once = True
            # Kick off the stream only after the receiver is already draining
            # the UDP socket. K417 responds very quickly and can otherwise dump
            # the first frame burst before the RX loop is active.
            self.send_start_command()
            self._send_frame_request(0)
            # _first_frame stays True until the first frame is observed;
            # the warmup thread re-sends START_STREAM while it is set.
            self._first_frame = True
            self._warmup_thread = threading.Thread(
                target=self._warmup_loop,
                daemon=True,
                name="Warmup",
            )
            self._warmup_thread.start()

    def is_running(self) -> boo
Download .txt
gitextract_1kcujrmi/

├── .gitignore
├── LICENSE
├── NOTICE
├── README.md
├── backend/
│   ├── control/
│   │   └── strategies.py
│   ├── main.py
│   ├── models/
│   │   ├── __init__.py
│   │   ├── base_rc.py
│   │   ├── base_video_model.py
│   │   ├── control_profile.py
│   │   ├── cooingdv_rc.py
│   │   ├── cooingdv_video_model.py
│   │   ├── debug_rc.py
│   │   ├── s2x_rc.py
│   │   ├── s2x_video_model.py
│   │   ├── stick_range.py
│   │   ├── video_frame.py
│   │   ├── wifi_cam_rc.py
│   │   ├── wifi_uav_rc.py
│   │   └── wifi_uav_video_model.py
│   ├── plugins/
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── follow/
│   │   │   ├── __init__.py
│   │   │   ├── follow_controller.py
│   │   │   ├── follow_plugin.py
│   │   │   └── test_yolo.py
│   │   ├── manager.py
│   │   └── test_yaw_plugin.py
│   ├── protocols/
│   │   ├── __init__.py
│   │   ├── base_protocol_adapter.py
│   │   ├── base_video_protocol.py
│   │   ├── cooingdv_jieli_rc_protocol_adapter.py
│   │   ├── cooingdv_jieli_video_protocol.py
│   │   ├── cooingdv_rc_protocol_adapter.py
│   │   ├── cooingdv_video_protocol.py
│   │   ├── debug_rc_protocol_adapter.py
│   │   ├── debug_video_protocol.py
│   │   ├── no_video_protocol.py
│   │   ├── s2x_rc_protocol_adapter.py
│   │   ├── s2x_video_protocol.py
│   │   ├── wifi_cam_rc_protocol_adapter.py
│   │   ├── wifi_cam_video_protocol.py
│   │   ├── wifi_uav_rc_protocol_adapter.py
│   │   └── wifi_uav_video_protocol.py
│   ├── receive_video.py
│   ├── remote_control.py
│   ├── requirements.txt
│   ├── services/
│   │   ├── __init__.py
│   │   ├── flight_controller.py
│   │   └── video_receiver.py
│   ├── tests/
│   │   ├── test_cooingdv_jieli_ctp.py
│   │   ├── test_s2x_protocol.py
│   │   ├── test_s2x_video_protocol.py
│   │   └── test_wifi_cam_protocol.py
│   ├── utils/
│   │   ├── cooingdv_jieli_ctp.py
│   │   ├── dropping_queue.py
│   │   ├── logging_config.py
│   │   ├── wifi_uav_ack_state.py
│   │   ├── wifi_uav_jpeg.py
│   │   ├── wifi_uav_packets.py
│   │   └── wifi_uav_variants.py
│   ├── video_client.py
│   ├── views/
│   │   ├── __init__.py
│   │   ├── base_video_view.py
│   │   ├── cli_rc.py
│   │   └── opencv_video_view.py
│   └── web_server.py
├── docs/
│   └── research/
│       ├── S2x.md
│       ├── cooingdv.md
│       ├── wifi_cam.md
│       └── wifi_uav.md
├── experimental/
│   ├── README.md
│   └── test_e88pro.py
└── frontend/
    ├── .gitignore
    ├── eslint.config.js
    ├── index.html
    ├── package.json
    ├── postcss.config.cjs
    ├── src/
    │   ├── App.css
    │   ├── App.tsx
    │   ├── components/
    │   │   ├── AxisIndicator.tsx
    │   │   ├── CommandButtons.tsx
    │   │   ├── ControlSchemeToggle.tsx
    │   │   ├── ControlsOverlay.tsx
    │   │   ├── DrawingOverlay.tsx
    │   │   ├── PluginControls.tsx
    │   │   ├── SpeedControl.tsx
    │   │   ├── StaticDrawingOverlay.tsx
    │   │   └── VideoFeed.tsx
    │   ├── hooks/
    │   │   ├── useControls.ts
    │   │   ├── useOverlays.ts
    │   │   ├── usePlugins.ts
    │   │   └── useVideoSizing.ts
    │   ├── index.css
    │   ├── main.tsx
    │   ├── pages/
    │   │   └── TestPage.tsx
    │   └── vite-env.d.ts
    ├── tailwind.config.ts
    ├── tsconfig.app.json
    ├── tsconfig.json
    ├── tsconfig.node.json
    └── vite.config.ts
Download .txt
SYMBOL INDEX (524 symbols across 72 files)

FILE: backend/control/strategies.py
  class ControlStrategy (line 4) | class ControlStrategy(ABC):
    method update (line 8) | def update(self, model, dt: float, axes: Dict[str, float]) -> None:
  class IncrementalStrategy (line 13) | class IncrementalStrategy(ControlStrategy):
    method update (line 14) | def update(self, model, dt, axes):
  class DirectStrategy (line 19) | class DirectStrategy(ControlStrategy):
    method update (line 24) | def update(self, model, dt, axes):

FILE: backend/main.py
  function main (line 44) | def main():

FILE: backend/models/base_rc.py
  class BaseRCModel (line 10) | class BaseRCModel(ABC):
    method __init__ (line 25) | def __init__(
    method update (line 57) | def update(self, dt, axes): ...
    method takeoff (line 59) | def takeoff(self): ...
    method land (line 61) | def land(self): ...
    method get_control_state (line 63) | def get_control_state(self): ...
    method set_profile (line 66) | def set_profile(self, name: str) -> None:
    method set_sensitivity (line 71) | def set_sensitivity(self, preset: int) -> None:
    method set_strategy (line 75) | def set_strategy(self, strategy) -> None:
    method _apply_profile (line 79) | def _apply_profile(self, profile: ControlProfile) -> None:
    method _scale_normalised (line 90) | def _scale_normalised(self, value: float) -> float:
    method _update_axes_incremental (line 99) | def _update_axes_incremental(self, dt, axes):
    method update_axes (line 108) | def update_axes(self, dt, throttle_dir, yaw_dir, pitch_dir, roll_dir):
    method _update_axes_direct (line 172) | def _update_axes_direct(self, axes):

FILE: backend/models/base_video_model.py
  class BaseVideoModel (line 5) | class BaseVideoModel(ABC):
    method ingest_chunk (line 13) | def ingest_chunk(

FILE: backend/models/control_profile.py
  class ControlProfile (line 4) | class ControlProfile:

FILE: backend/models/cooingdv_rc.py
  class CooingdvRcModel (line 22) | class CooingdvRcModel(BaseRCModel):
    method __init__ (line 46) | def __init__(self, profile: str | ControlProfile = "normal") -> None:
    method update (line 68) | def update(self, dt, axes):
    method takeoff (line 71) | def takeoff(self):
    method land (line 75) | def land(self):
    method emergency_stop (line 84) | def emergency_stop(self):
    method flip (line 93) | def flip(self):
    method toggle_headless (line 97) | def toggle_headless(self):
    method calibrate_gyro (line 101) | def calibrate_gyro(self):
    method get_control_state (line 105) | def get_control_state(self):
    method set_strategy (line 114) | def set_strategy(self, strategy) -> None:

FILE: backend/models/cooingdv_video_model.py
  class CooingdvVideoModel (line 18) | class CooingdvVideoModel(BaseVideoModel):
    method __init__ (line 27) | def __init__(self):
    method ingest_chunk (line 30) | def ingest_chunk(
    method reset (line 67) | def reset(self) -> None:

FILE: backend/models/debug_rc.py
  class DebugRcModel (line 8) | class DebugRcModel(BaseRCModel):
    method __init__ (line 11) | def __init__(self):
    method update (line 17) | def update(self, dt, axes):
    method get_control_state (line 22) | def get_control_state(self):
    method set_throttle (line 32) | def set_throttle(self, value: int):
    method set_yaw (line 36) | def set_yaw(self, value: int):
    method set_pitch (line 40) | def set_pitch(self, value: int):
    method set_roll (line 44) | def set_roll(self, value: int):
    method takeoff (line 48) | def takeoff(self):
    method land (line 51) | def land(self):

FILE: backend/models/s2x_rc.py
  class S2xDroneModel (line 8) | class S2xDroneModel(BaseRCModel):
    method __init__ (line 19) | def __init__(self, profile: str | ControlProfile = "normal"):
    method update (line 42) | def update(self, dt, axes):
    method takeoff (line 45) | def takeoff(self):
    method land (line 49) | def land(self):
    method emergency_stop (line 59) | def emergency_stop(self):
    method set_speed_index (line 63) | def set_speed_index(self, speed_index: int) -> None:
    method get_control_state (line 67) | def get_control_state(self):
    method set_strategy (line 77) | def set_strategy(self, strategy) -> None:

FILE: backend/models/s2x_video_model.py
  class S2xVideoModel (line 7) | class S2xVideoModel(BaseVideoModel):
    method __init__ (line 18) | def __init__(self) -> None:
    method ingest_chunk (line 25) | def ingest_chunk(
    method _reset (line 63) | def _reset(self, new_fid: Optional[int]) -> None:
    method _assemble_current (line 67) | def _assemble_current(self, keys_to_use: set[int] | None = None) -> Op...

FILE: backend/models/stick_range.py
  class StickRange (line 4) | class StickRange:

FILE: backend/models/video_frame.py
  class VideoFrame (line 3) | class VideoFrame:
    method __init__ (line 6) | def __init__(self, frame_id, data, format_type="jpeg", timestamp=None):
    method __repr__ (line 15) | def __repr__(self):

FILE: backend/models/wifi_cam_rc.py
  class WifiCamRcModel (line 16) | class WifiCamRcModel(BaseRCModel):
    method __init__ (line 27) | def __init__(self, profile: str | ControlProfile = "normal") -> None:
    method update (line 44) | def update(self, dt, axes):
    method takeoff (line 47) | def takeoff(self):
    method land (line 50) | def land(self):
    method emergency_stop (line 53) | def emergency_stop(self):
    method flip (line 56) | def flip(self):
    method toggle_headless (line 59) | def toggle_headless(self):
    method toggle_altitude_hold (line 62) | def toggle_altitude_hold(self):
    method calibrate_gyro (line 65) | def calibrate_gyro(self):
    method get_control_state (line 68) | def get_control_state(self):
    method set_strategy (line 78) | def set_strategy(self, strategy) -> None:

FILE: backend/models/wifi_uav_rc.py
  class WifiUavRcModel (line 9) | class WifiUavRcModel(BaseRCModel):
    method __init__ (line 34) | def __init__(self, profile: str | ControlProfile = "normal") -> None:
    method update (line 58) | def update(self, dt, axes):          # type: ignore[override]
    method takeoff (line 61) | def takeoff(self):
    method land (line 64) | def land(self):
    method emergency_stop (line 68) | def emergency_stop(self):
    method flip (line 72) | def flip(self):
    method set_speed_index (line 76) | def set_speed_index(self, speed_index: int) -> None:
    method set_camera_tilt_state (line 80) | def set_camera_tilt_state(self, tilt_state: int) -> None:
    method toggle_record (line 85) | def toggle_record(self):             # type: ignore[override]
    method get_control_state (line 88) | def get_control_state(self):
    method set_strategy (line 99) | def set_strategy(self, strategy) -> None:

FILE: backend/models/wifi_uav_video_model.py
  class WifiUavVideoModel (line 8) | class WifiUavVideoModel:
    method __init__ (line 25) | def __init__(self, width: int = 640, height: int = 360, num_components...
    method ingest_chunk (line 35) | def ingest_chunk(self, payload: bytes) -> Optional[VideoFrame]:
    method _reset_state (line 79) | def _reset_state(self, new_frame_id: Optional[int]):

FILE: backend/plugins/base.py
  class Plugin (line 6) | class Plugin(ABC):
    method __init__ (line 14) | def __init__(self,
    method start (line 31) | def start(self):
    method stop (line 39) | def stop(self):
    method _on_start (line 50) | def _on_start(self):
    method _on_stop (line 53) | def _on_stop(self):
    method send_overlay (line 56) | def send_overlay(self, data: list):

FILE: backend/plugins/follow/follow_controller.py
  class FollowController (line 1) | class FollowController:
    method __init__ (line 9) | def __init__(
    method compute (line 29) | def compute(self, box_center_x: float, box_width: float) -> tuple[floa...

FILE: backend/plugins/follow/follow_plugin.py
  class FollowPlugin (line 16) | class FollowPlugin(Plugin):
    method _on_start (line 22) | def _on_start(self):
    method _on_stop (line 82) | def _on_stop(self):
    method _loop (line 95) | def _loop(self):

FILE: backend/plugins/follow/test_yolo.py
  function main (line 4) | def main():

FILE: backend/plugins/manager.py
  class PluginManager (line 13) | class PluginManager:
    method __init__ (line 14) | def __init__(self,
    method available (line 26) | def available(self) -> list[str]:
    method running (line 29) | def running(self) -> list[str]:
    method clear_overlays (line 32) | def clear_overlays(self) -> None:
    method start (line 43) | def start(self, name: str) -> bool:
    method stop (line 90) | def stop(self, name: str) -> bool:
    method stop_all (line 118) | def stop_all(self):
    method _discover_plugins (line 122) | def _discover_plugins(self):

FILE: backend/plugins/test_yaw_plugin.py
  class TestYawPlugin (line 9) | class TestYawPlugin(Plugin):
    method _on_start (line 21) | def _on_start(self):
    method _on_stop (line 46) | def _on_stop(self):
    method _run (line 72) | def _run(self):

FILE: backend/protocols/base_protocol_adapter.py
  class BaseProtocolAdapter (line 3) | class BaseProtocolAdapter(ABC):
    method build_control_packet (line 7) | def build_control_packet(self, drone_model):
    method send_control_packet (line 12) | def send_control_packet(self, packet):
    method toggle_debug (line 17) | def toggle_debug(self):

FILE: backend/protocols/base_video_protocol.py
  class BaseVideoProtocolAdapter (line 9) | class BaseVideoProtocolAdapter(ABC):
    method __init__ (line 15) | def __init__(self, drone_ip: str, control_port: int, video_port: int):
    method start_keepalive (line 22) | def start_keepalive(self, interval: float = 1.0) -> None:
    method stop_keepalive (line 34) | def stop_keepalive(self) -> None:
    method _keepalive_loop (line 40) | def _keepalive_loop(self, interval: float) -> None:
    method recv_from_socket (line 46) | def recv_from_socket(self, sock) -> Optional[bytes]:
    method send_start_command (line 60) | def send_start_command(self) -> None:
    method create_receiver_socket (line 65) | def create_receiver_socket(self) -> socket.socket:
    method handle_payload (line 70) | def handle_payload(self, payload: bytes) -> Optional[VideoFrame]:

FILE: backend/protocols/cooingdv_jieli_rc_protocol_adapter.py
  class CooingdvJieliRcProtocolAdapter (line 24) | class CooingdvJieliRcProtocolAdapter(BaseProtocolAdapter):
    method __init__ (line 49) | def __init__(
    method build_control_packet (line 90) | def build_control_packet(self, drone_model: CooingdvRcModel) -> bytes:
    method send_control_packet (line 97) | def send_control_packet(self, packet: bytes) -> None:
    method toggle_debug (line 111) | def toggle_debug(self) -> bool:
    method stop (line 116) | def stop(self) -> None:
    method start_heartbeat (line 124) | def start_heartbeat(self) -> None:
    method stop_heartbeat (line 137) | def stop_heartbeat(self) -> None:
    method _heartbeat_loop (line 144) | def _heartbeat_loop(self) -> None:
    method _send_control_mode (line 149) | def _send_control_mode(self, enabled: bool) -> None:
    method _send_ctp (line 152) | def _send_ctp(self, topic: str, params: dict[str, str]) -> None:
    method _build_flying_payload (line 159) | def _build_flying_payload(self, model: CooingdvRcModel) -> list[int]:
    method _build_flags (line 170) | def _build_flags(self, model: CooingdvRcModel) -> int:
    method _clear_one_shot_flags (line 186) | def _clear_one_shot_flags(self, model: CooingdvRcModel) -> None:
    method _clamp_axis (line 193) | def _clamp_axis(self, value: float) -> int:

FILE: backend/protocols/cooingdv_jieli_video_protocol.py
  class CooingdvJieliVideoProtocolAdapter (line 26) | class CooingdvJieliVideoProtocolAdapter(BaseVideoProtocolAdapter):
    method __init__ (line 39) | def __init__(
    method start (line 70) | def start(self) -> None:
    method stop (line 90) | def stop(self) -> None:
    method is_running (line 104) | def is_running(self) -> bool:
    method get_frame (line 107) | def get_frame(self, timeout: float = 1.0) -> Optional[VideoFrame]:
    method get_packets (line 113) | def get_packets(self) -> list[bytes]:
    method send_start_command (line 116) | def send_start_command(self) -> None:
    method create_receiver_socket (line 119) | def create_receiver_socket(self):
    method handle_payload (line 122) | def handle_payload(self, payload: bytes) -> Optional[VideoFrame]:
    method _send_open_stream (line 126) | def _send_open_stream(self) -> None:
    method _send_close_stream (line 137) | def _send_close_stream(self) -> None:
    method _send_ctp (line 140) | def _send_ctp(self, topic: str, params: dict[str, str]) -> None:
    method _start_sdp_server (line 150) | def _start_sdp_server(self) -> None:
    method _sdp_loop (line 158) | def _sdp_loop(self) -> None:
    method _rx_loop (line 193) | def _rx_loop(self) -> None:
    method _handle_rtp_packet (line 225) | def _handle_rtp_packet(self, packet: bytes) -> Optional[VideoFrame]:
    method _handle_rtp_jpeg (line 245) | def _handle_rtp_jpeg(self, timestamp: int, payload: bytes, marker: boo...
    method _handle_possible_raw_jpeg (line 267) | def _handle_possible_raw_jpeg(self, payload: bytes) -> Optional[VideoF...

FILE: backend/protocols/cooingdv_rc_protocol_adapter.py
  class CooingdvRcProtocolAdapter (line 30) | class CooingdvRcProtocolAdapter(BaseProtocolAdapter):
    method __init__ (line 80) | def __init__(
    method start_heartbeat (line 137) | def start_heartbeat(self) -> None:
    method stop_heartbeat (line 151) | def stop_heartbeat(self) -> None:
    method _heartbeat_loop (line 159) | def _heartbeat_loop(self) -> None:
    method start_receiver (line 170) | def start_receiver(self) -> None:
    method stop_receiver (line 184) | def stop_receiver(self) -> None:
    method _receiver_loop (line 192) | def _receiver_loop(self) -> None:
    method _process_received_packet (line 210) | def _process_received_packet(self, packet: bytes, addr: tuple[str, int...
    method stop (line 250) | def stop(self) -> None:
    method build_control_packet (line 266) | def build_control_packet(self, drone_model: CooingdvRcModel) -> bytes:
    method send_control_packet (line 277) | def send_control_packet(self, packet: bytes) -> None:
    method toggle_debug (line 305) | def toggle_debug(self) -> bool:
    method _active_variant (line 315) | def _active_variant(self) -> str:
    method _normalise_variant (line 323) | def _normalise_variant(self, value: Optional[str]) -> Optional[str]:
    method _detect_variant (line 338) | def _detect_variant(self, packet: bytes) -> Optional[str]:
    method _build_tc_control_packet (line 347) | def _build_tc_control_packet(self, model: CooingdvRcModel) -> bytes:
    method _build_gl_control_packet (line 360) | def _build_gl_control_packet(self, model: CooingdvRcModel) -> bytes:
    method _clamp_axis (line 383) | def _clamp_axis(self, value: float) -> int:
    method _build_tc_flags (line 388) | def _build_tc_flags(self, model: CooingdvRcModel) -> int:
    method _build_gl_flags (line 406) | def _build_gl_flags(self, model: CooingdvRcModel) -> tuple[int, int]:
    method _clear_one_shot_flags (line 430) | def _clear_one_shot_flags(self, model: CooingdvRcModel) -> None:
    method _decode_tc_flags (line 437) | def _decode_tc_flags(self, flags: int) -> list[str]:
    method _decode_gl_flags (line 453) | def _decode_gl_flags(self, flags1: int, flags2: int) -> list[str]:
    method _calculate_checksum (line 467) | def _calculate_checksum(self, data: bytes) -> int:

FILE: backend/protocols/cooingdv_video_protocol.py
  class CooingdvVideoProtocolAdapter (line 27) | class CooingdvVideoProtocolAdapter(BaseVideoProtocolAdapter):
    method __init__ (line 55) | def __init__(
    method _open_stream (line 95) | def _open_stream(self) -> bool:
    method _close_stream (line 124) | def _close_stream(self) -> None:
    method _reconnect (line 132) | def _reconnect(self) -> bool:
    method send_start_command (line 155) | def send_start_command(self) -> None:
    method create_receiver_socket (line 162) | def create_receiver_socket(self):
    method handle_payload (line 169) | def handle_payload(self, payload: bytes) -> Optional[VideoFrame]:
    method start (line 178) | def start(self) -> None:
    method _rx_loop (line 199) | def _rx_loop(self) -> None:
    method stop (line 258) | def stop(self) -> None:
    method is_running (line 277) | def is_running(self) -> bool:
    method get_frame (line 281) | def get_frame(self, timeout: float = 1.0) -> Optional[VideoFrame]:
    method get_packets (line 288) | def get_packets(self) -> List[bytes]:
    method start_keepalive (line 298) | def start_keepalive(self, interval: float = 1.0) -> None:
    method stop_keepalive (line 302) | def stop_keepalive(self) -> None:

FILE: backend/protocols/debug_rc_protocol_adapter.py
  class DebugRcProtocolAdapter (line 5) | class DebugRcProtocolAdapter:
    method __init__ (line 8) | def __init__(self):
    method send_control_data (line 11) | def send_control_data(self, data: bytes):

FILE: backend/protocols/debug_video_protocol.py
  class DebugVideoProtocolAdapter (line 15) | class DebugVideoProtocolAdapter(BaseVideoProtocolAdapter):
    method __init__ (line 20) | def __init__(self, camera_index: int = 0, debug: bool = False, max_que...
    method start (line 31) | def start(self):
    method stop (line 41) | def stop(self):
    method is_running (line 54) | def is_running(self) -> bool:
    method get_frame (line 57) | def get_frame(self, timeout: float = 1.0) -> Optional[VideoFrame]:
    method get_packets (line 63) | def get_packets(self) -> List[bytes]:
    method _capture_loop (line 67) | def _capture_loop(self):
    method create_receiver_socket (line 101) | def create_receiver_socket(self):
    method send_start_command (line 104) | def send_start_command(self):
    method handle_payload (line 107) | def handle_payload(self, payload: bytes) -> Optional[VideoFrame]:

FILE: backend/protocols/no_video_protocol.py
  class NoVideoProtocolAdapter (line 13) | class NoVideoProtocolAdapter(BaseVideoProtocolAdapter):
    method __init__ (line 22) | def __init__(
    method start (line 34) | def start(self) -> None:
    method stop (line 38) | def stop(self) -> None:
    method is_running (line 41) | def is_running(self) -> bool:
    method get_frame (line 44) | def get_frame(self, timeout: float = 1.0) -> Optional[VideoFrame]:
    method get_packets (line 49) | def get_packets(self) -> list[bytes]:
    method send_start_command (line 52) | def send_start_command(self) -> None:
    method create_receiver_socket (line 55) | def create_receiver_socket(self):
    method handle_payload (line 58) | def handle_payload(self, payload: bytes) -> Optional[VideoFrame]:

FILE: backend/protocols/s2x_rc_protocol_adapter.py
  class S2xRCProtocolAdapter (line 4) | class S2xRCProtocolAdapter(BaseProtocolAdapter):
    method __init__ (line 7) | def __init__(self, drone_ip, control_port=8080):
    method build_control_packet (line 22) | def build_control_packet(self, drone_model):
    method send_control_packet (line 79) | def send_control_packet(self, packet):
    method toggle_debug (line 107) | def toggle_debug(self):
    method _remap_to_full_range (line 112) | def _remap_to_full_range(self, value, model):
    method _scale_axis (line 121) | def _scale_axis(self, value, model, scale):

FILE: backend/protocols/s2x_video_protocol.py
  class S2xVideoProtocolAdapter (line 12) | class S2xVideoProtocolAdapter(BaseVideoProtocolAdapter):
    method __init__ (line 20) | def __init__(
    method send_start_command (line 43) | def send_start_command(self) -> None:
    method start_keepalive (line 49) | def start_keepalive(self, interval: float = 2.0) -> None:
    method stop_keepalive (line 61) | def stop_keepalive(self) -> None:
    method create_receiver_socket (line 69) | def create_receiver_socket(self) -> socket.socket:
    method get_receiver_socket (line 77) | def get_receiver_socket(self) -> socket.socket:
    method recv_from_socket (line 82) | def recv_from_socket(self, sock: socket.socket) -> Optional[bytes]:
    method handle_payload (line 89) | def handle_payload(self, payload: bytes) -> Optional[VideoFrame]:
    method stop (line 130) | def stop(self) -> None:
    method _keepalive_loop (line 143) | def _keepalive_loop(self, interval: float, stop_event: threading.Event...
    method start (line 149) | def start(self) -> None:
    method is_running (line 178) | def is_running(self) -> bool:
    method get_frame (line 181) | def get_frame(self, timeout: float = 1.0) -> Optional[VideoFrame]:
    method get_packets (line 187) | def get_packets(self) -> List[bytes]:
    method _discover_local_ip (line 193) | def _discover_local_ip(self) -> str:

FILE: backend/protocols/wifi_cam_rc_protocol_adapter.py
  class WifiCamRcProtocolAdapter (line 17) | class WifiCamRcProtocolAdapter(BaseProtocolAdapter):
    method __init__ (line 41) | def __init__(
    method build_control_packet (line 57) | def build_control_packet(self, drone_model: WifiCamRcModel) -> bytes:
    method send_control_packet (line 65) | def send_control_packet(self, packet: bytes) -> None:
    method set_camera_type (line 75) | def set_camera_type(self, camera_type: int) -> None:
    method stop (line 84) | def stop(self) -> None:
    method toggle_debug (line 90) | def toggle_debug(self) -> bool:
    method _build_short_packet (line 95) | def _build_short_packet(self, model: WifiCamRcModel) -> bytes:
    method _build_extended_packet (line 107) | def _build_extended_packet(self, model: WifiCamRcModel) -> bytes:
    method _build_short_flags (line 122) | def _build_short_flags(self, model: WifiCamRcModel) -> int:
    method _build_extended_flags (line 138) | def _build_extended_flags(self, model: WifiCamRcModel) -> tuple[int, i...
    method _clear_one_shot_flags (line 156) | def _clear_one_shot_flags(self, model: WifiCamRcModel) -> None:
    method _uses_extended_mode (line 163) | def _uses_extended_mode(self) -> bool:
    method _normalize_mode (line 170) | def _normalize_mode(self, command_mode: str) -> CommandMode:
    method _axis (line 177) | def _axis(self, value: float) -> int:
    method _right_data (line 180) | def _right_data(self, value: int) -> int:
    method _xor (line 186) | def _xor(self, values: bytes | bytearray) -> int:

FILE: backend/protocols/wifi_cam_video_protocol.py
  class WifiCamVideoProtocolAdapter (line 17) | class WifiCamVideoProtocolAdapter(BaseVideoProtocolAdapter):
    method __init__ (line 35) | def __init__(
    method set_rc_adapter (line 59) | def set_rc_adapter(self, rc_adapter) -> None:
    method start (line 64) | def start(self) -> None:
    method stop (line 78) | def stop(self) -> None:
    method is_running (line 92) | def is_running(self) -> bool:
    method get_frame (line 95) | def get_frame(self, timeout: float = 1.0) -> Optional[VideoFrame]:
    method get_packets (line 101) | def get_packets(self) -> list[bytes]:
    method send_start_command (line 107) | def send_start_command(self) -> None:
    method create_receiver_socket (line 110) | def create_receiver_socket(self) -> socket.socket:
    method handle_payload (line 119) | def handle_payload(self, payload: bytes) -> Optional[VideoFrame]:
    method switch_camera (line 155) | def switch_camera(self) -> None:
    method rotate (line 158) | def rotate(self) -> None:
    method _rx_loop (line 161) | def _rx_loop(self) -> None:
    method _send_command (line 188) | def _send_command(self, command: bytes) -> None:
    method _parse_camera_type (line 197) | def _parse_camera_type(self, payload: bytes) -> Optional[int]:
    method _set_camera_type (line 206) | def _set_camera_type(self, camera_type: int) -> None:
    method _finish_frame (line 214) | def _finish_frame(

FILE: backend/protocols/wifi_uav_rc_protocol_adapter.py
  class WifiUavRcProtocolAdapter (line 9) | class WifiUavRcProtocolAdapter(BaseProtocolAdapter):
    method __init__ (line 53) | def __init__(self,
    method set_socket (line 74) | def set_socket(self, sock: socket.socket) -> None:
    method stop (line 83) | def stop(self) -> None:
    method build_control_packet (line 94) | def build_control_packet(self, drone_model: WifiUavRcModel) -> bytes: ...
    method send_control_packet (line 174) | def send_control_packet(self, packet: bytes):  # type: ignore[override]
    method toggle_debug (line 217) | def toggle_debug(self) -> bool:                # type: ignore[override]
    method _resolve_target_ports (line 223) | def _resolve_target_ports(self, control_port: int) -> tuple[int, ...]:
    method _apply_speed_scale (line 228) | def _apply_speed_scale(self, value: float, speed_index: int) -> int:

FILE: backend/protocols/wifi_uav_video_protocol.py
  class WifiUavVideoProtocolAdapter (line 21) | class WifiUavVideoProtocolAdapter(BaseVideoProtocolAdapter):
    method __init__ (line 48) | def __init__(
    method start_keepalive (line 112) | def start_keepalive(self, interval: float = 1.0) -> None:  # type: ign...
    method stop_keepalive (line 115) | def stop_keepalive(self) -> None:  # type: ignore[override]
    method create_receiver_socket (line 121) | def create_receiver_socket(self) -> socket.socket:
    method send_start_command (line 124) | def send_start_command(self) -> None:
    method handle_payload (line 129) | def handle_payload(self, payload: bytes) -> Optional[VideoFrame]:
    method _warmup_loop (line 212) | def _warmup_loop(self) -> None:
    method _send_frame_request (line 223) | def _send_frame_request(self, frame_id: int) -> None:
    method _build_ack_slots (line 235) | def _build_ack_slots(self, seq: int) -> list[bytes]:
    method _parse_fragment_header (line 238) | def _parse_fragment_header(self, payload: bytes) -> Optional[tuple[int...
    method _resolve_target_ports (line 261) | def _resolve_target_ports(self, control_port: int) -> tuple[int, ...]:
    method _create_duplex_socket (line 271) | def _create_duplex_socket(self) -> socket.socket:
    method get_receiver_socket (line 279) | def get_receiver_socket(self) -> socket.socket:
    method set_rc_adapter (line 284) | def set_rc_adapter(self, rc_adapter) -> None:
    method start (line 297) | def start(self) -> None:
    method is_running (line 349) | def is_running(self) -> bool:
    method get_frame (line 360) | def get_frame(self, timeout: float = 1.0) -> Optional[VideoFrame]:
    method get_packets (line 370) | def get_packets(self) -> list[bytes]:
    method _watchdog_loop (line 379) | def _watchdog_loop(self) -> None:
    method stop (line 425) | def stop(self) -> None:

FILE: backend/receive_video.py
  function discover_local_ip (line 44) | def discover_local_ip(remote_ip=DRONE_IP):
  function send_start_command (line 57) | def send_start_command(drone_ip: str, my_ip: str):
  class ControlKeepAlive (line 67) | class ControlKeepAlive(threading.Thread):
    method __init__ (line 71) | def __init__(self, drone_ip, my_ip, interval=1.0):
    method run (line 78) | def run(self):
    method stop (line 83) | def stop(self):
  class VideoReceiver (line 89) | class VideoReceiver(threading.Thread):
    method __init__ (line 90) | def __init__(
    method stop (line 115) | def stop(self):
    method _reset_frame (line 118) | def _reset_frame(self, new_fid):
    method _finalise_frame (line 123) | def _finalise_frame(self, fid, fragments):
    method run (line 145) | def run(self):
  function display_frames (line 213) | def display_frames(frame_q: queue.Queue):
  function main (line 267) | def main():

FILE: backend/remote_control.py
  class DroneController (line 8) | class DroneController:
    method __init__ (line 9) | def __init__(self, drone_ip, control_port):
    method update_axes (line 49) | def update_axes(self, dt, throttle_dir, yaw_dir, pitch_dir, roll_dir):
    method remap_to_full_range (line 101) | def remap_to_full_range(self, value):
    method build_packet_hy (line 110) | def build_packet_hy(self):
    method send_loop (line 155) | def send_loop(self, interval=0.03):
    method stop_loop (line 191) | def stop_loop(self):
    method toggle_debug (line 194) | def toggle_debug(self):
  function ui_loop (line 200) | def ui_loop(stdscr, controller):
  function main (line 319) | def main():

FILE: backend/services/flight_controller.py
  class FlightController (line 8) | class FlightController:
    method __init__ (line 11) | def __init__(self, drone_model, protocol_adapter, update_rate=80.0):
    method start (line 30) | def start(self):
    method stop (line 36) | def stop(self):
    method set_control_direction (line 45) | def set_control_direction(self, control, direction):
    method set_axes (line 56) | def set_axes(self, throttle: float, yaw: float, pitch: float, roll: fl...
    method set_axes_from (line 63) | def set_axes_from(self, source: str, throttle: float, yaw: float, pitc...
    method _control_loop (line 94) | def _control_loop(self):

FILE: backend/services/video_receiver.py
  class VideoReceiverService (line 14) | class VideoReceiverService:
    method __init__ (line 20) | def __init__(
    method start (line 53) | def start(self) -> None:
    method stop (line 63) | def stop(self) -> None:
    method get_frame_queue (line 75) | def get_frame_queue(self) -> queue.Queue:
    method _receiver_loop (line 79) | def _receiver_loop(self) -> None:
    method _dump_frame (line 144) | def _dump_frame(self, frame: "VideoFrame | bytes | bytearray | memoryv...

FILE: backend/tests/test_cooingdv_jieli_ctp.py
  class CooingdvJieliCtpTests (line 8) | class CooingdvJieliCtpTests(unittest.TestCase):
    method test_builds_little_endian_ctp_packet (line 9) | def test_builds_little_endian_ctp_packet(self):
    method test_flying_control_payload_preserves_tc_bytes (line 21) | def test_flying_control_payload_preserves_tc_bytes(self):

FILE: backend/tests/test_s2x_protocol.py
  class S2xRcProtocolTests (line 7) | class S2xRcProtocolTests(unittest.TestCase):
    method _adapter (line 8) | def _adapter(self):
    method test_default_packet_matches_macrochip_hy_layout (line 18) | def test_default_packet_matches_macrochip_hy_layout(self):
    method test_swap_yaw_roll_changes_transmitted_axes (line 35) | def test_swap_yaw_roll_changes_transmitted_axes(self):
    method test_speed_index_scales_roll_and_pitch_only (line 49) | def test_speed_index_scales_roll_and_pitch_only(self):

FILE: backend/tests/test_s2x_video_protocol.py
  class S2xVideoProtocolTests (line 7) | class S2xVideoProtocolTests(unittest.TestCase):
    method _adapter (line 8) | def _adapter(self):
    method _packet (line 13) | def _packet(self, frame_id: int, total_chunks: int, chunk_id: int, bod...
    method test_emits_frame_when_all_declared_chunks_arrive (line 22) | def test_emits_frame_when_all_declared_chunks_arrive(self):
    method test_accepts_out_of_order_chunks (line 35) | def test_accepts_out_of_order_chunks(self):
    method test_rejects_mismatched_declared_length (line 46) | def test_rejects_mismatched_declared_length(self):

FILE: backend/tests/test_wifi_cam_protocol.py
  class WifiCamRcProtocolTests (line 8) | class WifiCamRcProtocolTests(unittest.TestCase):
    method _adapter (line 9) | def _adapter(self, mode="short"):
    method test_short_packet_matches_base_cmd_layout (line 15) | def test_short_packet_matches_base_cmd_layout(self):
    method test_short_packet_escapes_marker_checksum (line 30) | def test_short_packet_escapes_marker_checksum(self):
    method test_extended_packet_uses_camera_type_two_in_auto_mode (line 42) | def test_extended_packet_uses_camera_type_two_in_auto_mode(self):
  class FakeRcAdapter (line 64) | class FakeRcAdapter:
    method __init__ (line 65) | def __init__(self):
    method set_camera_type (line 68) | def set_camera_type(self, camera_type):
  class WifiCamVideoProtocolTests (line 72) | class WifiCamVideoProtocolTests(unittest.TestCase):
    method test_camera_type_probe_updates_rc_adapter (line 73) | def test_camera_type_probe_updates_rc_adapter(self):
    method test_single_chunk_jpeg_frame (line 84) | def test_single_chunk_jpeg_frame(self):
    method test_multi_chunk_jpeg_frame_uses_native_final_marker (line 97) | def test_multi_chunk_jpeg_frame_uses_native_final_marker(self):
    method test_rejects_final_chunk_without_jpeg_tail (line 111) | def test_rejects_final_chunk_without_jpeg_tail(self):

FILE: backend/utils/cooingdv_jieli_ctp.py
  function build_ctp_packet (line 11) | def build_ctp_packet(
  function parse_ctp_packet (line 39) | def parse_ctp_packet(packet: bytes) -> tuple[str, dict]:

FILE: backend/utils/dropping_queue.py
  class DroppingQueue (line 3) | class DroppingQueue(queue.Queue):
    method put (line 7) | def put(self, item, block=True, timeout=None):
    method put_nowait (line 35) | def put_nowait(self, item):

FILE: backend/utils/logging_config.py
  function _normalise_level (line 14) | def _normalise_level(raw: str | None) -> str:
  function bootstrap_runtime (line 23) | def bootstrap_runtime() -> None:
  function configure_logging (line 47) | def configure_logging(level: str | None = None) -> str:

FILE: backend/utils/wifi_uav_ack_state.py
  class WifiUavFrameSlot (line 18) | class WifiUavFrameSlot:
    method reset (line 29) | def reset(self, seq: int, fragment_total: int, frame_body_len: int, qu...
    method ingest (line 38) | def ingest(
    method is_complete (line 66) | def is_complete(self) -> bool:
    method ordered_payload (line 73) | def ordered_payload(self) -> bytes:
    method mark_delivered (line 76) | def mark_delivered(self) -> None:
    method mark_dropped (line 79) | def mark_dropped(self) -> None:
    method ack_status (line 82) | def ack_status(self) -> int:
    method ack_bitmap (line 91) | def ack_bitmap(self) -> bytes:
    method ack_slot (line 96) | def ack_slot(self) -> bytes:
  class WifiUavAckState (line 100) | class WifiUavAckState:
    method __init__ (line 110) | def __init__(self) -> None:
    method reset (line 116) | def reset(self) -> None:
    method ingest_fragment (line 129) | def ingest_fragment(
    method mark_delivered (line 156) | def mark_delivered(self, seq: int) -> None:
    method mark_dropped (line 163) | def mark_dropped(self, seq: int) -> None:
    method build_ack_slots (line 168) | def build_ack_slots(self, request_seq: int) -> list[bytes]:
    method _slot_for_seq (line 185) | def _slot_for_seq(self, seq: int) -> WifiUavFrameSlot:
    method _find_slot (line 188) | def _find_slot(self, seq: int) -> Optional[WifiUavFrameSlot]:

FILE: backend/utils/wifi_uav_jpeg.py
  function generate_dqt_segment (line 35) | def generate_dqt_segment(id: int, table: List[int], precision: int = 0) ...
  function generate_sof0_segment (line 74) | def generate_sof0_segment(width: int, height: int, num_components: int =...
  function generate_sos_segment (line 139) | def generate_sos_segment(
  function generate_jpeg_headers (line 183) | def generate_jpeg_headers(width: int, height: int, num_components: int =...

FILE: backend/utils/wifi_uav_packets.py
  function build_fragment_ack_bitmap (line 59) | def build_fragment_ack_bitmap(fragment_total: int, received_fragments: s...
  function build_ack_slot (line 73) | def build_ack_slot(seq: int, status: int, bitmap: bytes = b"") -> bytes:
  function build_native_ack_packet (line 84) | def build_native_ack_packet(

FILE: backend/utils/wifi_uav_variants.py
  class WifiUavCapabilities (line 21) | class WifiUavCapabilities:
  function wifi_uav_variant_from_drone_type (line 50) | def wifi_uav_variant_from_drone_type(drone_type: str) -> str:
  function resolve_wifi_uav_variant (line 60) | def resolve_wifi_uav_variant(drone_type: str) -> str:
  function get_wifi_uav_capabilities (line 80) | def get_wifi_uav_capabilities(variant: str) -> WifiUavCapabilities:
  function resolve_wifi_uav_capabilities (line 85) | def resolve_wifi_uav_capabilities(drone_type: str) -> WifiUavCapabilities:
  function map_wifi_uav_variant_from_ssid (line 90) | def map_wifi_uav_variant_from_ssid(ssid: str | None) -> str | None:
  function detect_active_wifi_ssid (line 103) | def detect_active_wifi_ssid() -> str | None:
  function _detect_ssid_windows (line 125) | def _detect_ssid_windows() -> str | None:
  function _detect_ssid_macos (line 147) | def _detect_ssid_macos() -> str | None:
  function _detect_ssid_linux (line 170) | def _detect_ssid_linux() -> str | None:

FILE: backend/video_client.py
  function main (line 13) | def main():

FILE: backend/views/base_video_view.py
  class BaseVideoView (line 3) | class BaseVideoView(ABC):
    method __init__ (line 6) | def __init__(self, frame_queue):
    method run (line 11) | def run(self):
    method stop (line 16) | def stop(self):

FILE: backend/views/cli_rc.py
  class CLIView (line 7) | class CLIView:
    method __init__ (line 10) | def __init__(self, flight_controller):
    method run (line 16) | def run(self):
    method _ui_loop (line 20) | def _ui_loop(self, stdscr):

FILE: backend/views/opencv_video_view.py
  class OpenCVVideoView (line 9) | class OpenCVVideoView(BaseVideoView):
    method __init__ (line 12) | def __init__(self, frame_queue, window_name="Drone Video"):
    method _wakeup_highgui (line 19) | def _wakeup_highgui(self):
    method run (line 29) | def run(self):
    method stop (line 89) | def stop(self):

FILE: backend/web_server.py
  class ConnectionManager (line 48) | class ConnectionManager:
    method __init__ (line 52) | def __init__(self):
    method connect (line 55) | async def connect(self, websocket: WebSocket):
    method disconnect (line 59) | def disconnect(self, websocket: WebSocket):
    method broadcast (line 63) | async def broadcast(self, message: str):
    method broadcast_bytes (line 72) | async def broadcast_bytes(self, message: bytes):
    method broadcast_json (line 81) | async def broadcast_json(self, obj: Any):
  function _control_capabilities_for_drone (line 101) | def _control_capabilities_for_drone(drone_type: str) -> dict[str, bool]:
  class FrameHub (line 148) | class FrameHub:
    method __init__ (line 155) | def __init__(self, per_client_queue_size: int = 2):
    method register (line 160) | async def register(self) -> asyncio.Queue:
    method unregister (line 166) | async def unregister(self, q: asyncio.Queue) -> None:
    method publish (line 170) | async def publish(self, frame: Optional[bytes]) -> None:
  function lifespan (line 193) | async def lifespan(app: FastAPI):
  function get_capabilities (line 431) | async def get_capabilities():
  function get_plugins (line 439) | async def get_plugins():
  function start_plugin (line 450) | async def start_plugin(name: str):
  function stop_plugin (line 475) | async def stop_plugin(name: str):
  function websocket_overlay_endpoint (line 494) | async def websocket_overlay_endpoint(websocket: WebSocket):
  function ws_endpoint (line 505) | async def ws_endpoint(websocket: WebSocket) -> None:
  function _frame_pump_worker (line 579) | def _frame_pump_worker(
  function mjpeg_stream (line 641) | async def mjpeg_stream():
  class OverlayBroadcaster (line 665) | class OverlayBroadcaster:
    method __init__ (line 666) | def __init__(self, q: queue.Queue, loop: asyncio.AbstractEventLoop):
    method start (line 672) | def start(self):
    method stop (line 676) | def stop(self):
    method _run (line 681) | def _run(self):

FILE: experimental/test_e88pro.py
  class VideoStreamThread (line 39) | class VideoStreamThread(QThread):
    method __init__ (line 47) | def __init__(self, rtsp_url):
    method run (line 55) | def run(self):
    method stop (line 114) | def stop(self):
    method reinitialize_stream (line 120) | def reinitialize_stream(self):
  class UDPListenerThread (line 130) | class UDPListenerThread(QThread):
    method __init__ (line 138) | def __init__(self, port, buffer_size):
    method run (line 146) | def run(self):
    method stop (line 180) | def stop(self):
  class RTSPViewerApp (line 192) | class RTSPViewerApp(QMainWindow):
    method __init__ (line 198) | def __init__(self):
    method send (line 269) | def send(self):
    method xor (line 298) | def xor(self,bs):
    method keyPressEvent (line 305) | def keyPressEvent(self, event):
    method update_image (line 366) | def update_image(self, pixmap):
    method update_udp_response (line 372) | def update_udp_response(self, data, addr):
    method show_error_message (line 377) | def show_error_message(self, message):
    method send_udp_command (line 390) | def send_udp_command(self, command):
    method on_up_button_clicked (line 399) | def on_up_button_clicked(self):
    method on_down_button_clicked (line 406) | def on_down_button_clicked(self):
    method closeEvent (line 413) | def closeEvent(self, event):

FILE: frontend/src/App.tsx
  function App (line 8) | function App() {

FILE: frontend/src/components/AxisIndicator.tsx
  type Props (line 1) | interface Props {
  function AxisIndicator (line 8) | function AxisIndicator({ x, y, size = 120, label }: Props) {

FILE: frontend/src/components/CommandButtons.tsx
  type CommandButtonsProps (line 3) | interface CommandButtonsProps {
  function CommandButtons (line 17) | function CommandButtons({

FILE: frontend/src/components/ControlSchemeToggle.tsx
  type Props (line 4) | interface Props {

FILE: frontend/src/components/ControlsOverlay.tsx
  type ControlsOverlayProps (line 6) | interface ControlsOverlayProps {
  function ControlsOverlay (line 17) | function ControlsOverlay({

FILE: frontend/src/components/DrawingOverlay.tsx
  function DrawingOverlay (line 4) | function DrawingOverlay() {

FILE: frontend/src/components/PluginControls.tsx
  function PluginControls (line 3) | function PluginControls() {

FILE: frontend/src/components/SpeedControl.tsx
  type SpeedControlProps (line 3) | interface SpeedControlProps {
  constant TIERS (line 9) | const TIERS: { value: SpeedTier; label: string; help: string }[] = [
  function SpeedControl (line 15) | function SpeedControl({ enabled, value, onChange }: SpeedControlProps) {

FILE: frontend/src/components/StaticDrawingOverlay.tsx
  function StaticDrawingOverlay (line 8) | function StaticDrawingOverlay() {

FILE: frontend/src/components/VideoFeed.tsx
  function VideoFeed (line 1) | function VideoFeed({ src }: { src: string }) {

FILE: frontend/src/hooks/useControls.ts
  type ControlMode (line 5) | type ControlMode = "inc" | "abs" | "mouse";
  type Axes (line 7) | interface Axes {
  type CommandCapabilities (line 14) | interface CommandCapabilities {
  type SpeedTier (line 23) | type SpeedTier = 0 | 1 | 2;
  function useControls (line 26) | function useControls() {

FILE: frontend/src/hooks/useOverlays.ts
  constant OVERLAY_WS_URL (line 3) | const OVERLAY_WS_URL = 'ws://localhost:8000/ws/overlays';
  type OverlayObject (line 5) | interface OverlayObject {
  function useOverlays (line 11) | function useOverlays() {

FILE: frontend/src/hooks/usePlugins.ts
  constant API_BASE_URL (line 4) | const API_BASE_URL = 'http://localhost:8000';
  type PluginState (line 6) | interface PluginState {
  function usePlugins (line 11) | function usePlugins() {

FILE: frontend/src/hooks/useVideoSizing.ts
  constant ZERO_RECT (line 3) | const ZERO_RECT = { x: 0, y: 0, width: 0, height: 0, top: 0, left: 0, ri...
  function useVideoSizing (line 5) | function useVideoSizing() {

FILE: frontend/src/pages/TestPage.tsx
  function TestPage (line 4) | function TestPage() {
Condensed preview — 102 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (532K chars).
[
  {
    "path": ".gitignore",
    "chars": 1998,
    "preview": "# ------------------------\r\n# Python\r\n# ------------------------\r\n\r\n# Byte‐compiled / optimized / DLL files\r\n__pycache__"
  },
  {
    "path": "LICENSE",
    "chars": 11548,
    "preview": "                                 Apache License\r\n                           Version 2.0, January 2004\r\n                 "
  },
  {
    "path": "NOTICE",
    "chars": 700,
    "preview": "turbodrone — reverse-engineered control stack for cheap toy drones\r\nCopyright © 2025 Marshall Richards\r\n\r\nThis product i"
  },
  {
    "path": "README.md",
    "chars": 9494,
    "preview": "# Turbodrone\r\nReverse-engineered API and client for controlling some of the best-selling ~$50 \"toy\" drones on Amazon fro"
  },
  {
    "path": "backend/control/strategies.py",
    "chars": 1175,
    "preview": "from abc import ABC, abstractmethod\r\nfrom typing import Dict\r\n\r\nclass ControlStrategy(ABC):\r\n    \"\"\"Maps user intent → r"
  },
  {
    "path": "backend/main.py",
    "chars": 10644,
    "preview": "#!/usr/bin/env python3\r\nimport logging\r\nimport os\r\n\r\nfrom utils.logging_config import bootstrap_runtime, configure_loggi"
  },
  {
    "path": "backend/models/__init__.py",
    "chars": 66,
    "preview": "from .s2x_rc import S2xDroneModel\r\n\r\n__all__ = ['S2xDroneModel']\r\n"
  },
  {
    "path": "backend/models/base_rc.py",
    "chars": 7268,
    "preview": "from __future__ import annotations\r\n\r\nfrom abc import ABC, abstractmethod\r\nfrom typing import ClassVar, Dict, List, Unio"
  },
  {
    "path": "backend/models/base_video_model.py",
    "chars": 1144,
    "preview": "from abc import ABC, abstractmethod\r\nfrom typing import Optional\r\nfrom models.video_frame import VideoFrame\r\n\r\nclass Bas"
  },
  {
    "path": "backend/models/control_profile.py",
    "chars": 541,
    "preview": "from dataclasses import dataclass\r\n\r\n@dataclass(frozen=True)\r\nclass ControlProfile:\r\n    \"\"\"\r\n    Profile parameters exp"
  },
  {
    "path": "backend/models/cooingdv_rc.py",
    "chars": 4010,
    "preview": "\"\"\"\r\nRC Model for Cooingdv drones (RC UFO, KY UFO, E88 Pro, etc.)\r\n\r\nThese drones use the cooingdv publisher's mobile ap"
  },
  {
    "path": "backend/models/cooingdv_video_model.py",
    "chars": 2165,
    "preview": "\"\"\"\r\nVideo Model for Cooingdv drones.\r\n\r\nThis model is intentionally simple because cooingdv drones use RTSP streaming,\r"
  },
  {
    "path": "backend/models/debug_rc.py",
    "chars": 1650,
    "preview": "from __future__ import annotations\r\nimport logging\r\nfrom models.base_rc import BaseRCModel\r\nfrom models.stick_range impo"
  },
  {
    "path": "backend/models/s2x_rc.py",
    "chars": 2531,
    "preview": "# S20 & S29 Controller model\n\nfrom models.base_rc import BaseRCModel\nfrom models.control_profile import ControlProfile\nf"
  },
  {
    "path": "backend/models/s2x_video_model.py",
    "chars": 3324,
    "preview": "from typing import Dict, Optional\n\nfrom models.video_frame import VideoFrame\nfrom models.base_video_model import BaseVid"
  },
  {
    "path": "backend/models/stick_range.py",
    "chars": 224,
    "preview": "from dataclasses import dataclass\r\n\r\n@dataclass(frozen=True)\r\nclass StickRange:\r\n    \"\"\"Drone-specific raw protocol limi"
  },
  {
    "path": "backend/models/video_frame.py",
    "chars": 578,
    "preview": "import time\r\n\r\nclass VideoFrame:\r\n    \"\"\"Model representing a video frame from the drone\"\"\"\r\n    \r\n    def __init__(self"
  },
  {
    "path": "backend/models/wifi_cam_rc.py",
    "chars": 2338,
    "preview": "\"\"\"\r\nRC model for WiFi_CAM native UDP drones.\r\n\r\nThe stock app uses the same centered 0x80 stick semantics as the Cooing"
  },
  {
    "path": "backend/models/wifi_uav_rc.py",
    "chars": 3580,
    "preview": "from __future__ import annotations\r\n\r\nfrom models.base_rc import BaseRCModel\r\nfrom models.control_profile import Control"
  },
  {
    "path": "backend/models/wifi_uav_video_model.py",
    "chars": 3095,
    "preview": "from collections import defaultdict\r\nfrom typing import Dict, Optional\r\n\r\nfrom models.video_frame import VideoFrame\r\nfro"
  },
  {
    "path": "backend/plugins/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "backend/plugins/base.py",
    "chars": 1842,
    "preview": "from abc import ABC, abstractmethod\r\nfrom typing import Iterator\r\nfrom services.flight_controller import FlightControlle"
  },
  {
    "path": "backend/plugins/follow/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "backend/plugins/follow/follow_controller.py",
    "chars": 2143,
    "preview": "class FollowController:\r\n    \"\"\"\r\n    Calculates yaw/pitch commands to keep a target centered and at a stable distance.\r"
  },
  {
    "path": "backend/plugins/follow/follow_plugin.py",
    "chars": 6583,
    "preview": "import threading\r\nimport cv2\r\nimport numpy as np\r\nfrom ultralytics import YOLO\r\nimport time\r\nimport os\r\nimport logging\r\n"
  },
  {
    "path": "backend/plugins/follow/test_yolo.py",
    "chars": 1666,
    "preview": "import cv2\r\nfrom ultralytics import YOLO\r\n\r\ndef main():\r\n    # Load the YOLOv10 model\r\n    model = YOLO(\"yolov10n.pt\")\r\n"
  },
  {
    "path": "backend/plugins/manager.py",
    "chars": 4764,
    "preview": "import importlib\r\nimport inspect\r\nimport logging\r\nimport pkgutil\r\nimport queue\r\nimport threading\r\nfrom typing import Dic"
  },
  {
    "path": "backend/plugins/test_yaw_plugin.py",
    "chars": 3336,
    "preview": "import threading\nimport time\nimport os\n\nfrom .base import Plugin\nfrom control.strategies import DirectStrategy\n\n\nclass T"
  },
  {
    "path": "backend/protocols/__init__.py",
    "chars": 97,
    "preview": "from .s2x_rc_protocol_adapter import S2xRCProtocolAdapter\r\n\r\n__all__ = ['S2xRCProtocolAdapter']\r\n"
  },
  {
    "path": "backend/protocols/base_protocol_adapter.py",
    "chars": 548,
    "preview": "from abc import ABC, abstractmethod\r\n\r\nclass BaseProtocolAdapter(ABC):\r\n    \"\"\"Base abstract class for drone protocol ad"
  },
  {
    "path": "backend/protocols/base_video_protocol.py",
    "chars": 2468,
    "preview": "from abc import ABC, abstractmethod\r\nimport socket\r\nimport threading\r\nfrom typing import Optional\r\n\r\nfrom models.video_f"
  },
  {
    "path": "backend/protocols/cooingdv_jieli_rc_protocol_adapter.py",
    "chars": 6894,
    "preview": "\"\"\"\r\nRC protocol adapter for the CooingDV/Jieli CTP backend.\r\n\r\nThis is the backend used by KY FPV's Jieli DeviceClient "
  },
  {
    "path": "backend/protocols/cooingdv_jieli_video_protocol.py",
    "chars": 9530,
    "preview": "\"\"\"\r\nFirst-pass video adapter for the CooingDV/Jieli backend.\r\n\r\nKY FPV's Jieli path requests video with CTP JSON comman"
  },
  {
    "path": "backend/protocols/cooingdv_rc_protocol_adapter.py",
    "chars": 17427,
    "preview": "\"\"\"\r\nRC Protocol Adapter for CooingDV drones.\r\n\r\nThe CooingDV apps share a UDP control plane on port 7099, but they do n"
  },
  {
    "path": "backend/protocols/cooingdv_video_protocol.py",
    "chars": 11129,
    "preview": "\"\"\"\r\nVideo Protocol Adapter for Cooingdv drones.\r\n\r\nUses RTSP streaming instead of custom UDP protocol - much simpler th"
  },
  {
    "path": "backend/protocols/debug_rc_protocol_adapter.py",
    "chars": 372,
    "preview": "import logging\r\n\r\nlog = logging.getLogger(__name__)\r\n\r\nclass DebugRcProtocolAdapter:\r\n    \"\"\"Dummy RC protocol adapter f"
  },
  {
    "path": "backend/protocols/debug_video_protocol.py",
    "chars": 3553,
    "preview": "import cv2\r\nimport logging\r\nimport time\r\nimport threading\r\nimport queue\r\nfrom typing import Optional, List\r\n\r\nfrom model"
  },
  {
    "path": "backend/protocols/no_video_protocol.py",
    "chars": 1639,
    "preview": "import logging\r\nimport queue\r\nimport threading\r\nimport time\r\nfrom typing import Optional\r\n\r\nfrom models.video_frame impo"
  },
  {
    "path": "backend/protocols/s2x_rc_protocol_adapter.py",
    "chars": 5007,
    "preview": "from protocols.base_protocol_adapter import BaseProtocolAdapter\nimport socket\n\nclass S2xRCProtocolAdapter(BaseProtocolAd"
  },
  {
    "path": "backend/protocols/s2x_video_protocol.py",
    "chars": 7473,
    "preview": "import ipaddress\nimport socket\nimport threading\nimport queue\nfrom typing import Optional, List\n\nfrom models.s2x_video_mo"
  },
  {
    "path": "backend/protocols/wifi_cam_rc_protocol_adapter.py",
    "chars": 6882,
    "preview": "\"\"\"RC protocol adapter for WiFi_CAM native UDP drones.\"\"\"\r\n\r\nfrom __future__ import annotations\r\n\r\nimport logging\r\nimpor"
  },
  {
    "path": "backend/protocols/wifi_cam_video_protocol.py",
    "chars": 8087,
    "preview": "\"\"\"Video protocol adapter for WiFi_CAM native UDP drones.\"\"\"\r\n\r\nfrom __future__ import annotations\r\n\r\nimport logging\r\nim"
  },
  {
    "path": "backend/protocols/wifi_uav_rc_protocol_adapter.py",
    "chars": 9630,
    "preview": "import socket\r\nfrom typing import Final, List, Optional\r\n\r\nfrom protocols.base_protocol_adapter import BaseProtocolAdapt"
  },
  {
    "path": "backend/protocols/wifi_uav_video_protocol.py",
    "chars": 18710,
    "preview": "import logging\nimport socket\nimport queue\nimport threading\nimport time\nfrom typing import Optional\n\nfrom models.video_fr"
  },
  {
    "path": "backend/receive_video.py",
    "chars": 11661,
    "preview": "import argparse\r\nimport ipaddress\r\nimport queue\r\nimport socket\r\nimport threading\r\nimport time\r\nimport os\r\nfrom datetime "
  },
  {
    "path": "backend/remote_control.py",
    "chars": 13152,
    "preview": "#!/usr/bin/env python3\r\nimport socket\r\nimport threading\r\nimport time\r\nimport argparse\r\nimport curses\r\n\r\nclass DroneContr"
  },
  {
    "path": "backend/requirements.txt",
    "chars": 279,
    "preview": "# Numerical array handling\r\nnumpy>=1.19.2\r\n\r\n# OpenCV bindings (for cv2)\r\nopencv-python>=4.5.1.48\r\n\r\n# FastAPI\r\nfastapi\r"
  },
  {
    "path": "backend/services/__init__.py",
    "chars": 85,
    "preview": "from .flight_controller import FlightController\r\n\r\n__all__ = ['FlightController']\r\n\r\n"
  },
  {
    "path": "backend/services/flight_controller.py",
    "chars": 5828,
    "preview": "import threading\r\nimport time\r\nimport os\r\nimport logging\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\nclass FlightControll"
  },
  {
    "path": "backend/services/video_receiver.py",
    "chars": 6325,
    "preview": "import queue\r\nimport socket\r\nimport threading\r\nimport time\r\nimport os\r\nimport logging\r\n\r\nfrom utils.dropping_queue impor"
  },
  {
    "path": "backend/tests/test_cooingdv_jieli_ctp.py",
    "chars": 1771,
    "preview": "import unittest\r\n\r\nfrom utils.cooingdv_jieli_ctp import build_ctp_packet, parse_ctp_packet\r\nfrom protocols.cooingdv_jiel"
  },
  {
    "path": "backend/tests/test_s2x_protocol.py",
    "chars": 1980,
    "preview": "import unittest\n\nfrom models.s2x_rc import S2xDroneModel\nfrom protocols.s2x_rc_protocol_adapter import S2xRCProtocolAdap"
  },
  {
    "path": "backend/tests/test_s2x_video_protocol.py",
    "chars": 1968,
    "preview": "import unittest\n\nfrom models.s2x_video_model import S2xVideoModel\nfrom protocols.s2x_video_protocol import S2xVideoProto"
  },
  {
    "path": "backend/tests/test_wifi_cam_protocol.py",
    "chars": 4204,
    "preview": "import unittest\r\n\r\nfrom models.wifi_cam_rc import WifiCamRcModel\r\nfrom protocols.wifi_cam_rc_protocol_adapter import Wif"
  },
  {
    "path": "backend/utils/cooingdv_jieli_ctp.py",
    "chars": 1691,
    "preview": "from __future__ import annotations\r\n\r\nimport json\r\nfrom typing import Mapping\r\n\r\n\r\nCTP_SIGNATURE = b\"CTP:\"\r\nOP_PUT = \"PU"
  },
  {
    "path": "backend/utils/dropping_queue.py",
    "chars": 1593,
    "preview": "import queue\r\n\r\nclass DroppingQueue(queue.Queue):\r\n    \"\"\"\r\n    A queue that drops the oldest item when it is full.\r\n   "
  },
  {
    "path": "backend/utils/logging_config.py",
    "chars": 1998,
    "preview": "from __future__ import annotations\n\nimport logging\nimport os\n\nimport dotenv\n\n\n_BOOTSTRAPPED = False\n_CONFIGURED = False\n"
  },
  {
    "path": "backend/utils/wifi_uav_ack_state.py",
    "chars": 6110,
    "preview": "from __future__ import annotations\r\n\r\nfrom dataclasses import dataclass, field\r\nfrom collections import deque\r\nfrom typi"
  },
  {
    "path": "backend/utils/wifi_uav_jpeg.py",
    "chars": 6866,
    "preview": "from typing import List\r\n\r\n# Start Of Image\r\nSOI = bytearray(b\"\\xff\\xd8\")\r\n# End Of Image\r\nEOI = bytearray(b\"\\xff\\xd9\")\r"
  },
  {
    "path": "backend/utils/wifi_uav_packets.py",
    "chars": 4025,
    "preview": "# Start Video Feed\r\nSTART_STREAM = b\"\\xef\\x00\\x04\\x00\"\r\n\r\n# Unknown\r\nUNK_FRAME = b\"\\xef\\x20\\x06\\x00\\x01\\x65\"\r\n\r\n# Drone "
  },
  {
    "path": "backend/utils/wifi_uav_variants.py",
    "chars": 5894,
    "preview": "from __future__ import annotations\r\n\r\nimport os\r\nimport subprocess\r\nimport sys\r\nfrom dataclasses import dataclass\r\nfrom "
  },
  {
    "path": "backend/video_client.py",
    "chars": 3107,
    "preview": "#!/usr/bin/env python3\r\nimport argparse\r\nimport queue\r\nimport signal\r\nimport sys\r\nimport os\r\n\r\nfrom protocols.s2x_video_"
  },
  {
    "path": "backend/views/__init__.py",
    "chars": 56,
    "preview": "\r\nfrom .cli_rc import CLIView\r\n\r\n__all__ = ['CLIView']\r\n"
  },
  {
    "path": "backend/views/base_video_view.py",
    "chars": 427,
    "preview": "from abc import ABC, abstractmethod\r\n\r\nclass BaseVideoView(ABC):\r\n    \"\"\"Base abstract class for video display views\"\"\"\r"
  },
  {
    "path": "backend/views/cli_rc.py",
    "chars": 4515,
    "preview": "# Remote Control using curses cli interface\r\n\r\nimport curses\r\nimport time\r\nfrom services.flight_controller import Flight"
  },
  {
    "path": "backend/views/opencv_video_view.py",
    "chars": 3653,
    "preview": "import cv2\r\nimport numpy as np\r\nimport queue\r\nimport time\r\nimport sys\r\nimport ctypes\r\nfrom views.base_video_view import "
  },
  {
    "path": "backend/web_server.py",
    "chars": 27246,
    "preview": "import asyncio\nimport threading\nimport queue\nimport time\nfrom contextlib import asynccontextmanager\nfrom typing import A"
  },
  {
    "path": "docs/research/S2x.md",
    "chars": 7663,
    "preview": "# Research for the S2x drones (S20, S29, PL FPV)\n\n## Chipset\n\nThe S20 and S29 boards seem to use the [XR872AT](https://j"
  },
  {
    "path": "docs/research/cooingdv.md",
    "chars": 57884,
    "preview": "# CooingDV Protocol Research\r\n\r\nThis note documents the CooingDV-style drone protocol as implemented in\r\nTurboDrone and "
  },
  {
    "path": "docs/research/wifi_cam.md",
    "chars": 11668,
    "preview": "# WiFi_CAM Protocol Research\r\n\r\nThis note documents the decompiled `WiFi_CAM` Android app from\r\n`decompiled-wifi-cam-6.1"
  },
  {
    "path": "docs/research/wifi_uav.md",
    "chars": 43472,
    "preview": "# WiFi-UAV Protocol Research\n\nThis note captures findings from the decompiled WiFi-UAV Android app in\n`wifi-uav-app-deco"
  },
  {
    "path": "experimental/README.md",
    "chars": 416,
    "preview": "# Experimental\nThis directory contains early-stage support for drones that are not yet integrated into the main Turbodro"
  },
  {
    "path": "experimental/test_e88pro.py",
    "chars": 18557,
    "preview": "'''\nAuthor: jithinbp@gmail.com\nFor the E88pro drone which is under 10$\nFeatures: One touch takeoff, land, flip(all ways)"
  },
  {
    "path": "frontend/.gitignore",
    "chars": 253,
    "preview": "# Logs\nlogs\n*.log\nnpm-debug.log*\nyarn-debug.log*\nyarn-error.log*\npnpm-debug.log*\nlerna-debug.log*\n\nnode_modules\ndist\ndis"
  },
  {
    "path": "frontend/eslint.config.js",
    "chars": 734,
    "preview": "import js from '@eslint/js'\nimport globals from 'globals'\nimport reactHooks from 'eslint-plugin-react-hooks'\nimport reac"
  },
  {
    "path": "frontend/index.html",
    "chars": 363,
    "preview": "<!doctype html>\n<html lang=\"en\">\n  <head>\n    <meta charset=\"UTF-8\" />\n    <link rel=\"icon\" type=\"image/svg+xml\" href=\"/"
  },
  {
    "path": "frontend/package.json",
    "chars": 870,
    "preview": "{\n  \"name\": \"frontend\",\n  \"private\": true,\n  \"version\": \"0.0.0\",\n  \"type\": \"module\",\n  \"scripts\": {\n    \"dev\": \"vite\",\n "
  },
  {
    "path": "frontend/postcss.config.cjs",
    "chars": 189,
    "preview": " // postcss.config.cjs  (new)\r\nmodule.exports = {\r\n  plugins: {\r\n    '@tailwindcss/postcss': {},   // ← new v4 plugin\r\n "
  },
  {
    "path": "frontend/src/App.css",
    "chars": 606,
    "preview": "#root {\n  max-width: 1280px;\n  margin: 0 auto;\n  padding: 2rem;\n  text-align: center;\n}\n\n.logo {\n  height: 6em;\n  paddin"
  },
  {
    "path": "frontend/src/App.tsx",
    "chars": 1168,
    "preview": "import { useControls } from './hooks/useControls';\nimport { ControlSchemeToggle } from './components/ControlSchemeToggle"
  },
  {
    "path": "frontend/src/components/AxisIndicator.tsx",
    "chars": 1274,
    "preview": "interface Props {\r\n  x: number;           // -1 … +1 (left … right)\r\n  y: number;           // -1 … +1 (down … up)   – p"
  },
  {
    "path": "frontend/src/components/CommandButtons.tsx",
    "chars": 2522,
    "preview": "import type { CommandCapabilities } from \"../hooks/useControls\";\r\n\r\ninterface CommandButtonsProps {\r\n  droneType: string"
  },
  {
    "path": "frontend/src/components/ControlSchemeToggle.tsx",
    "chars": 2392,
    "preview": "import React from \"react\";\r\nimport type { ControlMode } from \"../hooks/useControls\";\r\n\r\ninterface Props {\r\n  mode: Contr"
  },
  {
    "path": "frontend/src/components/ControlsOverlay.tsx",
    "chars": 2371,
    "preview": "import AxisIndicator from \"./AxisIndicator\";\r\nimport type { Axes, CommandCapabilities, SpeedTier } from \"../hooks/useCon"
  },
  {
    "path": "frontend/src/components/DrawingOverlay.tsx",
    "chars": 1249,
    "preview": "import { useOverlays } from \"../hooks/useOverlays\";\r\nimport { useVideoSizing } from \"../hooks/useVideoSizing\";\r\n\r\nfuncti"
  },
  {
    "path": "frontend/src/components/PluginControls.tsx",
    "chars": 1946,
    "preview": "import { usePlugins } from '../hooks/usePlugins';\r\n\r\nexport function PluginControls() {\r\n  const { pluginsEnabled, avail"
  },
  {
    "path": "frontend/src/components/SpeedControl.tsx",
    "chars": 2186,
    "preview": "import type { SpeedTier } from \"../hooks/useControls\";\r\n\r\ninterface SpeedControlProps {\r\n  enabled: boolean;\r\n  value: S"
  },
  {
    "path": "frontend/src/components/StaticDrawingOverlay.tsx",
    "chars": 902,
    "preview": "import { useOverlays } from \"../hooks/useOverlays\";\r\n\r\n/**\r\n * A simple overlay that assumes its parent container is cor"
  },
  {
    "path": "frontend/src/components/VideoFeed.tsx",
    "chars": 206,
    "preview": "export function VideoFeed({ src }: { src: string }) {\r\n  return (\r\n    <img\r\n      src={src}\r\n      alt=\"Drone video fee"
  },
  {
    "path": "frontend/src/hooks/useControls.ts",
    "chars": 12849,
    "preview": "import { useCallback, useEffect, useRef, useState } from \"react\";\n\n/* ──────────────────────────────────────────────────"
  },
  {
    "path": "frontend/src/hooks/useOverlays.ts",
    "chars": 1436,
    "preview": "import { useState, useEffect, useRef } from 'react';\r\n\r\nconst OVERLAY_WS_URL = 'ws://localhost:8000/ws/overlays';\r\n\r\nexp"
  },
  {
    "path": "frontend/src/hooks/usePlugins.ts",
    "chars": 3059,
    "preview": "import { useState, useEffect, useCallback } from 'react';\r\n\r\n// Assuming your backend runs on port 8000\r\nconst API_BASE_"
  },
  {
    "path": "frontend/src/hooks/useVideoSizing.ts",
    "chars": 2230,
    "preview": "import { useState, useLayoutEffect, useCallback } from \"react\";\r\n\r\nconst ZERO_RECT = { x: 0, y: 0, width: 0, height: 0, "
  },
  {
    "path": "frontend/src/index.css",
    "chars": 1770,
    "preview": "@import \"tailwindcss\";\n\n/* tech-y heading font */\n@import url('https://fonts.googleapis.com/css2?family=Rajdhani:wght@50"
  },
  {
    "path": "frontend/src/main.tsx",
    "chars": 495,
    "preview": "import { StrictMode } from 'react'\nimport { createRoot } from 'react-dom/client'\nimport './index.css'\nimport App from '."
  },
  {
    "path": "frontend/src/pages/TestPage.tsx",
    "chars": 644,
    "preview": "import StaticDrawingOverlay from \"../components/StaticDrawingOverlay\";\r\nimport { VideoFeed } from \"../components/VideoFe"
  },
  {
    "path": "frontend/src/vite-env.d.ts",
    "chars": 38,
    "preview": "/// <reference types=\"vite/client\" />\n"
  },
  {
    "path": "frontend/tailwind.config.ts",
    "chars": 312,
    "preview": "import type { Config } from 'tailwindcss';\r\n\r\nexport default {\r\n  content: [\r\n    './index.html',\r\n    './src/**/*.{js,t"
  },
  {
    "path": "frontend/tsconfig.app.json",
    "chars": 702,
    "preview": "{\n  \"compilerOptions\": {\n    \"tsBuildInfoFile\": \"./node_modules/.tmp/tsconfig.app.tsbuildinfo\",\n    \"target\": \"ES2020\",\n"
  },
  {
    "path": "frontend/tsconfig.json",
    "chars": 119,
    "preview": "{\n  \"files\": [],\n  \"references\": [\n    { \"path\": \"./tsconfig.app.json\" },\n    { \"path\": \"./tsconfig.node.json\" }\n  ]\n}\n"
  },
  {
    "path": "frontend/tsconfig.node.json",
    "chars": 630,
    "preview": "{\n  \"compilerOptions\": {\n    \"tsBuildInfoFile\": \"./node_modules/.tmp/tsconfig.node.tsbuildinfo\",\n    \"target\": \"ES2022\","
  },
  {
    "path": "frontend/vite.config.ts",
    "chars": 161,
    "preview": "import { defineConfig } from 'vite'\nimport react from '@vitejs/plugin-react'\n\n// https://vite.dev/config/\nexport default"
  }
]

About this extraction

This page contains the full source code of the marshallrichards/turbodrone GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 102 files (484.0 KB), approximately 125.2k tokens, and a symbol index with 524 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.

Extracted by GitExtract — a free GitHub repo-to-text converter for AI. Built by Nikandr Surkov.

Copied to clipboard!