Repository: jvdillon/netv
Branch: main
Commit: 9fde1364c4ff
Files: 67
Total size: 1.0 MB
Directory structure:
gitextract_lipmxb5x/
├── .dockerignore
├── .github/
│   └── workflows/
│       ├── ai-upscale.yml
│       ├── ci.yml
│       ├── ffmpeg-base.yml
│       └── release.yml
├── .gitignore
├── Dockerfile
├── Dockerfile.ai_upscale
├── Dockerfile.ffmpeg
├── LICENSE
├── README.md
├── __init__.py
├── auth.py
├── auth_test.py
├── cache.py
├── cache_test.py
├── docker-compose.yml
├── entrypoint-ai_upscale.sh
├── entrypoint.sh
├── epg.py
├── epg_test.py
├── ffmpeg_command.py
├── ffmpeg_command_test.py
├── ffmpeg_session.py
├── ffmpeg_session_test.py
├── m3u.py
├── m3u_test.py
├── main.py
├── main_test.py
├── pyproject.toml
├── static/
│   └── js/
│       ├── app.js
│       ├── favorites-grid.js
│       ├── player.js
│       ├── settings.js
│       └── virtual-guide.js
├── templates/
│   ├── base.html
│   ├── error.html
│   ├── guide.html
│   ├── login.html
│   ├── movie_detail.html
│   ├── player.html
│   ├── search.html
│   ├── series.html
│   ├── series_detail.html
│   ├── settings.html
│   ├── setup.html
│   └── vod.html
├── testing.py
├── tools/
│   ├── alignm3u.py
│   ├── export-tensorrt.py
│   ├── install-ai_upscale.sh
│   ├── install-ffmpeg.sh
│   ├── install-letsencrypt.sh
│   ├── install-netv.sh
│   ├── install-prereqs.sh
│   ├── patches/
│   │   ├── dnn_backend_tensorrt.cpp
│   │   ├── dnn_backend_torch.cpp
│   │   ├── dnn_cuda_kernels.cu
│   │   ├── dnn_cuda_kernels.h
│   │   └── vf_dnn_processing.c
│   ├── uninstall-netv.sh
│   ├── xtream2m3u.py
│   └── zap2xml.py
├── util.py
├── util_test.py
├── xtream.py
└── xtream_test.py
================================================
FILE CONTENTS
================================================
================================================
FILE: .dockerignore
================================================
# Git
.git/
.gitignore
# Python
__pycache__/
*.py[cod]
.venv/
.ruff_cache/
.pytest_cache/
uv.lock
# App data (user-specific)
cache/
.cache/
settings.json
*.pem
# Tools (not needed in container, except specific scripts)
tools/
!tools/install-ffmpeg.sh
!tools/patches/
!tools/install-ai_upscale.sh
!tools/export-tensorrt.py
# Tests
*_test.py
conftest.py
# Docs
*.md
!README.md
screenshots/
LICENSE
# Docker
Dockerfile
docker-compose.yml
.dockerignore
================================================
FILE: .github/workflows/ai-upscale.yml
================================================
name: AI Upscale Image
on:
  workflow_dispatch: # Manual trigger
  push:
    branches: [main]
    paths:
      - "Dockerfile.ai_upscale"
      - "entrypoint-ai_upscale.sh"
      - ".github/workflows/ai-upscale.yml"
  workflow_run:
    # Also trigger after ffmpeg-base completes to pick up new ffmpeg image
    workflows: ["FFmpeg Base Image"]
    types: [completed]
    branches: [main]
env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}-ai-upscale
jobs:
  build:
    runs-on: ubuntu-latest
    # Skip if triggered by failed ffmpeg workflow
    if: ${{ github.event_name != 'workflow_run' || github.event.workflow_run.conclusion == 'success' }}
    permissions:
      contents: read
      packages: write
    strategy:
      matrix:
        include:
          - nvidia: 'cuda:12.4'
            base_image: 'ubuntu:22.04'
          - nvidia: 'cuda:12.6'
            base_image: 'ubuntu:24.04'
          - nvidia: 'cuda:12.8'
            base_image: 'ubuntu:24.04'
          - nvidia: 'cuda:13.0'
            base_image: 'ubuntu:24.04'
            latest: true
    steps:
      - name: Free disk space
        run: |
          # Remove large unnecessary packages to free up space for torch/tensorrt
          sudo rm -rf /usr/share/dotnet /usr/local/lib/android /opt/ghc /opt/hostedtoolcache
          sudo apt-get clean
          df -h
      - uses: actions/checkout@v4
        with:
          ref: ${{ github.event.workflow_run.head_sha || github.sha }}
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Log in to Container Registry
        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Parse CUDA version
        id: cuda
        run: |
          # Extract "12.4" from "cuda:12.4"
          CUDA_VER="${{ matrix.nvidia }}"
          CUDA_VER="${CUDA_VER#cuda:}"
          echo "version=$CUDA_VER" >> $GITHUB_OUTPUT
      - name: Extract metadata
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
            type=raw,value=cuda${{ steps.cuda.outputs.version }}
            type=raw,value=latest,enable=${{ matrix.latest == true }}
      - name: Build and push
        uses: docker/build-push-action@v6
        with:
          context: .
          file: Dockerfile.ai_upscale
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          build-args: |
            FFMPEG_IMAGE=ghcr.io/${{ github.repository }}-ffmpeg:cuda${{ steps.cuda.outputs.version }}
            NVIDIA=${{ matrix.nvidia }}
            BASE_IMAGE=${{ matrix.base_image }}
          no-cache: true
      - name: Verify image
        run: |
          # Free space: BuildKit uses separate storage, prune it before pulling
          docker buildx prune -af || true
          docker system prune -af || true
          df -h
          docker pull ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:cuda${{ steps.cuda.outputs.version }}
          # Verify everything that should be enabled is actually linked
          # Note: --entrypoint bypasses the default entrypoint which tries to build TensorRT engines (requires GPU)
          docker run --rm --entrypoint sh ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:cuda${{ steps.cuda.outputs.version }} -c "
            echo '=== Verifying AI Upscale build ==='
            LDD_OUTPUT=\$(ldd /usr/local/bin/ffmpeg)
            # Verify ffmpeg binaries exist
            for bin in ffmpeg ffprobe ffplay; do
              test -x /usr/local/bin/\$bin || { echo \"ERROR: \$bin not found\"; exit 1; }
              echo \"OK: \$bin exists\"
            done
            # Verify non-NVIDIA dependencies are satisfied
            # Note: All NVIDIA libraries (libnvinfer, libcuda, libcudart) are loaded via dlopen,
            # so ffmpeg works even without NVIDIA GPU/drivers installed
            MISSING=\$(echo \"\$LDD_OUTPUT\" | grep 'not found' | grep -v -E 'libnvinfer|libnvonnxparser|libcudart|libcuda' || true)
            if [ -n \"\$MISSING\" ]; then
              echo 'ERROR: Missing non-NVIDIA libraries:'
              echo \"\$MISSING\"
              exit 1
            fi
            echo 'OK: All non-NVIDIA dependencies satisfied'
            # Verify ffmpeg has NO hard CUDA dependency (all CUDA/TensorRT loaded via dlopen)
            if echo \"\$LDD_OUTPUT\" | grep -q 'libcudart'; then
              echo 'ERROR: libcudart linked at compile time (should use dlopen)'
              exit 1
            fi
            echo 'OK: No libcudart dependency (CUDA Driver API via dlopen)'
            echo 'OK: libnvinfer loaded via dlopen (not in ldd output)'
            # Verify Python AI packages installed (use pip show, not import - import needs CUDA runtime)
            echo '=== Verifying Python packages ==='
            pip3 show torch >/dev/null 2>&1 || { echo 'ERROR: torch not installed'; exit 1; }
            echo 'OK: torch installed'
            pip3 show onnx >/dev/null 2>&1 || { echo 'ERROR: onnx not installed'; exit 1; }
            echo 'OK: onnx installed'
            pip3 show tensorrt >/dev/null 2>&1 || { echo 'ERROR: tensorrt not installed'; exit 1; }
            echo 'OK: tensorrt installed'
            # Verify AI upscale scripts exist
            echo '=== Verifying AI upscale scripts ==='
            test -x /app/tools/install-ai_upscale.sh || { echo 'ERROR: install-ai_upscale.sh not found'; exit 1; }
            echo 'OK: install-ai_upscale.sh exists'
            test -f /app/tools/export-tensorrt.py || { echo 'ERROR: export-tensorrt.py not found'; exit 1; }
            echo 'OK: export-tensorrt.py exists'
            echo ''
            echo '=== All verifications passed ==='
          "
================================================
FILE: .github/workflows/ci.yml
================================================
name: CI
on:
  workflow_dispatch: # Manual trigger
  push:
    branches: [main]
    paths:
      # Only trigger on files used by main Dockerfile
      - "Dockerfile"
      - "*.py"
      - "pyproject.toml"
      - "templates/**"
      - "static/**"
      - "entrypoint.sh"
      - ".github/workflows/ci.yml"
  pull_request:
    branches: [main]
  workflow_run:
    # Trigger after ffmpeg-base completes to pick up new ffmpeg image
    workflows: ["FFmpeg Base Image"]
    types: [completed]
    branches: [main]
env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}
  FFMPEG_IMAGE: ghcr.io/${{ github.repository }}-ffmpeg:latest
jobs:
  test:
    runs-on: ubuntu-latest
    # Skip if triggered by failed ffmpeg workflow
    if: ${{ github.event_name != 'workflow_run' || github.event.workflow_run.conclusion == 'success' }}
    steps:
      - uses: actions/checkout@v4
        with:
          # For workflow_run, checkout the commit that triggered ffmpeg build
          ref: ${{ github.event.workflow_run.head_sha || github.sha }}
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.11"
      - name: Install uv
        uses: astral-sh/setup-uv@v4
      - name: Install dependencies
        run: uv sync --group dev
      - name: Lint with ruff
        run: uv run ruff check .
      - name: Type check with basedpyright
        run: uv run basedpyright
      - name: Run tests
        run: uv run pytest
  build:
    runs-on: ubuntu-latest
    needs: test
    permissions:
      contents: read
      packages: write
    steps:
      - uses: actions/checkout@v4
        with:
          ref: ${{ github.event.workflow_run.head_sha || github.sha }}
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Log in to Container Registry
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Verify base image exists
        run: |
          if ! docker manifest inspect ${{ env.FFMPEG_IMAGE }} > /dev/null 2>&1; then
            echo "ERROR: Base image ${{ env.FFMPEG_IMAGE }} not found"
            echo "Run the 'FFmpeg Base Image' workflow first"
            exit 1
          fi
          echo "Base image verified: ${{ env.FFMPEG_IMAGE }}"
      - name: Extract metadata
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
            type=raw,value=latest,enable={{is_default_branch}}
            type=ref,event=branch
            type=sha,prefix=
      - name: Build and push
        uses: docker/build-push-action@v6
        with:
          context: .
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          build-args: |
            FFMPEG_IMAGE=${{ env.FFMPEG_IMAGE }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
================================================
FILE: .github/workflows/ffmpeg-base.yml
================================================
name: FFmpeg Base Image
on:
  schedule:
    # Build daily at 3 AM UTC
    - cron: "0 3 * * *"
  push:
    branches: [main]
    paths:
      - "Dockerfile.ffmpeg"
      - "tools/install-ffmpeg.sh"
      - ".github/workflows/ffmpeg-base.yml"
  workflow_dispatch:
    inputs:
      ffmpeg_version:
        description: "FFmpeg version (e.g., 7.1 or snapshot)"
        required: false
        default: "snapshot"
env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}-ffmpeg
jobs:
  build:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write
    strategy:
      matrix:
        include:
          - nvidia: 'cuda:12.4'
            base_image: 'ubuntu:22.04'
          - nvidia: 'cuda:12.6'
            base_image: 'ubuntu:24.04'
          - nvidia: 'cuda:12.8'
            base_image: 'ubuntu:24.04'
          - nvidia: 'cuda:13.0'
            base_image: 'ubuntu:24.04'
    steps:
      - uses: actions/checkout@v4
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Log in to Container Registry
        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Parse CUDA version
        id: cuda
        run: |
          # Extract "12.4" from "cuda:12.4"
          CUDA_VER="${{ matrix.nvidia }}"
          CUDA_VER="${CUDA_VER#cuda:}"
          echo "version=$CUDA_VER" >> $GITHUB_OUTPUT
      - name: Generate build date
        id: date
        run: |
          echo "date=$(date -u +'%Y-%m-%d')" >> $GITHUB_OUTPUT
          echo "datetime=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_OUTPUT
      - name: Determine FFmpeg version
        id: version
        run: |
          VERSION="${{ github.event.inputs.ffmpeg_version || 'snapshot' }}"
          echo "version=$VERSION" >> $GITHUB_OUTPUT
          if [ "$VERSION" = "snapshot" ]; then
            echo "tag=${{ steps.date.outputs.date }}" >> $GITHUB_OUTPUT
          else
            echo "tag=$VERSION" >> $GITHUB_OUTPUT
          fi
      - name: Extract metadata
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
            type=raw,value=cuda${{ steps.cuda.outputs.version }}
            type=raw,value=latest,enable=${{ steps.cuda.outputs.version == '13.0' }}
            type=raw,value=${{ steps.date.outputs.date }}-cuda${{ steps.cuda.outputs.version }},enable=${{ steps.version.outputs.version == 'snapshot' }}
      - name: Build and push
        uses: docker/build-push-action@v6
        with:
          context: .
          file: Dockerfile.ffmpeg
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          build-args: |
            BUILD_DATE=${{ steps.date.outputs.datetime }}
            FFMPEG_VERSION=${{ steps.version.outputs.version }}
            NVIDIA=${{ matrix.nvidia }}
            FFMPEG_BASE_IMAGE=${{ matrix.base_image }}
          no-cache: true
      - name: Verify image
        run: |
          docker pull ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:cuda${{ steps.cuda.outputs.version }}
          # Verify ffmpeg build - check binaries and shared library dependencies
          # Note: Static libraries (libx264, libx265, etc.) are compiled into the binary
          # and won't appear in ldd output. If they were missing, the build would have failed.
          docker run --rm ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:cuda${{ steps.cuda.outputs.version }} sh -c "
            echo '=== Verifying ffmpeg build ==='
            # Verify all binaries exist
            for bin in ffmpeg ffprobe ffplay; do
              test -x /usr/local/bin/\$bin || { echo \"ERROR: \$bin not found\"; exit 1; }
              echo \"OK: \$bin exists\"
            done
            # Get ldd output for shared library checks
            LDD_OUTPUT=\$(ldd /usr/local/bin/ffmpeg)
            # Verify non-NVIDIA shared dependencies are satisfied
            MISSING=\$(echo \"\$LDD_OUTPUT\" | grep 'not found' | grep -v -E 'libnvinfer|libnvonnxparser|libcudart|libcuda' || true)
            if [ -n \"\$MISSING\" ]; then
              echo 'ERROR: Missing non-NVIDIA libraries:'
              echo \"\$MISSING\"
              exit 1
            fi
            echo 'OK: All non-NVIDIA shared dependencies satisfied'
            # Verify NVIDIA libraries (all loaded via dlopen - no hard dependencies)
            # - CUDA Driver API (libcuda): loaded via dlopen when TensorRT backend used
            # - TensorRT (libnvinfer): loaded via dlopen when TensorRT backend used
            echo '=== Verifying NVIDIA libraries ==='
            # Verify ffmpeg has NO hard CUDA dependency
            if echo \"\$LDD_OUTPUT\" | grep -q 'libcudart'; then
              echo 'ERROR: libcudart linked at compile time (should use dlopen)'
              exit 1
            fi
            echo 'OK: No libcudart dependency (uses CUDA Driver API via dlopen)'
            ls /usr/lib/x86_64-linux-gnu/libnvinfer.so* >/dev/null 2>&1 || { echo 'ERROR: TensorRT (libnvinfer) not installed'; exit 1; }
            echo 'OK: libnvinfer installed (loaded via dlopen)'
            # Verify libva is linked (we build it as shared for runtime)
            echo \"\$LDD_OUTPUT\" | grep -q libva || { echo 'ERROR: libva not linked'; exit 1; }
            echo 'OK: libva linked'
            echo ''
            echo '=== All verifications passed ==='
          "
================================================
FILE: .github/workflows/release.yml
================================================
name: Release
on:
  push:
    tags:
      - "v*"
env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}
  FFMPEG_IMAGE: ghcr.io/${{ github.repository }}-ffmpeg:latest
jobs:
  release:
    runs-on: ubuntu-latest
    permissions:
      contents: write
      packages: write
    steps:
      - uses: actions/checkout@v4
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Log in to Container Registry
        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Extract metadata
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}
            type=raw,value=latest
      - name: Build and push
        uses: docker/build-push-action@v6
        with:
          context: .
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          build-args: |
            FFMPEG_IMAGE=${{ env.FFMPEG_IMAGE }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
      - name: Generate release notes
        id: notes
        run: |
          echo "## Docker Image" >> notes.md
          echo "" >> notes.md
          echo "Pull the image:" >> notes.md
          echo "\`\`\`bash" >> notes.md
          echo "docker pull ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.ref_name }}" >> notes.md
          echo "\`\`\`" >> notes.md
          echo "" >> notes.md
          echo "## What's Changed" >> notes.md
          git log $(git describe --tags --abbrev=0 HEAD^ 2>/dev/null || git rev-list --max-parents=0 HEAD)..HEAD --pretty=format:"- %s" >> notes.md || true
      - name: Create GitHub Release
        uses: softprops/action-gh-release@v2
        with:
          body_path: notes.md
          generate_release_notes: true
================================================
FILE: .gitignore
================================================
# Python
__pycache__/
*.py[cod]
.venv/
.ruff_cache/
.pytest_cache/
# Debugging
**/*.out
**/*.log
# UV
uv.lock
# App data
.cache/
cache/
# Certificates
*.pem
# Tools - user data and generated files
**/*.gz
**/*.json
**/*.m3u
**/*.xml
tools/.zap2xml/
================================================
FILE: Dockerfile
================================================
# netv application image
#
# Default build uses pre-built FFmpeg with full hardware support:
# docker compose build
#
# Alternative: use apt FFmpeg (fewer codecs, no NVENC/QSV):
# FFMPEG_IMAGE=ubuntu:24.04 docker compose build
#
# The optimized FFmpeg base image includes:
# - NVENC (NVIDIA hardware encoding)
# - VAAPI (Intel/AMD hardware encoding)
# - QSV/VPL (Intel QuickSync)
# - All major codecs (x264, x265, VP9, AV1, etc.)
ARG FFMPEG_IMAGE=ghcr.io/jvdillon/netv-ffmpeg:latest
FROM ${FFMPEG_IMAGE}
ENV DEBIAN_FRONTEND=noninteractive
# Install dependencies
# - If using apt ffmpeg (ubuntu base): install ffmpeg + python
# - If using compiled ffmpeg (netv-ffmpeg base): ffmpeg already present, just install python
# Note: The conditional must be evaluated in shell, not in Dockerfile syntax
RUN apt-get update && \
apt-get install -y --no-install-recommends \
gosu \
python3 \
python3-pip && \
# Conditionally install ffmpeg if not present from base image
if [ ! -x /usr/local/bin/ffmpeg ] && [ ! -x /usr/bin/ffmpeg ]; then \
apt-get install -y --no-install-recommends ffmpeg; \
fi && \
rm -rf /var/lib/apt/lists/*
# App setup
WORKDIR /app
# Copy application files with verification
COPY pyproject.toml README.md ./
COPY *.py ./
COPY templates/ templates/
COPY static/ static/
# Verify critical files exist
RUN test -f pyproject.toml || { echo "ERROR: pyproject.toml not found"; exit 1; }
# Install Python dependencies
# --ignore-installed: avoids "Cannot uninstall X, RECORD file not found" for apt packages
# --break-system-packages: required for PEP 668 (Ubuntu 24.04+), doesn't exist in pip 22.0 (Ubuntu 22.04)
# Using try-fallback approach for maximum compatibility
RUN if python3 -m pip install --help 2>&1 | grep -q -- '--break-system-packages'; then \
python3 -m pip install --no-cache-dir --ignore-installed --break-system-packages .; \
else \
python3 -m pip install --no-cache-dir --ignore-installed .; \
fi
# Runtime config
EXPOSE 8000
# Environment variables (see README for details)
ENV NETV_PORT=8000
ENV NETV_HTTPS=""
ENV LOG_LEVEL=INFO
# Create non-root user (entrypoint handles permissions and group membership)
RUN useradd -m netv
# Copy entrypoint and set permissions with validation
COPY entrypoint.sh /app/
RUN chmod +x /app/entrypoint.sh && \
test -x /app/entrypoint.sh || { echo "ERROR: entrypoint.sh not executable"; exit 1; }
# Healthcheck with improved error handling
# Note: start-period allows time for application startup
HEALTHCHECK --interval=30s --timeout=10s --start-period=10s --retries=3 \
CMD python3 -c "import urllib.request; r=urllib.request.urlopen('http://localhost:8000/', timeout=5); exit(0 if r.status==200 else 1)" 2>/dev/null || exit 1
ENTRYPOINT ["/app/entrypoint.sh"]
================================================
FILE: Dockerfile.ai_upscale
================================================
# netv with AI Upscale (TensorRT super-resolution)
#
# This image includes everything needed for AI upscaling:
# - FFmpeg with TensorRT DNN backend
# - Python + torch + tensorrt for building engines
# - Auto-builds TensorRT engines on first start (GPU-specific)
#
# REQUIREMENTS:
# - Docker BuildKit (DOCKER_BUILDKIT=1 or use docker buildx)
# - NVIDIA GPU with 8GB+ VRAM recommended
# - nvidia-container-toolkit installed on host
#
# Build:
# docker build -f Dockerfile.ai_upscale -t netv-ai-upscale .
#
# Run (engines are cached in volume):
# docker run --gpus all -v netv-models:/models -p 8000:8000 netv-ai-upscale
#
# Note: First start takes ~2-3 minutes to build TensorRT engines for your GPU.
# Subsequent starts are instant (engines cached in /models volume).
ARG FFMPEG_IMAGE=ghcr.io/jvdillon/netv-ffmpeg:latest
FROM ${FFMPEG_IMAGE}
# Build metadata (passed from workflow, used for documentation/debugging)
ARG NVIDIA=cuda:13.0
ARG BASE_IMAGE=ubuntu:24.04
# Store build info as labels
LABEL org.opencontainers.image.description="netv with AI Upscale (TensorRT)"
LABEL ai.netv.cuda="${NVIDIA}"
LABEL ai.netv.base="${BASE_IMAGE}"
ENV DEBIAN_FRONTEND=noninteractive
# Install dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
gosu \
python3 \
python3-pip \
&& rm -rf /var/lib/apt/lists/*
# App setup
WORKDIR /app
# Copy application files with verification
COPY pyproject.toml README.md ./
COPY *.py ./
COPY templates/ templates/
COPY static/ static/
COPY tools/ tools/
# Verify critical files exist
RUN test -f pyproject.toml || { echo "ERROR: pyproject.toml not found"; exit 1; } && \
test -f tools/export-tensorrt.py || { echo "ERROR: export-tensorrt.py not found"; exit 1; }
# Install Python dependencies with version constraints for stability
# --ignore-installed: avoids "Cannot uninstall X, RECORD file not found" for apt packages
# --break-system-packages: required for PEP 668 (Ubuntu 24.04+), doesn't exist in pip 22.0 (Ubuntu 22.04)
# Version constraints: use compatible versions that work together
RUN if python3 -m pip install --help 2>&1 | grep -q -- '--break-system-packages'; then \
PIP_OPTS="--no-cache-dir --ignore-installed --break-system-packages"; \
else \
PIP_OPTS="--no-cache-dir --ignore-installed"; \
fi && \
# Install with minimum version constraints for compatibility
python3 -m pip install $PIP_OPTS \
'torch>=2.1.0' \
'onnx>=1.14.0' \
'tensorrt>=9.0' \
. && \
# Verify packages installed correctly
python3 -c "import torch; import onnx; import tensorrt; print(f'Installed: torch={torch.__version__}, onnx={onnx.__version__}, tensorrt={tensorrt.__version__}')" && \
# Remove Windows-only TensorRT libraries to save ~500MB
# Log what we're deleting for debugging
echo "Removing Windows-only TensorRT libraries..." && \
find /usr -name '*_win_*.so*' -type f 2>/dev/null | head -5 | xargs -I{} echo " Removing: {}" && \
find /usr -name '*_win_*.so*' -type f -delete 2>/dev/null || true && \
find /usr -name '*_win.so*' -type f -delete 2>/dev/null || true && \
echo "Cleanup complete"
# Runtime config
EXPOSE 8000
# Environment variables (see README for details)
ENV NETV_PORT=8000
ENV NETV_HTTPS=""
ENV LOG_LEVEL=INFO
ENV SR_ENGINE_DIR=/models
# Create non-root user
RUN useradd -m netv
# Copy entrypoint and set permissions with validation
COPY entrypoint-ai_upscale.sh /app/entrypoint.sh
RUN chmod +x /app/entrypoint.sh && \
test -x /app/entrypoint.sh || { echo "ERROR: entrypoint.sh not executable"; exit 1; }
# Create models directory with proper permissions (will be a volume mount point)
RUN mkdir -p /models && \
chown netv:netv /models && \
chmod 755 /models && \
test -d /models && test -w /models || { echo "ERROR: /models not writable"; exit 1; }
VOLUME /models
# Healthcheck with improved error handling
# Note: start-period=60s allows time for TensorRT engine compilation on first start
HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
CMD python3 -c "import urllib.request; r=urllib.request.urlopen('http://localhost:8000/', timeout=5); exit(0 if r.status==200 else 1)" 2>/dev/null || exit 1
ENTRYPOINT ["/app/entrypoint.sh"]
================================================
FILE: Dockerfile.ffmpeg
================================================
# FFmpeg Docker image with hardware acceleration (NVENC, VAAPI, QSV, AMF)
#
# REQUIREMENTS:
# - Docker BuildKit (DOCKER_BUILDKIT=1 or use docker buildx)
# - 20GB+ disk space for build
#
# Uses install-ffmpeg.sh as single source of truth for build configuration.
#
# NVIDIA GPU compatibility:
# FFmpeg compiles CUDA code to PTX (parallel thread execution) assembly,
# NOT to GPU-specific SASS binary code. PTX is forward-compatible: the
# NVIDIA driver JIT-compiles PTX to the actual GPU at runtime.
#
# This means a binary built with -arch=sm_52 (Maxwell) runs on ALL GPUs
# from Maxwell through Blackwell and beyond. The only cost is JIT compilation
# on first run (cached by driver for subsequent runs).
#
# For Docker builds, we use NVCC_GENCODE=minimum (sm_52 for CUDA <13, sm_75 for CUDA 13+)
# to maximize GPU compatibility. For local builds, install-ffmpeg.sh defaults to
# NVCC_GENCODE=native for best performance on the build machine.
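#
# Illustrative only (assumed flags, not taken verbatim from this build): the
# PTX-vs-SASS choice corresponds to nvcc's -gencode "code=" target:
#   nvcc -gencode arch=compute_52,code=compute_52 kernel.cu  # embeds PTX; driver JIT-compiles per GPU
#   nvcc -gencode arch=compute_52,code=sm_52 kernel.cu       # embeds SASS; Maxwell-family only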
# =============================================================================
# Builder stage: compile FFmpeg using install-ffmpeg.sh
# =============================================================================
ARG FFMPEG_BASE_IMAGE=ubuntu:24.04
FROM ${FFMPEG_BASE_IMAGE} AS builder
# Build configuration - these are passed to install-ffmpeg.sh via environment
# Hardware acceleration
ARG ENABLE_NVIDIA_CUDA=1
ARG ENABLE_AMD_AMF=1
ARG ENABLE_TENSORRT=1
ARG ENABLE_LIBTORCH=0
ARG LIBTORCH_VERSION=2.5.0
ARG LIBTORCH_VARIANT=cu124
# Library builds
ARG BUILD_LIBPLACEBO=1
ARG LIBPLACEBO_GIT_REF=
ARG BUILD_LIBX265=1
ARG BUILD_LIBAOM=1
ARG BUILD_LIBWEBP=1
ARG BUILD_LIBVPL=1
ARG BUILD_LIBDAV1D=1
ARG BUILD_LIBSVTAV1=1
ARG BUILD_LIBVMAF=1
ARG BUILD_LIBVA=1
ARG BUILD_LIBJXL=1
ARG BUILD_LIBX264=1
ARG FFMPEG_VERSION=snapshot
ARG NVIDIA=cuda:12.8
ARG NVCC_GENCODE=minimum
ENV DEBIAN_FRONTEND=noninteractive
# Override install-ffmpeg.sh paths for container
ENV SRC_DIR=/src
ENV BUILD_DIR=/opt/ffmpeg_build
ENV BIN_DIR=/opt/bin
ENV LIB_DIR=/opt/lib
# Pass build args to script via env
ENV ENABLE_NVIDIA_CUDA=${ENABLE_NVIDIA_CUDA}
ENV ENABLE_AMD_AMF=${ENABLE_AMD_AMF}
ENV ENABLE_TENSORRT=${ENABLE_TENSORRT}
ENV ENABLE_LIBTORCH=${ENABLE_LIBTORCH}
ENV LIBTORCH_VERSION=${LIBTORCH_VERSION}
ENV LIBTORCH_VARIANT=${LIBTORCH_VARIANT}
ENV BUILD_LIBPLACEBO=${BUILD_LIBPLACEBO}
ENV LIBPLACEBO_GIT_REF=${LIBPLACEBO_GIT_REF}
ENV BUILD_LIBX265=${BUILD_LIBX265}
ENV BUILD_LIBAOM=${BUILD_LIBAOM}
ENV BUILD_LIBWEBP=${BUILD_LIBWEBP}
ENV BUILD_LIBVPL=${BUILD_LIBVPL}
ENV BUILD_LIBDAV1D=${BUILD_LIBDAV1D}
ENV BUILD_LIBSVTAV1=${BUILD_LIBSVTAV1}
ENV BUILD_LIBVMAF=${BUILD_LIBVMAF}
ENV BUILD_LIBVA=${BUILD_LIBVA}
ENV BUILD_LIBJXL=${BUILD_LIBJXL}
ENV BUILD_LIBX264=${BUILD_LIBX264}
ENV FFMPEG_VERSION=${FFMPEG_VERSION}
ENV NVIDIA=${NVIDIA}
ENV NVCC_GENCODE=${NVCC_GENCODE}
# Pre-configure timezone to prevent tzdata interactive prompts
ENV DEBIAN_FRONTEND=noninteractive
ENV TZ=Etc/UTC
RUN ln -fs /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
# Install sudo (install-ffmpeg.sh uses it, works as no-op when root)
RUN apt-get update && apt-get install -y sudo
# Copy build script and patches
COPY tools/install-ffmpeg.sh /tmp/
COPY tools/patches /tmp/patches
# Run the build script
# Extract CUDA version from NVIDIA arg (e.g., "cuda:13.0" -> "13.0")
# Note: echo BUILD_ARGS forces cache invalidation when any build arg changes
RUN echo "BUILD_ARGS: NVIDIA=${NVIDIA} FFMPEG_VERSION=${FFMPEG_VERSION} NVCC_GENCODE=${NVCC_GENCODE} \
ENABLE_NVIDIA_CUDA=${ENABLE_NVIDIA_CUDA} ENABLE_AMD_AMF=${ENABLE_AMD_AMF} ENABLE_TENSORRT=${ENABLE_TENSORRT} \
ENABLE_LIBTORCH=${ENABLE_LIBTORCH} BUILD_LIBPLACEBO=${BUILD_LIBPLACEBO} BUILD_LIBX265=${BUILD_LIBX265} \
BUILD_LIBAOM=${BUILD_LIBAOM} BUILD_LIBWEBP=${BUILD_LIBWEBP} BUILD_LIBVPL=${BUILD_LIBVPL} \
BUILD_LIBDAV1D=${BUILD_LIBDAV1D} BUILD_LIBSVTAV1=${BUILD_LIBSVTAV1} BUILD_LIBVMAF=${BUILD_LIBVMAF} \
BUILD_LIBVA=${BUILD_LIBVA} BUILD_LIBJXL=${BUILD_LIBJXL} BUILD_LIBX264=${BUILD_LIBX264}" && \
chmod +x /tmp/install-ffmpeg.sh && \
CUDA_VERSION="${NVIDIA#cuda:}" && \
export CUDA_VERSION && \
/tmp/install-ffmpeg.sh
# =============================================================================
# Runtime stage: minimal image with just FFmpeg binaries
# =============================================================================
ARG FFMPEG_BASE_IMAGE=ubuntu:24.04
FROM ${FFMPEG_BASE_IMAGE}
ARG BUILD_DATE
ARG FFMPEG_VERSION=snapshot
ARG ENABLE_NVIDIA_CUDA=1
ARG ENABLE_AMD_AMF=1
ARG ENABLE_LIBTORCH=0
ARG BUILD_LIBPLACEBO=1
ARG BUILD_LIBX265=1
ARG BUILD_LIBAOM=1
ARG BUILD_LIBWEBP=1
ARG BUILD_LIBVPL=1
ARG BUILD_LIBDAV1D=1
ARG BUILD_LIBSVTAV1=1
ARG BUILD_LIBVMAF=1
ARG BUILD_LIBVA=1
ARG BUILD_LIBJXL=1
ARG BUILD_LIBX264=1
LABEL org.opencontainers.image.created="${BUILD_DATE}"
LABEL org.opencontainers.image.title="netv-ffmpeg"
LABEL org.opencontainers.image.description="FFmpeg with NVENC, VAAPI, QSV, AMF hardware acceleration"
LABEL org.opencontainers.image.version="${FFMPEG_VERSION}"
ENV DEBIAN_FRONTEND=noninteractive
# Add Intel graphics PPA for newer Xe driver support (Ubuntu 24.04+ only)
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
. /etc/os-release && \
if [ "$VERSION_CODENAME" != "jammy" ]; then \
apt-get update && apt-get install -y --no-install-recommends software-properties-common && \
add-apt-repository -y ppa:kobuk-team/intel-graphics && \
apt-get update; \
fi
# Add NVIDIA repo and install TensorRT runtime (for TensorRT DNN backend)
# Note: TensorRT is loaded via dlopen, so ffmpeg works even if these aren't installed,
# but including them means the Docker image works out of the box with --gpus all
ARG ENABLE_TENSORRT=1
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
if [ "$ENABLE_TENSORRT" = "1" ]; then \
. /etc/os-release && \
UBUNTU_VER=$(echo "$VERSION_ID" | tr -d '.') && \
apt-get update && apt-get install -y --no-install-recommends ca-certificates curl && \
curl -fsSL "https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${UBUNTU_VER}/x86_64/cuda-keyring_1.1-1_all.deb" \
-o /tmp/cuda-keyring.deb && \
dpkg -i /tmp/cuda-keyring.deb && rm /tmp/cuda-keyring.deb && \
apt-get update && \
if [ "$VERSION_CODENAME" = "jammy" ]; then \
apt-get install -y --no-install-recommends libnvinfer8 libnvinfer-plugin8; \
else \
apt-get install -y --no-install-recommends libnvinfer10 libnvinfer-plugin10; \
fi && \
rm -rf /var/lib/apt/lists/*; \
fi
# Runtime libraries for FFmpeg
# Note: x265, libaom, libwebp, libvpl, libdav1d are statically linked when built from source
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
. /etc/os-release && \
if [ "$VERSION_CODENAME" = "jammy" ]; then \
LIBVPX=libvpx7; \
LIBSRT=libsrt1.4-openssl; \
LIBUNISTRING=libunistring2; \
LIBASOUND=libasound2; \
LIBSNDIO=libsndio7.0; \
else \
LIBVPX=libvpx9; \
LIBSRT=libsrt1.5-openssl; \
LIBUNISTRING=libunistring5; \
LIBASOUND=libasound2t64; \
LIBSNDIO=libsndio7.0; \
fi && \
apt-get update && apt-get install -y --no-install-recommends \
# Core codec libs
libass9 \
libbluray2 \
libfdk-aac2 \
libmp3lame0 \
libopus0 \
libvorbis0a \
libvorbisenc2 \
$LIBVPX \
# Text/font rendering
libfontconfig1 \
libfreetype6 \
libfribidi0 \
libharfbuzz0b \
# Audio/video processing
librubberband2 \
libsoxr0 \
libvidstab1.1 \
libzimg2 \
libnuma1 \
# Network/crypto
$LIBSRT \
libssl3 \
# X11/display
libxcb1 \
libxcb-shm0 \
libxcb-shape0 \
libxcb-xfixes0 \
libxv1 \
libx11-6 \
libxext6 \
# Hardware accel:
# - NVENC/CUDA: provided by nvidia-container-toolkit from host (no pkg needed)
# - VAAPI: intel-media-va-driver-non-free (Intel), mesa-va-drivers (AMD), libva from source (below)
# - OpenCL: ocl-icd-libopencl1 (ICD loader), backend from host (NVIDIA) or mesa-opencl-icd (AMD)
# - Vulkan: libvulkan1 (conditional below), driver from host
libvdpau1 \
intel-media-va-driver-non-free \
mesa-va-drivers \
ocl-icd-libopencl1 \
# Intel oneVPL/QSV runtime for Intel GPU hardware encoding (modern Intel CPUs)
libmfx-gen1.2 \
# Other deps
zlib1g \
$LIBUNISTRING \
liblzma5 \
liblzo2-2 \
$LIBASOUND \
libdrm2 \
$LIBSNDIO \
libsdl2-2.0-0 \
libpulse0
# Conditional runtime libs for apt-based packages (when not built from source)
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
. /etc/os-release && \
HWY_PKG="libhwy1t64" && \
if [ "$VERSION_CODENAME" = "jammy" ]; then HWY_PKG="libhwy0"; fi && \
APT_PKGS="" && \
[ "$BUILD_LIBX265" != "1" ] && APT_PKGS="$APT_PKGS libx265-199" ; \
[ "$BUILD_LIBAOM" != "1" ] && APT_PKGS="$APT_PKGS libaom3" ; \
[ "$BUILD_LIBWEBP" != "1" ] && APT_PKGS="$APT_PKGS libwebp7 libwebpmux3" ; \
[ "$BUILD_LIBVPL" != "1" ] && APT_PKGS="$APT_PKGS libvpl2" ; \
[ "$BUILD_LIBDAV1D" != "1" ] && APT_PKGS="$APT_PKGS libdav1d7" ; \
[ "$BUILD_LIBSVTAV1" != "1" ] && APT_PKGS="$APT_PKGS libsvtav1enc1d1" ; \
[ "$BUILD_LIBVMAF" != "1" ] && APT_PKGS="$APT_PKGS libvmaf3" ; \
[ "$BUILD_LIBPLACEBO" = "1" ] && APT_PKGS="$APT_PKGS libvulkan1" ; \
[ "$BUILD_LIBVA" != "1" ] && APT_PKGS="$APT_PKGS libva2 libva-drm2 libva-x11-2" ; \
[ "$BUILD_LIBJXL" = "1" ] && APT_PKGS="$APT_PKGS libbrotli1 $HWY_PKG" ; \
[ "$BUILD_LIBJXL" != "1" ] && APT_PKGS="$APT_PKGS libjxl0.7" ; \
[ "$BUILD_LIBX264" != "1" ] && APT_PKGS="$APT_PKGS libx264-164" ; \
if [ -n "$APT_PKGS" ]; then apt-get update && apt-get install -y --no-install-recommends $APT_PKGS; fi
# Tell libva where to find system drivers and which driver to use
# Our libva is built with prefix=/opt/ffmpeg_build but drivers are system-installed
# Default LIBVA_DRIVER_NAME=iHD (Intel iHD driver, supports Xe kernel driver, Gen8+)
# Override at runtime for other GPUs:
# - AMD: LIBVA_DRIVER_NAME=radeonsi
# - Older Intel: LIBVA_DRIVER_NAME=i965
# - Auto-detect: unset LIBVA_DRIVER_NAME (let libva auto-select)
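# Illustrative runtime override (image tag assumed):
#   docker run --device /dev/dri -e LIBVA_DRIVER_NAME=radeonsi \
#     ghcr.io/jvdillon/netv-ffmpeg:latest ffmpeg -hide_banner -hwaccels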
ENV LIBVA_DRIVERS_PATH=/usr/lib/x86_64-linux-gnu/dri
# Note: Set to empty to allow auto-detection, or override at container runtime
ARG LIBVA_DRIVER_NAME_DEFAULT=iHD
ENV LIBVA_DRIVER_NAME=${LIBVA_DRIVER_NAME_DEFAULT}
# Copy FFmpeg binaries from builder and verify they exist
COPY --from=builder /opt/bin/ffmpeg /usr/local/bin/
COPY --from=builder /opt/bin/ffprobe /usr/local/bin/
COPY --from=builder /opt/bin/ffplay /usr/local/bin/
# Verify FFmpeg binaries are executable and have expected size (not empty)
RUN for bin in ffmpeg ffprobe ffplay; do \
if [ ! -x "/usr/local/bin/$bin" ]; then \
echo "ERROR: $bin not executable or not found"; exit 1; \
fi; \
SIZE=$(stat -c%s "/usr/local/bin/$bin" 2>/dev/null || echo 0); \
if [ "$SIZE" -lt 1000000 ]; then \
echo "ERROR: $bin seems too small (${SIZE} bytes), may be corrupt"; exit 1; \
fi; \
done && echo "FFmpeg binaries verified"
# Copy built libva if compiled from source (for Intel Xe kernel driver support)
# libva needs to be in /opt/lib to match the rpath embedded in ffmpeg binary
RUN --mount=type=bind,from=builder,source=/opt/lib,target=/tmp/ffmpeg_libs \
if [ "$BUILD_LIBVA" = "1" ] && [ -f /tmp/ffmpeg_libs/libva.so ]; then \
mkdir -p /opt/lib && \
cp -a /tmp/ffmpeg_libs/libva*.so* /opt/lib/ && \
echo "/opt/lib" > /etc/ld.so.conf.d/ffmpeg.conf && \
ldconfig; \
fi
# Copy VMAF model files if built from source (needed for -vf libvmaf filter)
RUN --mount=type=bind,from=builder,source=/opt/ffmpeg_build/share,target=/tmp/ffmpeg_share \
if [ "$BUILD_LIBVMAF" = "1" ] && [ -d /tmp/ffmpeg_share/libvmaf ]; then \
mkdir -p /usr/local/share && \
cp -a /tmp/ffmpeg_share/libvmaf /usr/local/share/; \
fi
# Copy LibTorch shared libraries if torch enabled (needed for DNN filters)
RUN --mount=type=bind,from=builder,source=/src,target=/tmp/src \
if [ "$ENABLE_LIBTORCH" = "1" ] && [ -d /tmp/src/libtorch/lib ]; then \
mkdir -p /opt/lib && \
cp -a /tmp/src/libtorch/lib/*.so* /opt/lib/ && \
echo "/opt/lib" > /etc/ld.so.conf.d/libtorch.conf && \
ldconfig; \
fi
# Verify all dependencies are satisfied (exclude NVIDIA libs - provided at runtime by nvidia-container-toolkit)
# NVIDIA libraries excluded: libnvinfer, libnvonnxparser, libcudart, libcuda, libnvcuvid, libnvrtc
RUN set -e && \
echo "=== Checking library dependencies ===" && \
LDD_OUTPUT=$(ldd /usr/local/bin/ffmpeg 2>&1) && \
ALL_MISSING=$(echo "$LDD_OUTPUT" | grep "not found" || true) && \
if [ -n "$ALL_MISSING" ]; then \
echo "Libraries reported as 'not found':" && \
echo "$ALL_MISSING" && \
# Filter out NVIDIA libraries (provided by nvidia-container-toolkit at runtime)
MISSING=$(echo "$ALL_MISSING" | grep -v -E "libnv|libcuda|libcublas|libcurand|libcufft" || true) && \
if [ -n "$MISSING" ]; then \
echo "ERROR: Non-NVIDIA libraries missing:" && \
echo "$MISSING" && \
exit 1; \
fi && \
echo "All missing libs are NVIDIA (provided at runtime)"; \
fi && \
echo "All FFmpeg dependencies satisfied"
# Capture ffmpeg capabilities (may fail if TensorRT enabled - NVIDIA libs only available at runtime)
RUN ffmpeg -version > /ffmpeg-version.txt && \
ffmpeg -hide_banner -encoders > /ffmpeg-encoders.txt && \
ffmpeg -hide_banner -decoders > /ffmpeg-decoders.txt && \
ffmpeg -hide_banner -filters > /ffmpeg-filters.txt || \
echo "Skipped (NVIDIA libs not available during build)"
CMD ["ffmpeg", "-version"]
================================================
FILE: LICENSE
================================================
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to the Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2024
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================
FILE: README.md
================================================
# neTV
A minimal, self-hosted web interface for IPTV streams.
## Why This Exists
We built neTV because we couldn't find a clean, lightweight interface for
Xtream IPTV services. Existing solutions were either bloated media centers or
clunky apps that didn't work well across devices.
**neTV is intentionally minimal.** It does one thing: play your IPTV streams
with a clean UI that works on desktop, tablet, mobile, and Chromecast.
We also prioritize **keyboard navigation** throughout (though it's still rough
around the edges). The entire app is theoretically usable with just arrow keys,
Enter, and Escape -- perfect for media PCs, HTPCs, or anyone who prefers
keeping their hands on the keyboard.
### Disclaimer
This is a **player only** -- it does not provide any content. You must have your
own IPTV subscription that provides Xtream Codes API access or M3U playlists.
Users are responsible for ensuring they have legal rights to access any content
through their IPTV providers.
## Features
- **Live TV** with EPG grid guide
- **Movies & Series** with metadata, seasons, episodes
- **AI Upscale** - Real-time 4x upscaling via TensorRT (up to 4K @ 85fps on an RTX 5090)
- **Chromecast** support (HTTPS required)
- **Closed captions** with style customization
- **Search** across all content (supports regex)
- **Favorites** with drag-and-drop ordering
- **Resume playback** for VOD content
- **Responsive** - works on desktop, tablet, mobile
- **Keyboard navigation** - 10-foot UI friendly
### Transcoding
Extensively optimized for minimal latency and CPU usage:
- **Smart passthrough** - h264+aac streams remux without re-encoding (zero CPU; see the sketch after this list)
- **Full GPU pipeline** - NVDEC decode → NVENC/VAAPI encode, CPU stays idle
- **Probe caching** - Streams probed once, series episodes share probe data
- **Interlace detection** - Auto-deinterlaces OTA/cable, skips progressive
- **Smart seeking** - Reuses segments for backward seeks, only transcodes gaps
- **Session recovery** - VOD sessions survive restarts, resume where you left off
- **HTTPS passthrough** - Auto-proxies HTTP streams when behind HTTPS
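For a sense of what smart passthrough means in ffmpeg terms, here is a minimal
sketch (illustrative only -- neTV assembles the real command in
`ffmpeg_command.py`, and the HLS segment settings below are assumptions):
```bash
# Remux a compatible h264+aac stream to HLS without re-encoding.
# "-c copy" passes the compressed packets straight through, so the CPU only
# does demux/mux work -- no decode or encode.
ffmpeg -i "$STREAM_URL" \
  -map 0:v:0 -map 0:a:0 \
  -c copy \
  -f hls -hls_time 4 -hls_list_size 0 \
  session/index.m3u8
```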
### 4K AI Upscaling
Real-time 4x upscaling using Real-ESRGAN via TensorRT. Transforms 480p/720p/1080p
content to pristine 4K at up to 85fps (RTX 5090). Perfect for older shows and low-bitrate streams.
*(Comparison screenshots: before, 720p source; after, 4K AI Upscale.)*
Requires an Nvidia GPU and the [AI Upscale Docker image](#optional-ai-upscaling-nvidia-gpu-only).
The Settings page shows AI Upscale options when TensorRT engines are available.
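Under the hood this goes through FFmpeg's `dnn_processing` filter with the
TensorRT backend added by the patches in `tools/patches/`. A hypothetical
invocation (the backend name and engine path here are assumptions -- neTV
builds the actual filter graph for you):
```bash
# Hypothetical sketch: 4x super-resolution via the patched dnn_processing
# filter, then NVENC encode. Option values are assumptions, not neTV's
# exact command.
ffmpeg -i input_720p.ts \
  -vf "dnn_processing=dnn_backend=tensorrt:model=/models/realesrgan_x4.engine" \
  -c:v hevc_nvenc output_4k.mkv
```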
## Alternatives
If you want a full-featured media center, you might be happier with:
- **[Jellyfin](https://jellyfin.org/)** - Free, open-source media system
- **[Emby](https://emby.media/)** - Media server with IPTV support
- **[Plex](https://plex.tv/)** - Popular media platform with live TV
These are excellent, mature projects with large communities. neTV exists for
users who find them overkill and just want a simple IPTV player.
| | neTV | [nodecast-tv] | [Jellyfin] | [Emby] | [Plex] |
|---|---|---|---|---|---|
| **Focus** | IPTV | IPTV | General media | General media | General media |
| **Xtream Codes** | ✅ | ✅ | ❌ | ❌ | ❌ |
| **M3U playlists** | ✅ | ✅ | ✅ | ✅ | ⚠️ Via [xTeVe] |
| **XMLTV EPG** | ✅ | ⚠️ Via provider | ✅ | ✅ | ✅ |
| **Local media** | ❌ | ❌ | ✅ | ✅ | ✅ |
| **Live TV** | ✅ | ✅ | ✅ | ✅ | ✅ |
| **VOD (movies/series)** | ✅ | ✅ | ✅ | ✅ | ✅ |
| **DVR recording** | ❌ | ❌ | ✅ | ✅ | ⚠️ Pass |
| **Catchup/timeshift** | ❌ | ❌ | ⚠️ Plugin | ⚠️ Plugin | ❌ |
| **Live rewind buffer** | ✅ | ❌ | ⚠️ Via DVR | ⚠️ Via DVR | ⚠️ Via DVR |
| **Resume playback** | ✅ | ❌ | ✅ | ✅ | ✅ |
| **Multi-user** | ✅ | ✅ | ✅ | ✅ | ✅ |
| **User roles** | ⚠️ Admin/viewer | ⚠️ Admin/viewer | ✅ Granular | ✅ Granular | ✅ Granular |
| **Stream limits** | ✅ Per-user, per-source | ❌ | ⚠️ Per-user | ⚠️ Per-user | ⚠️ Per-user |
| **Library permissions** | N/A | N/A | ✅ Per-library | ✅ Per-library | ✅ Per-library |
| **Favorites** | ✅ Drag-and-drop | ✅ | ✅ | ✅ | ✅ |
| **Search** | ✅ Regex | ✅ Basic | ✅ Basic | ✅ Basic | ✅ Basic |
| **Video transcoding** | ✅ | ❌ | ✅ | ✅ | ✅ |
| **Audio transcoding** | ✅ | ✅ | ✅ | ✅ | ✅ |
| **Transcode only if needed** | ✅ Auto mode | ❌ | ⚠️ Per-library | ⚠️ Per-library | ⚠️ Per-client |
| **NVENC** | ✅ | ❌ | ✅ | ✅ | ⚠️ Pass |
| **VAAPI** | ✅ | ❌ | ✅ | ✅ | ⚠️ Pass |
| **QSV** | ✅ | ❌ | ✅ | ✅ | ⚠️ Pass |
| **AI Upscale (4x)** | ✅ TensorRT | ❌ | ⚠️ Plugin | ❌ | ❌ |
| **Software fallback** | ✅ | ❌ Browser | ✅ | ✅ | ✅ |
| **Legacy GPU** | ✅ Any | ❌ No (browser) | ✅ Any | ✅ Any | ⚠️ Driver 450+ |
| **ffprobe caching** | ✅ Dynamic | ❌ None | ⚠️ Offline | ⚠️ Offline | ⚠️ Offline |
| **Episode probe reuse** | ✅ MRU | ❌ No | ⚠️ Per-file | ⚠️ Per-file | ⚠️ Per-file |
| **Session recovery** | ✅ Yes | ❌ No | ⚠️ Via DB | ⚠️ Via DB | ⚠️ Via DB |
| **Auto deinterlace** | ✅ Yes | ❌ No | ⚠️ Manual | ⚠️ Manual | ⚠️ Manual |
| **Subtitles** | ⚠️ WebVTT | ❌ No | ✅ Full | ✅ Full | ✅ Full |
| **Chromecast** | ✅ Yes | ❌ No | ✅ Yes | ✅ Yes | ✅ Yes |
| **Keyboard/remote** | ✅ 10-foot UI | ⚠️ Basic | ✅ 10-foot UI | ✅ 10-foot UI | ✅ 10-foot UI |
| **Mobile apps** | ⚠️ Web only | ⚠️ Web only | ✅ Native | ✅ Native | ✅ Native |
| **Subscription** | ✅ Free | ✅ Free | ✅ Free | ⚠️ Premiere | ⚠️ Pass |
| **Setup complexity** | ✅ Minimal | ✅ Minimal | ⚠️ Moderate | ⚠️ Moderate | ⚠️ Moderate |
| **License** | Apache 2.0 | GPL v3 | GPL v2 | GPL v2 | Proprietary |
| **Stack** | Python, FFmpeg | Node.js | .NET, FFmpeg | .NET, FFmpeg | Proprietary |
*Corrections welcome — [open an issue](https://github.com/jvdillon/netv/issues).*
[nodecast-tv]: https://github.com/technomancer702/nodecast-tv
[Jellyfin]: https://jellyfin.org
[Emby]: https://emby.media
[Plex]: https://plex.tv
[xTeVe]: https://github.com/xteve-project/xTeVe
## Installation
### Docker
Create a `docker-compose.yml`:
```yaml
services:
  netv:
    image: ghcr.io/jvdillon/netv:latest
    ports:
      - "8000:8000"
    volumes:
      - ./cache:/app/cache
      - /etc/localtime:/etc/localtime:ro
    devices:
      - /dev/dri:/dev/dri # for hardware transcoding (remove if no GPU)
    restart: unless-stopped
```
Then run:
```bash
docker compose up -d
```
Open http://localhost:8000. To update: `docker compose pull && docker compose up -d`
#### Optional: Nonfree (proprietary) FFmpeg optimized for Nvidia, AMD, or Intel GPUs
We provide a custom-built ffmpeg with _proprietary support_ for Nvidia, AMD,
and Intel GPUs. Notably, essential packages are built from source and are often
_significantly_ newer than what ships with Ubuntu 24.04 (LTS).
The custom built ffmpeg is not required unless you want:
- best possible GPU performance,
- bleeding edge capability,
- to use AMD discrete GPU,
- realtime AI upscaling (Nvidia only).
Note: the custom-built ffmpeg will generally work even if a dependency is not
available; the corresponding capability is simply disabled while everything
else keeps working. In this sense the custom-built ffmpeg is a "kitchen sink"
build.
| | Custom ffmpeg | Ubuntu ffmpeg |
|---|---|---|
| Intel or AMD Integrated GPU (VAAPI) | ✅ | ✅ |
| Intel Integrated GPU (QSV QuickSync) | ✅ | ✅ |
| Nvidia Discrete GPU (NVENC via LLVM) | ❌ | ✅ |
| Nvidia Discrete GPU (NVENC via nvcc) | ✅ | ❌ |
| AMD Discrete GPU (AMF) | ✅ | ❌ |
| Fraunhofer FDK AAC | ✅ | ❌ |
| Realtime AI Upscale (Nvidia TensorRT/Cuda) | ✅ | ❌ |
| AV1 Vulkan | ✅ | ❌ |
| Torch (Nvidia Cuda) | ⚠️ Optional | ❌ |
For Nvidia, you will need the [nvidia-container-toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html).
To determine which ffmpeg build to use for CUDA, check your driver version and compute capability:
```bash
nvidia-smi --query-gpu=driver_version,compute_cap --format=csv,noheader
# Example: 580.87.02, 8.6 → Driver 580, compute ≥7.5 → use cuda13.0
```
Find your CUDA version ([source](https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html)):
| Driver | < 7.5 (Maxwell/Pascal/Volta) | ≥ 7.5 (Turing+) |
|--------|------------------------------|-----------------|
| 550 | cuda12.4 | cuda12.4 |
| 560 | cuda12.6 | cuda12.6 |
| 570 | cuda12.8 | cuda12.8 |
| 580+ | cuda12.8 | cuda13.0 |
Then run:
```bash
FFMPEG_IMAGE=ghcr.io/jvdillon/netv-ffmpeg:<cuda-version> docker compose --profile nvidia up -d
```
For AMD or Intel GPUs, it does not matter which CUDA version you choose, and you do not need CUDA installed.
#### Optional: AI Upscaling (Nvidia GPU only)
For real-time 2x or 4x AI upscaling (4x: 720p → 4K at ~39fps or 480p → 4K at ~85fps on RTX 5090):
```bash
git clone https://github.com/jvdillon/netv.git
cd netv
docker build -f Dockerfile.ai_upscale -t netv-ai-upscale .
docker run --gpus all -v netv-models:/models -v ./cache:/app/cache -p 8000:8000 netv-ai-upscale
```
First start builds TensorRT engines for your GPU (~2-3 min). Engines are cached in the
`netv-models` volume for instant subsequent starts.
Requirements:
- Nvidia GPU (RTX 20xx or newer recommended)
- [nvidia-container-toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html)
- Driver 535+ (CUDA 12.x)
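To inspect or reset the cached engines, plain Docker volume commands suffice
(nothing neTV-specific here):
```bash
# See which engine files the first start produced
docker run --rm -v netv-models:/models alpine ls -lh /models
# Clear the cache to force an engine rebuild on next start
docker volume rm netv-models
```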
#### Docker Custom Builds
For customization or development:
```bash
git clone https://github.com/jvdillon/netv.git
cd netv
docker compose build # optimized FFmpeg (default)
# FFMPEG_IMAGE=ubuntu:24.04 docker compose build # or stock FFmpeg
docker compose up -d
```
To update: `git pull && docker compose build && docker compose up -d`
#### Options
```bash
NETV_PORT=9000 docker compose up -d # custom port
NETV_HTTPS=1 docker compose up -d # enable HTTPS (mount certs first)
```
### Debian/Ubuntu (`systemd`)
For peak FFmpeg performance, Chromecast support (requires HTTPS), and auto-start:
```bash
# 1. Install prerequisites (uv, Python)
./tools/install-prereqs.sh
# 2. (Optional) Get HTTPS certificates (required for Chromecast)
./tools/install-letsencrypt.sh yourdomain.com
# 3. (Optional) Build FFmpeg (required for optimal Nvidia encoding efficiency)
./tools/install-ffmpeg.sh
# 4. (Optional) Build AI Upscale engines (requires Nvidia GPU + TensorRT)
uv sync --group ai_upscale
./tools/install-ai_upscale.sh
# 5. Install systemd service
sudo ./tools/install-netv.sh # default port=8000 or --port 9000
```
Manage with:
```bash
sudo systemctl status netv # Check status
sudo systemctl restart netv # Restart after updates
journalctl -u netv -f # View logs
sudo systemctl edit netv --full # Change port or other settings
sudo ./tools/uninstall-netv.sh # Uninstall
```
### Development/Testing
Requires Python 3.11+ and [uv](https://docs.astral.sh/uv/):
```bash
git clone https://github.com/jvdillon/netv.git
cd netv
uv run ./main.py --port 8000 # --https
```
Or with pip:
```bash
pip install .
./main.py --port 8000
```
Open http://localhost:8000, create an admin account, and add your IPTV source.
### Additional Gems
There are also some useful applications in `tools/`:
- `zap2xml.py`: Scrape guide data into XML (I `crontab` this at 5am daily).
- `alignm3u.py`: Useful for reworking your HDHomeRun m3u to align with the guide.
- `xtream2m3u.py`: Dump xtream to m3u, useful for making Emby work with IPTV.
## Troubleshooting
### Debug Logging
Enable verbose logs to diagnose EPG, M3U parsing, or other issues.
**Docker:**
In `docker-compose.yml`, change `LOG_LEVEL=INFO` to `LOG_LEVEL=DEBUG`, then restart:
```bash
docker compose down && docker compose up -d
docker compose logs -f
```
**Systemd:**
```bash
sudo systemctl edit netv
```
Add:
```ini
[Service]
Environment="LOG_LEVEL=DEBUG"
```
Then restart and view logs:
```bash
sudo systemctl restart netv
journalctl -u netv -f
```
**Manual / Development:**
```bash
LOG_LEVEL=DEBUG ./main.py
# or
./main.py --debug
```
## Q&A
### Where can I get free IPTV?
Check out [iptv-org/iptv](https://github.com/iptv-org/iptv) -- a community-maintained
collection of publicly available IPTV channels from around the world.
### Where can I get TV guide data?
The free choice is [iptv-org/epg](https://github.com/iptv-org/epg), but this
has never worked reliably for me.
For a more robust solution, consider [Schedules Direct](https://schedulesdirect.org/) --
your membership helps fund Open Source projects.
Alternatively you can use `tools/zap2xml.py`. I've used this for over a year
and found it to be very reliable -- it scrapes guide data from zap2it/gracenote.
### How do I set up HDHomeRun?
HDHomeRun devices provide an M3U playlist, but it lacks EPG channel IDs. Use the
scripts in `tools/` to fetch guide data and align it:
```bash
# 1. Get your HDHomeRun lineup (replace IP with your device's IP)
wget http://192.168.1.87/lineup.m3u -O tools/lineup.m3u
# 2. Fetch TV guide data for your area
./tools/zap2xml.py --zip 90210
# 3. Align the M3U with the guide (adds tvg-id for EPG matching)
./tools/alignm3u.py --input tools/lineup.m3u --xmltv tools/xmltv.xml --output tools/ota.m3u
```
Then add `tools/ota.m3u` as an M3U source in neTV settings.
And set up a cron job to refresh the guide daily (e.g.,
`0 5 * * * /usr/bin/python3 /path/to/netv/tools/zap2xml.py --zip 90210 && cp /path/to/netv/tools/xmltv.xml /var/www/html/`).
### How do I enable hardware transcoding?
Hardware transcoding is auto-detected; check Settings to see available encoders. A sketch of the detection probe follows this list.
- **Intel/AMD (VAAPI)**: Works automatically if `/dev/dri` exists.
- **Nvidia**: Requires [nvidia-container-toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html).
See [Nvidia GPU (NVENC)](#nvidia-gpu-nvenc) installation section for driver/compute compatibility table.
- **No GPU / VPS**: If `/dev/dri` doesn't exist, comment out the `devices` section
in `docker-compose.yml`, or compose will fail to start.
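Under the hood, neTV determines availability by test-running each encoder on a tiny one-frame input (see `detect_encoders` in `cache.py`). A minimal standalone sketch of that probe; note the real code additionally passes hwaccel/device flags for QSV and VAAPI:
```python
import subprocess

def encoder_works(encoder: str) -> bool:
    """Encode one tiny black frame; a zero exit code means the encoder is usable."""
    cmd = [
        "ffmpeg", "-hide_banner", "-loglevel", "error", "-y",
        "-f", "lavfi", "-i", "color=black:s=256x256:d=0.04",
        "-frames:v", "1", "-c:v", encoder, "-f", "null", "-",
    ]
    try:
        return subprocess.run(cmd, capture_output=True, timeout=5).returncode == 0
    except (subprocess.TimeoutExpired, FileNotFoundError):
        return False

for enc in ("h264_nvenc", "h264_amf", "h264_qsv", "h264_vaapi"):
    print(enc, "available" if encoder_works(enc) else "unavailable")
```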
### How do I install CUDA on Ubuntu?
Tested on Ubuntu 24.04 LTS, 25.04, and 25.10.
```bash
# Step 1: Remove existing Nvidia packages
sudo apt purge -y '^nv.*' '^libnv.*' '^cuda-.*' '^libcuda-.*' '^cudnn[0-9]*-.*' '^libcudnn[0-9]*-.*'
sudo apt autoremove -y
# Step 2: Add Nvidia CUDA repository
wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2404/x86_64/cuda-keyring_1.1-1_all.deb
sudo dpkg -i cuda-keyring_1.1-1_all.deb
sudo apt modernize-sources || true
sudo apt update
# Step 3: Install driver and CUDA toolkit
# For Turing+ GPUs (RTX 20 series and newer, compute >=7.5):
sudo apt install -y nvidia-open cuda-toolkit-13 cudnn9-cuda-13 libcudnn9-dev-cuda-13 libnvinfer-bin
# For Maxwell/Pascal GPUs (GTX 900/1000 series, compute <7.5):
# Driver 590 dropped support. Pin to 580 and use CUDA 12.8.
# Note: Maxwell/Pascal requires nvidia-driver (proprietary), not nvidia-open.
# sudo apt install -y nvidia-driver-pinning-580
# sudo apt install -y nvidia-driver-580 cuda-toolkit-12-8 cudnn9-cuda-12-8 libcudnn9-dev-cuda-12 libnvinfer-bin
# sudo update-alternatives --set cuda /usr/local/cuda-12.8
# Step 4: Configure environment
tee -a ~/.bashrc << 'EOF'
export CUDA_HOME=/usr/local/cuda
if [ -d $CUDA_HOME ]; then
export PATH="${CUDA_HOME}/bin${PATH:+:${PATH}}"
export LD_LIBRARY_PATH="${CUDA_HOME}/lib64${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}"
fi
unset CUDA_HOME
EOF
source ~/.bashrc
# Step 5: Verify installation
nvidia-smi --query-gpu=name,compute_cap,driver_version --format=csv,noheader
nvcc --version
```
### What are the keyboard shortcuts?
| Key | Action |
|-----|--------|
| `Space` / `k` | Play/pause |
| `f` | Fullscreen |
| `m` | Mute |
| `c` | Toggle captions |
| `i` | Toggle info overlay |
| `←` / `→` | Seek ±10s |
| `↑` / `↓` | Volume |
| `j` | Jump to time |
| `Esc` | Back / close |
### What does "neTV" mean?
Yes.
We leave pronunciation and meaning as an exercise for your idiom:
- **N-E-T-V** -- "Any TV", say it out loud
- **≠TV** -- "Not Equals TV", because we're `!=` traditional cable
- **Net-V** -- "Net Vision", because it streams video over your network
- **Ni!-TV** -- For the [Knights who say Ni](https://www.youtube.com/watch?v=zIV4poUZAQo)
We will also accept a shrubbery. One that looks nice. And not too expensive.
## Support
If you find neTV useful, consider buying me a coffee:
[](https://buymeacoffee.com/jvdillon)
## License
Apache License 2.0
================================================
FILE: __init__.py
================================================
================================================
FILE: auth.py
================================================
"""Authentication: users, passwords, tokens, JWT."""
from __future__ import annotations
from typing import Any
import hashlib
import hmac
import json
import pathlib
import secrets
import time
APP_DIR = pathlib.Path(__file__).parent
# Use old "cache" if it exists (backwards compat), otherwise ".cache"
_OLD_CACHE = APP_DIR / "cache"
CACHE_DIR = _OLD_CACHE if _OLD_CACHE.exists() else APP_DIR / ".cache"
SERVER_SETTINGS_FILE = CACHE_DIR / "server_settings.json"
USERS_DIR = CACHE_DIR / "users"
TOKEN_EXPIRY = 86400 * 7 # 7 days
def _get_settings_file() -> pathlib.Path:
"""Get the settings file."""
return SERVER_SETTINGS_FILE
def _get_secret_key() -> str:
"""Get or generate secret key (persisted in settings)."""
settings_file = _get_settings_file()
settings = {}
if settings_file.exists():
settings = json.loads(settings_file.read_text())
if "secret_key" not in settings:
settings["secret_key"] = secrets.token_hex(32)
settings_file.write_text(json.dumps(settings, indent=2))
return settings["secret_key"]
def _hash_password(password: str, salt: str | None = None) -> str:
"""Hash password with salt using PBKDF2."""
if salt is None:
salt = secrets.token_hex(16)
key = hashlib.pbkdf2_hmac("sha256", password.encode(), salt.encode(), 100000)
return f"{salt}:{key.hex()}"
def _verify_hashed_password(password: str, hashed: str) -> bool:
"""Verify password against hash."""
if ":" not in hashed:
return False # Invalid hash format
salt, _ = hashed.split(":", 1)
return hmac.compare_digest(_hash_password(password, salt), hashed)
def _get_users() -> dict[str, dict[str, Any]]:
"""Get users from settings. Returns empty dict if no users configured.
User format: {username: {password: str, admin: bool}}
"""
settings_file = _get_settings_file()
if settings_file.exists():
settings = json.loads(settings_file.read_text())
return settings.get("users", {})
return {}
def get_all_usernames() -> list[str]:
"""Get list of all usernames."""
return list(_get_users().keys())
def is_setup_required() -> bool:
"""Check if initial setup is required (no users configured)."""
return len(_get_users()) == 0
def create_user(username: str, password: str, admin: bool = False) -> None:
"""Create a new user with hashed password."""
settings_file = _get_settings_file()
settings = {}
if settings_file.exists():
settings = json.loads(settings_file.read_text())
users = settings.get("users", {})
# First user is always admin
if len(users) == 0:
admin = True
users[username] = {"password": _hash_password(password), "admin": admin}
settings["users"] = users
settings_file.write_text(json.dumps(settings, indent=2))
# Create user directory for per-user settings
user_dir = USERS_DIR / username
user_dir.mkdir(parents=True, exist_ok=True)
def _ensure_one_admin(users: dict[str, dict[str, Any]]) -> None:
"""Ensure at least one user is admin. Promotes first user if needed."""
if not users or any(u.get("admin") for u in users.values()):
return
next(iter(users.values()))["admin"] = True
def delete_user(username: str) -> bool:
"""Delete a user. Returns True if deleted, False if not found."""
settings_file = _get_settings_file()
if not settings_file.exists():
return False
settings = json.loads(settings_file.read_text())
users = settings.get("users", {})
if username not in users:
return False
del users[username]
_ensure_one_admin(users)
settings["users"] = users
settings_file.write_text(json.dumps(settings, indent=2))
return True
def verify_password(username: str, password: str) -> bool:
"""Verify username and password."""
users = _get_users()
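    # Hashing a dummy password for unknown users keeps the timing of this
    # check roughly constant, so failures do not reveal whether a username
    # exists.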
user_data = users.get(username, {"password": _hash_password("dummy")})
stored = user_data["password"]
valid = _verify_hashed_password(password, stored)
return valid and username in users
def change_password(username: str, new_password: str) -> bool:
"""Change a user's password. Returns True if successful."""
settings_file = _get_settings_file()
if not settings_file.exists():
return False
settings = json.loads(settings_file.read_text())
users = settings.get("users", {})
if username not in users:
return False
users[username]["password"] = _hash_password(new_password)
settings["users"] = users
settings_file.write_text(json.dumps(settings, indent=2))
return True
def is_admin(username: str) -> bool:
"""Check if user is an admin."""
users = _get_users()
user_data = users.get(username, {})
return user_data.get("admin", False)
def set_admin(username: str, admin: bool) -> bool:
"""Set admin status for a user. Returns True if successful."""
settings_file = _get_settings_file()
if not settings_file.exists():
return False
settings = json.loads(settings_file.read_text())
users = settings.get("users", {})
if username not in users:
return False
users[username]["admin"] = admin
_ensure_one_admin(users)
settings["users"] = users
settings_file.write_text(json.dumps(settings, indent=2))
return True
def get_users_with_admin() -> list[dict[str, Any]]:
"""Get list of users with their admin status and limits."""
users = _get_users()
return [
{
"username": u,
"admin": d.get("admin", False),
"max_streams_per_source": d.get("max_streams_per_source", {}),
"unavailable_groups": d.get("unavailable_groups", []),
}
for u, d in users.items()
]
def get_user_limits(username: str) -> dict[str, Any]:
"""Get user's stream limits and group restrictions."""
users = _get_users()
user_data = users.get(username, {})
return {
"max_streams_per_source": user_data.get("max_streams_per_source", {}),
"unavailable_groups": user_data.get("unavailable_groups", []),
}
def set_user_limits(
username: str,
max_streams_per_source: dict[str, int] | None = None,
unavailable_groups: list[str] | None = None,
) -> bool:
"""Set user's stream limits and/or group restrictions. Returns True if successful."""
settings_file = _get_settings_file()
if not settings_file.exists():
return False
settings = json.loads(settings_file.read_text())
users = settings.get("users", {})
if username not in users:
return False
if max_streams_per_source is not None:
users[username]["max_streams_per_source"] = max_streams_per_source
if unavailable_groups is not None:
users[username]["unavailable_groups"] = unavailable_groups
settings["users"] = users
settings_file.write_text(json.dumps(settings, indent=2))
return True
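# Token wire format (not a standards-compliant JWT): the JSON payload is
# hex-encoded and signed with HMAC-SHA256, i.e. "<payload-hex>.<signature-hex>".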
def create_token(payload: dict[str, Any]) -> str:
"""Create a signed JWT-like token."""
payload = {**payload, "exp": int(time.time()) + TOKEN_EXPIRY}
data = json.dumps(payload, separators=(",", ":")).encode()
sig = hmac.new(_get_secret_key().encode(), data, hashlib.sha256).hexdigest()
return f"{data.hex()}.{sig}"
def verify_token(token: str) -> dict[str, Any] | None:
"""Verify token and return payload, or None if invalid/expired."""
try:
data_hex, sig = token.split(".")
data = bytes.fromhex(data_hex)
expected = hmac.new(_get_secret_key().encode(), data, hashlib.sha256).hexdigest()
if not hmac.compare_digest(sig, expected):
return None
payload = json.loads(data)
if payload.get("exp", 0) < time.time():
return None
return payload
except Exception:
return None
================================================
FILE: auth_test.py
================================================
"""Tests for auth.py."""
from __future__ import annotations
from pathlib import Path
from unittest import mock
import json
import pytest
@pytest.fixture
def auth_module(tmp_path: Path):
"""Import auth module with temp settings file."""
import auth
# Patch settings files to temp location
original_server = auth.SERVER_SETTINGS_FILE
original_users = auth.USERS_DIR
auth.SERVER_SETTINGS_FILE = tmp_path / "server_settings.json"
auth.USERS_DIR = tmp_path / "users"
auth.USERS_DIR.mkdir(exist_ok=True)
yield auth
auth.SERVER_SETTINGS_FILE = original_server
auth.USERS_DIR = original_users
class TestPasswordHashing:
def test_hash_password_creates_salt(self, auth_module):
hashed = auth_module._hash_password("mypassword")
assert ":" in hashed
salt, key = hashed.split(":")
assert len(salt) == 32 # 16 bytes hex
assert len(key) == 64 # 32 bytes hex
def test_hash_password_with_salt_deterministic(self, auth_module):
salt = "a" * 32
h1 = auth_module._hash_password("test", salt)
h2 = auth_module._hash_password("test", salt)
assert h1 == h2
def test_verify_hashed_password_correct(self, auth_module):
hashed = auth_module._hash_password("secret")
assert auth_module._verify_hashed_password("secret", hashed)
def test_verify_hashed_password_wrong(self, auth_module):
hashed = auth_module._hash_password("secret")
assert not auth_module._verify_hashed_password("wrong", hashed)
class TestUserManagement:
def test_is_setup_required_no_users(self, auth_module):
assert auth_module.is_setup_required()
def test_create_user_and_verify(self, auth_module):
auth_module.create_user("admin", "password123")
assert not auth_module.is_setup_required()
assert auth_module.verify_password("admin", "password123")
assert not auth_module.verify_password("admin", "wrongpass")
assert not auth_module.verify_password("nobody", "password123")
class TestTokens:
def test_create_and_verify_token(self, auth_module):
payload = {"user": "admin", "role": "admin"}
token = auth_module.create_token(payload)
result = auth_module.verify_token(token)
assert result is not None
assert result["user"] == "admin"
assert result["role"] == "admin"
assert "exp" in result
def test_token_format(self, auth_module):
token = auth_module.create_token({"test": 1})
assert "." in token
_, sig = token.split(".")
assert len(sig) == 64 # sha256 hex
def test_invalid_token_rejected(self, auth_module):
assert auth_module.verify_token("invalid") is None
assert auth_module.verify_token("abc.def") is None
assert auth_module.verify_token("") is None
def test_tampered_token_rejected(self, auth_module):
token = auth_module.create_token({"user": "admin"})
# Tamper with signature
data, _ = token.split(".")
tampered = f"{data}.{'0' * 64}"
assert auth_module.verify_token(tampered) is None
def test_expired_token_rejected(self, auth_module):
# Create token with expired time
with mock.patch.object(auth_module, "TOKEN_EXPIRY", -1):
token = auth_module.create_token({"user": "admin"})
assert auth_module.verify_token(token) is None
class TestSecretKey:
def test_get_secret_key_generates_and_persists(self, auth_module):
key1 = auth_module._get_secret_key()
assert len(key1) == 64 # 32 bytes hex
# Should return same key
key2 = auth_module._get_secret_key()
assert key1 == key2
# Should be persisted in the settings file
settings_file = auth_module._get_settings_file()
settings = json.loads(settings_file.read_text())
assert settings["secret_key"] == key1
if __name__ == "__main__":
from testing import run_tests
run_tests(__file__)
================================================
FILE: cache.py
================================================
"""File cache, settings, sources management."""
from __future__ import annotations
from collections.abc import Callable
from dataclasses import dataclass, field
from typing import Any
import hashlib
import json
import logging
import pathlib
import subprocess
import threading
import time
import urllib.parse
log = logging.getLogger(__name__)
# ===========================================================================
# VAAPI Auto-Detection
# ===========================================================================
def _get_gpu_vendor() -> str | None:
"""Detect GPU vendor ID via lspci or sysfs. Returns '8086' (Intel) or '1002' (AMD)."""
# Try lspci first (works on bare metal)
try:
result = subprocess.run(["lspci", "-nn"], capture_output=True, text=True, timeout=5)
for line in result.stdout.splitlines():
if "VGA" in line or "Display" in line or "3D" in line:
if "[8086:" in line:
return "8086"
if "[1002:" in line:
return "1002"
except Exception:
pass
# Fallback: check sysfs (works in containers)
drm_path = pathlib.Path("/sys/class/drm")
if drm_path.exists():
for card in drm_path.iterdir():
if card.name.startswith("card") and card.name[4:].isdigit():
vendor_file = card / "device" / "vendor"
if vendor_file.exists():
vendor = vendor_file.read_text().strip().replace("0x", "")
if vendor in ("8086", "1002"):
return vendor
return None
def _detect_vaapi_device() -> str | None:
"""Auto-detect the VAAPI render device. Returns '/dev/dri/renderD128' or None."""
render = pathlib.Path("/dev/dri/renderD128")
return str(render) if render.exists() else None
def _detect_libva_driver() -> str | None:
"""Auto-detect LIBVA driver name. Returns 'iHD', 'i965', 'radeonsi', or None."""
vendor = _get_gpu_vendor()
if vendor == "8086":
# iHD for Intel Gen8+ (Broadwell 2014+), supports Xe driver
# Fall back to i965 for older Intel GPUs
dri_path = _detect_dri_path()
if dri_path and pathlib.Path(f"{dri_path}/iHD_drv_video.so").exists():
return "iHD"
return "i965"
if vendor == "1002":
return "radeonsi"
return None
def _detect_dri_path() -> str | None:
"""Auto-detect the system DRI drivers path.
Returns path like '/usr/lib/x86_64-linux-gnu/dri' or None.
"""
# Check common locations in order of preference
candidates = [
"/usr/lib/x86_64-linux-gnu/dri", # Debian/Ubuntu
"/usr/lib64/dri", # Fedora/RHEL
"/usr/lib/dri", # Arch
]
for path in candidates:
if pathlib.Path(path).is_dir():
return path
return None
# Cached detection results (computed once at import)
VAAPI_DEVICE = _detect_vaapi_device()
LIBVA_DRIVER = _detect_libva_driver()
DRI_PATH = _detect_dri_path()
APP_DIR = pathlib.Path(__file__).parent
# Use old "cache" if it exists (backwards compat), otherwise ".cache"
_OLD_CACHE = APP_DIR / "cache"
CACHE_DIR = _OLD_CACHE if _OLD_CACHE.exists() else APP_DIR / ".cache"
CACHE_DIR.mkdir(exist_ok=True)
SERVER_SETTINGS_FILE = CACHE_DIR / "server_settings.json"
USERS_DIR = CACHE_DIR / "users"
USERS_DIR.mkdir(exist_ok=True)
LOGOS_DIR = CACHE_DIR / "logos"
LOGOS_DIR.mkdir(exist_ok=True)
# Cache TTLs in seconds
LIVE_CACHE_TTL = 2 * 3600 # 2 hours
EPG_CACHE_TTL = 6 * 3600 # 6 hours
VOD_CACHE_TTL = 12 * 3600 # 12 hours
SERIES_CACHE_TTL = 12 * 3600 # 12 hours
INFO_CACHE_TTL = 7 * 24 * 3600 # 7 days max for series/movie info
INFO_CACHE_STALE = 24 * 3600 # Refresh in background after 24 hours
LOGO_CACHE_TTL = 7 * 24 * 3600 # 7 days for logos (server-side)
LOGO_BROWSER_TTL = 24 * 3600 # 1 day for browser cache (re-validates before server expires)
LOGO_MAX_SIZE = 1024 * 1024 # 1MB max logo size
# In-memory cache
_cache: dict[str, Any] = {}
_cache_lock = threading.Lock()
def _parse_json_file(path: str) -> tuple[Any, float] | None:
"""Parse JSON file - runs in separate process to avoid GIL blocking."""
try:
with open(path) as f:
data = json.load(f)
return data.get("data"), data.get("timestamp", 0)
except Exception:
return None
def load_file_cache(name: str, use_process: bool = False) -> tuple[Any, float] | None:
"""Load cached data from file. Returns (data, timestamp) or None.
Args:
name: Cache file name (without .json extension)
use_process: If True, parse in separate process to avoid GIL blocking
"""
path = CACHE_DIR / f"{name}.json"
if not path.exists():
return None
if use_process:
import concurrent.futures
with concurrent.futures.ProcessPoolExecutor(max_workers=1) as executor:
future = executor.submit(_parse_json_file, str(path))
return future.result(timeout=60)
try:
data = json.loads(path.read_text())
return data.get("data"), data.get("timestamp", 0)
except Exception:
return None
def save_file_cache(name: str, data: Any) -> None:
"""Save data to cache file with current timestamp."""
path = CACHE_DIR / f"{name}.json"
path.write_text(json.dumps({"data": data, "timestamp": time.time()}))
def clear_all_caches() -> None:
"""Clear memory cache except EPG (file cache preserved for restart)."""
with _cache_lock:
epg = _cache.get("epg")
_cache.clear()
if epg:
_cache["epg"] = epg
def clear_all_file_caches() -> int:
"""Clear all data file caches (live, vod, series). Returns count deleted."""
cache_files = ["live_data.json", "vod_data.json", "series_data.json"]
deleted = 0
for name in cache_files:
path = CACHE_DIR / name
if path.exists():
path.unlink()
deleted += 1
# Also clear memory cache
clear_all_caches()
return deleted
def get_cache() -> dict[str, Any]:
"""Get reference to memory cache."""
return _cache
def get_cache_lock() -> threading.Lock:
"""Get cache lock."""
return _cache_lock
def _sanitize_name(name: str) -> str:
"""Sanitize a name for use as a directory/file name."""
# Remove path traversal and special chars
name = name.replace("..", "").replace("/", "_").replace("\\", "_")
name = "".join(c for c in name if c.isalnum() or c in "-_ ")
return name[:224] or "default"
def _url_to_filename(url: str) -> str:
"""Derive a readable filename from URL with hash suffix to avoid collisions."""
# Always include hash suffix to avoid collisions
url_hash = hashlib.md5(url.encode()).hexdigest()[:8]
parsed = urllib.parse.urlparse(url)
path = parsed.path.rstrip("/")
if path:
# Get last path component
name = path.split("/")[-1]
# Strip extension, we'll add our own
if "." in name:
name = name.rsplit(".", 1)[0]
name = _sanitize_name(name)
if name and len(name) >= 2:
return f"{name}_{url_hash}"
return url_hash
def get_cached_logo(source_name: str, url: str) -> pathlib.Path | None:
"""Get cached logo path if valid and not expired. Returns None if not cached."""
safe_source = _sanitize_name(source_name)
filename = _url_to_filename(url)
source_dir = LOGOS_DIR / safe_source
if not source_dir.exists():
return None
# Look for file with any extension
for ext in ("png", "jpg", "jpeg", "gif", "webp", "svg"):
path = source_dir / f"{filename}.{ext}"
if path.exists():
age = time.time() - path.stat().st_mtime
if age < LOGO_CACHE_TTL:
return path
# Expired, delete it
path.unlink(missing_ok=True)
return None
def save_logo(source_name: str, url: str, data: bytes, content_type: str) -> pathlib.Path:
"""Save logo to cache. Returns the saved path."""
safe_source = _sanitize_name(source_name)
filename = _url_to_filename(url)
source_dir = LOGOS_DIR / safe_source
source_dir.mkdir(parents=True, exist_ok=True)
# Determine extension from content-type
ext_map = {
"image/png": "png",
"image/jpeg": "jpg",
"image/gif": "gif",
"image/webp": "webp",
"image/svg+xml": "svg",
}
ext = ext_map.get(content_type.split(";")[0].strip(), "png")
path = source_dir / f"{filename}.{ext}"
# Atomic write: write to temp file then rename
tmp = path.with_suffix(".tmp")
tmp.write_bytes(data)
tmp.rename(path)
return path
def get_cached_info(cache_key: str, fetch_fn: Callable[[], Any], force: bool = False) -> Any:
    """Get info from memory cache, file cache, or fetch. Stale-while-revalidate."""
    cached = load_file_cache(cache_key)
    cached_data, cached_ts = cached if cached else (None, 0)
    age = time.time() - cached_ts
    if force and cached_data:
        _cache.pop(cache_key, None)
        cached_data = None

    def bg_refresh() -> None:
        """Refresh the cache in a background thread (stale-while-revalidate)."""
        try:
            data = fetch_fn()
            _cache[cache_key] = data
            save_file_cache(cache_key, data)
            log.info("Background refreshed %s", cache_key)
        except Exception as e:
            log.warning("Background refresh failed for %s: %s", cache_key, e)

    if cache_key in _cache and not force:
        if cached_ts and age > INFO_CACHE_STALE:
            threading.Thread(target=bg_refresh, daemon=True).start()
        return _cache[cache_key]
    if cached_data and age < INFO_CACHE_TTL:
        _cache[cache_key] = cached_data
        if age > INFO_CACHE_STALE:
            threading.Thread(target=bg_refresh, daemon=True).start()
        return cached_data
    data = fetch_fn()
    _cache[cache_key] = data
    save_file_cache(cache_key, data)
    return data
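# Illustrative use of get_cached_info (fetch_series_info is hypothetical):
#   info = get_cached_info("series_123", lambda: fetch_series_info(123))
# Returns cached data immediately and, once older than INFO_CACHE_STALE,
# refreshes it in a background thread.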
def _test_encoder(cmd: list[str], timeout: int = 5, env: dict | None = None) -> tuple[bool, str]:
"""Test if an encoder works. Returns (success, error_message)."""
try:
run_env = None
if env:
import os
run_env = os.environ.copy()
run_env.update(env)
result = subprocess.run(cmd, capture_output=True, timeout=timeout, env=run_env)
if result.returncode == 0:
return True, ""
stderr = result.stderr.decode(errors="replace").strip()
# Extract the most relevant error line
for line in stderr.split("\n"):
if line and not line.startswith("["):
return False, line
return False, stderr if stderr else "unknown error"
except subprocess.TimeoutExpired:
return False, "timeout"
except FileNotFoundError:
return False, "ffmpeg not found"
except Exception as e:
return False, str(e)
def detect_encoders() -> dict[str, bool]:
"""Detect available FFmpeg H.264 encoders by testing actual hardware."""
log.info("Detecting hardware encoders...")
encoders = {
"nvenc": False,
"amf": False,
"qsv": False,
"vaapi": False,
}
# Test input: 1 frame of 256x256 black (64x64 is below NVENC minimum on newer GPUs)
test_input = ["-f", "lavfi", "-i", "color=black:s=256x256:d=0.04", "-frames:v", "1"]
base_cmd = ["ffmpeg", "-hide_banner", "-loglevel", "error", "-y"]
null_out = ["-f", "null", "-"]
# NVENC: try nvenc directly
ok, err = _test_encoder(base_cmd + test_input + ["-c:v", "h264_nvenc"] + null_out)
encoders["nvenc"] = ok
if ok:
log.info(" NVENC (h264_nvenc): available")
else:
log.info(" NVENC (h264_nvenc): unavailable - %s", err)
# AMF: try amf directly
ok, err = _test_encoder(base_cmd + test_input + ["-c:v", "h264_amf"] + null_out)
encoders["amf"] = ok
if ok:
log.info(" AMF (h264_amf): available")
else:
log.info(" AMF (h264_amf): unavailable - %s", err)
# QSV: needs hwaccel init
ok, err = _test_encoder(
base_cmd
+ ["-hwaccel", "qsv", "-hwaccel_output_format", "qsv"]
+ test_input
+ ["-c:v", "h264_qsv"]
+ null_out
)
encoders["qsv"] = ok
if ok:
log.info(" QSV (h264_qsv): available")
else:
log.info(" QSV (h264_qsv): unavailable - %s", err)
# VA-API: needs device, hwupload, and driver env vars for hybrid GPU systems
vaapi_baseline_only = False
if VAAPI_DEVICE and LIBVA_DRIVER and DRI_PATH:
vaapi_env = {
"LIBVA_DRIVER_NAME": LIBVA_DRIVER,
"LIBVA_DRIVERS_PATH": DRI_PATH,
}
# Try high profile first, fall back to constrained_baseline for older GPUs
ok, err = _test_encoder(
base_cmd
+ ["-init_hw_device", f"vaapi=va:{VAAPI_DEVICE}"]
+ test_input
+ ["-vf", "format=nv12,hwupload", "-c:v", "h264_vaapi"]
+ null_out,
env=vaapi_env,
)
if not ok:
# Some older AMD GPUs (GCN 1.0) only support baseline profile
ok, err = _test_encoder(
base_cmd
+ ["-init_hw_device", f"vaapi=va:{VAAPI_DEVICE}"]
+ test_input
+ [
"-vf",
"format=nv12,hwupload",
"-c:v",
"h264_vaapi",
"-profile:v",
"constrained_baseline",
]
+ null_out,
env=vaapi_env,
)
if ok:
vaapi_baseline_only = True
encoders["vaapi"] = ok
encoders["vaapi_baseline_only"] = vaapi_baseline_only
if ok:
profile_note = " (baseline only)" if vaapi_baseline_only else ""
log.info(
" VAAPI (h264_vaapi): available%s (device=%s, driver=%s)",
profile_note,
VAAPI_DEVICE,
LIBVA_DRIVER,
)
else:
log.info(" VAAPI (h264_vaapi): unavailable - %s", err)
else:
log.info(" VAAPI (h264_vaapi): unavailable - no Intel/AMD GPU detected")
return encoders
AVAILABLE_ENCODERS = detect_encoders()
def refresh_encoders() -> dict[str, bool]:
"""Re-detect available encoders and update the cache."""
global AVAILABLE_ENCODERS
AVAILABLE_ENCODERS = detect_encoders()
return AVAILABLE_ENCODERS
def _default_encoder() -> str:
"""Return first available encoder option.
Preference order: nvenc > amf > qsv > vaapi > software
For nvenc/amf, prefer +vaapi fallback if VAAPI is available.
"""
if AVAILABLE_ENCODERS.get("nvenc"):
return "nvenc+vaapi" if AVAILABLE_ENCODERS.get("vaapi") else "nvenc+software"
if AVAILABLE_ENCODERS.get("amf"):
return "amf+vaapi" if AVAILABLE_ENCODERS.get("vaapi") else "amf+software"
if AVAILABLE_ENCODERS.get("qsv"):
return "qsv"
if AVAILABLE_ENCODERS.get("vaapi"):
return "vaapi"
return "software"
@dataclass(slots=True)
class Source:
id: str
name: str
type: str # "xtream", "m3u", or "epg"
url: str
username: str = ""
password: str = ""
epg_timeout: int = 120 # seconds
epg_schedule: list[str] = field(default_factory=list) # ["03:00", "15:00"]
epg_enabled: bool = True # Whether to fetch EPG from this source
epg_url: str = "" # EPG URL (auto-detected from M3U/Xtream, or manual override)
deinterlace_fallback: bool = True # Deinterlace when probe is skipped (for OTA/HDHomeRun)
max_streams: int = 0 # Max concurrent streams from this source (0 = unlimited)
def load_server_settings() -> dict[str, Any]:
"""Load server-wide settings."""
if SERVER_SETTINGS_FILE.exists():
data: dict[str, Any] = json.loads(SERVER_SETTINGS_FILE.read_text())
else:
data = {}
data.setdefault("transcode_mode", "auto")
# Migrate old transcode_hw values to new format
old_hw = data.get("transcode_hw", "")
if old_hw == "nvidia":
data["transcode_hw"] = (
"nvenc+vaapi" if AVAILABLE_ENCODERS.get("vaapi") else "nvenc+software"
)
elif old_hw == "intel":
data["transcode_hw"] = "qsv"
# "vaapi" and "software" remain unchanged
data.setdefault("transcode_hw", _default_encoder())
data.setdefault("vod_transcode_cache_mins", 60)
# 0 = no caching (dead sessions cleaned immediately)
data.setdefault("live_transcode_cache_secs", 0)
data.setdefault("live_dvr_mins", 0) # 0 = disabled (default 30 sec buffer)
data.setdefault("transcode_dir", "") # Empty = system temp dir
data.setdefault("probe_live", True)
data.setdefault("probe_movies", True)
data.setdefault("probe_series", False)
data.setdefault("sources", [])
data.setdefault("users", {})
data.setdefault("user_agent_preset", "tivimate")
data.setdefault("user_agent_custom", "")
return data
def save_server_settings(settings: dict[str, Any]) -> None:
"""Save server-wide settings."""
SERVER_SETTINGS_FILE.write_text(json.dumps(settings, indent=2))
def _validate_username(username: str) -> None:
"""Validate username to prevent path traversal and length attacks."""
if (
not username
or len(username) > 64
or ".." in username
or "/" in username
or "\\" in username
):
raise ValueError("Invalid username")
def load_user_settings(username: str) -> dict[str, Any]:
"""Load per-user settings."""
_validate_username(username)
user_file = USERS_DIR / username / "settings.json"
if user_file.exists():
data = json.loads(user_file.read_text())
else:
data = {}
data.setdefault("guide_filter", [])
data.setdefault("captions_enabled", True)
data.setdefault("watch_history", {})
data.setdefault("favorites", {"series": {}, "movies": {}})
data.setdefault("cc_lang", "")
data.setdefault("cc_style", {})
data.setdefault("cast_host", "")
return data
def save_user_settings(username: str, settings: dict[str, Any]) -> None:
"""Save per-user settings."""
_validate_username(username)
user_dir = USERS_DIR / username
user_dir.mkdir(exist_ok=True)
(user_dir / "settings.json").write_text(json.dumps(settings, indent=2))
def get_watch_position(username: str, stream_url: str) -> dict[str, Any] | None:
"""Get saved watch position for a stream. Returns None if not found or >=95% watched."""
settings = load_user_settings(username)
history = settings.get("watch_history", {})
entry = history.get(stream_url)
if not entry:
return None
# Reset if >=95% watched
if entry.get("duration", 0) > 0:
pct = entry.get("position", 0) / entry["duration"]
if pct >= 0.95:
return None
return entry
def save_watch_position(username: str, stream_url: str, position: float, duration: float) -> None:
"""Save watch position for a stream."""
settings = load_user_settings(username)
history = settings.setdefault("watch_history", {})
history[stream_url] = {
"position": position,
"duration": duration,
"updated": time.time(),
}
# Keep only last 200 entries
if len(history) > 200:
sorted_entries = sorted(history.items(), key=lambda x: x[1].get("updated", 0), reverse=True)
settings["watch_history"] = dict(sorted_entries[:200])
save_user_settings(username, settings)
def get_sources() -> list[Source]:
"""Get list of configured sources."""
settings = load_server_settings()
return [Source(**s) for s in settings.get("sources", [])]
def update_source_epg_url(source_id: str, epg_url: str) -> None:
"""Update a source's epg_url in settings (only if currently empty)."""
if not epg_url:
return
settings = load_server_settings()
for s in settings.get("sources", []):
if s["id"] == source_id and not s.get("epg_url"):
s["epg_url"] = epg_url
save_server_settings(settings)
log.info("Saved EPG URL for source %s: %s", source_id, epg_url)
break
================================================
FILE: cache_test.py
================================================
"""Tests for cache.py."""
from __future__ import annotations
from pathlib import Path
from unittest import mock
import subprocess
import pytest
import cache
@pytest.fixture
def cache_module(tmp_path: Path):
"""Import cache module with temp directories."""
# Patch paths to temp locations
original_server_settings = cache.SERVER_SETTINGS_FILE
original_users_dir = cache.USERS_DIR
original_cache_dir = cache.CACHE_DIR
cache.SERVER_SETTINGS_FILE = tmp_path / "server_settings.json"
cache.USERS_DIR = tmp_path / "users"
cache.USERS_DIR.mkdir(exist_ok=True)
cache.CACHE_DIR = tmp_path / "cache"
cache.CACHE_DIR.mkdir(exist_ok=True)
# Clear memory cache
cache._cache.clear()
yield cache
cache.SERVER_SETTINGS_FILE = original_server_settings
cache.USERS_DIR = original_users_dir
cache.CACHE_DIR = original_cache_dir
cache._cache.clear()
class TestFileCache:
def test_save_and_load_file_cache(self, cache_module):
cache_module.save_file_cache("test", {"key": "value"})
result = cache_module.load_file_cache("test")
assert result is not None
data, ts = result
assert data == {"key": "value"}
assert ts > 0
def test_load_nonexistent_cache(self, cache_module):
assert cache_module.load_file_cache("nonexistent") is None
def test_load_corrupted_cache(self, cache_module):
path = cache_module.CACHE_DIR / "corrupted.json"
path.write_text("not valid json")
assert cache_module.load_file_cache("corrupted") is None
class TestMemoryCache:
def test_get_cache_returns_reference(self, cache_module):
cache = cache_module.get_cache()
cache["test"] = 123
assert cache_module.get_cache()["test"] == 123
def test_clear_all_caches_preserves_epg(self, cache_module):
cache = cache_module.get_cache()
cache["epg"] = {"data": "epg"}
cache["live"] = {"data": "live"}
cache_module.clear_all_caches()
assert "epg" in cache
assert "live" not in cache
class TestCachedInfo:
def test_get_cached_info_calls_fetch(self, cache_module):
fetch_fn = mock.Mock(return_value={"result": 42})
result = cache_module.get_cached_info("test_key", fetch_fn)
assert result == {"result": 42}
fetch_fn.assert_called_once()
def test_get_cached_info_uses_memory_cache(self, cache_module):
fetch_fn = mock.Mock(return_value={"result": 1})
cache_module.get_cached_info("key1", fetch_fn)
cache_module.get_cached_info("key1", fetch_fn)
# Only called once - second call uses memory cache
fetch_fn.assert_called_once()
def test_get_cached_info_force_bypasses_memory(self, cache_module):
fetch_fn = mock.Mock(return_value={"result": 1})
cache_module.get_cached_info("key2", fetch_fn)
cache_module.get_cached_info("key2", fetch_fn, force=True)
assert fetch_fn.call_count == 2
class TestSettings:
def test_load_settings_defaults(self, cache_module):
settings = cache_module.load_server_settings()
assert settings["sources"] == []
assert settings["transcode_mode"] == "auto"
assert settings["transcode_hw"] in (
"nvenc+vaapi",
"nvenc+software",
"amf+vaapi",
"amf+software",
"qsv",
"vaapi",
"software",
)
assert settings["probe_movies"] is True
def test_save_and_load_settings(self, cache_module):
settings = {"sources": [{"id": "s1", "name": "Test"}], "custom": True}
cache_module.save_server_settings(settings)
loaded = cache_module.load_server_settings()
assert loaded["sources"] == [{"id": "s1", "name": "Test"}]
assert loaded["custom"] is True
class TestUserSettings:
def test_load_user_settings_defaults(self, cache_module):
settings = cache_module.load_user_settings("testuser")
assert settings["guide_filter"] == []
assert settings["captions_enabled"] is True
assert settings["watch_history"] == {}
def test_save_and_load_user_settings(self, cache_module):
settings = {"guide_filter": ["cat1", "cat2"], "captions_enabled": False}
cache_module.save_user_settings("testuser", settings)
loaded = cache_module.load_user_settings("testuser")
assert loaded["guide_filter"] == ["cat1", "cat2"]
assert loaded["captions_enabled"] is False
def test_watch_position_save_and_get(self, cache_module):
cache_module.save_watch_position("user1", "http://video.url", 120.5, 3600.0)
entry = cache_module.get_watch_position("user1", "http://video.url")
assert entry is not None
assert entry["position"] == 120.5
assert entry["duration"] == 3600.0
def test_watch_position_resets_at_95_percent(self, cache_module):
# Save at 96% watched
cache_module.save_watch_position("user1", "http://video.url", 960.0, 1000.0)
entry = cache_module.get_watch_position("user1", "http://video.url")
assert entry is None # Should be reset
class TestSource:
def test_source_dataclass(self, cache_module):
source = cache_module.Source(
id="test",
name="Test Source",
type="xtream",
url="http://example.com",
)
assert source.id == "test"
assert source.username == ""
assert source.epg_timeout == 120
assert source.epg_enabled is True
def test_get_sources_empty(self, cache_module):
sources = cache_module.get_sources()
assert sources == []
def test_get_sources_from_settings(self, cache_module):
settings = {
"sources": [
{
"id": "s1",
"name": "Source 1",
"type": "m3u",
"url": "http://example.com/playlist.m3u",
}
]
}
cache_module.save_server_settings(settings)
sources = cache_module.get_sources()
assert len(sources) == 1
assert sources[0].id == "s1"
assert sources[0].type == "m3u"
class TestUpdateSourceEpgUrl:
def test_update_source_epg_url(self, cache_module):
settings = {"sources": [{"id": "s1", "name": "S1", "type": "m3u", "url": "http://x"}]}
cache_module.save_server_settings(settings)
cache_module.update_source_epg_url("s1", "http://epg.example.com")
loaded = cache_module.load_server_settings()
assert loaded["sources"][0]["epg_url"] == "http://epg.example.com"
def test_update_source_epg_url_not_overwrite(self, cache_module):
settings = {
"sources": [
{
"id": "s1",
"name": "S1",
"type": "m3u",
"url": "http://x",
"epg_url": "http://existing",
}
]
}
cache_module.save_server_settings(settings)
cache_module.update_source_epg_url("s1", "http://new")
loaded = cache_module.load_server_settings()
assert loaded["sources"][0]["epg_url"] == "http://existing"
def test_update_source_epg_url_empty_noop(self, cache_module):
settings = {"sources": [{"id": "s1", "name": "S1", "type": "m3u", "url": "http://x"}]}
cache_module.save_server_settings(settings)
cache_module.update_source_epg_url("s1", "")
loaded = cache_module.load_server_settings()
assert "epg_url" not in loaded["sources"][0]
class TestEncoderDetection:
"""Tests for encoder detection functions."""
def test_test_encoder_success(self):
"""Test _test_encoder returns (True, '') on successful command."""
with mock.patch("subprocess.run") as mock_run:
mock_run.return_value = mock.Mock(returncode=0)
ok, err = cache._test_encoder(["echo", "test"])
assert ok is True
assert err == ""
mock_run.assert_called_once()
def test_test_encoder_failure(self):
"""Test _test_encoder returns (False, error) on non-zero return code."""
with mock.patch("subprocess.run") as mock_run:
mock_run.return_value = mock.Mock(returncode=1, stderr=b"encoder not found")
ok, err = cache._test_encoder(["false"])
assert ok is False
assert "encoder not found" in err
def test_test_encoder_timeout(self):
"""Test _test_encoder returns (False, 'timeout') on timeout."""
with mock.patch("subprocess.run") as mock_run:
mock_run.side_effect = subprocess.TimeoutExpired(cmd=["test"], timeout=5)
ok, err = cache._test_encoder(["sleep", "100"], timeout=5)
assert ok is False
assert err == "timeout"
def test_test_encoder_exception(self):
"""Test _test_encoder returns (False, error) on exception."""
with mock.patch("subprocess.run") as mock_run:
mock_run.side_effect = FileNotFoundError("ffmpeg not found")
ok, err = cache._test_encoder(["nonexistent_command"])
assert ok is False
assert err == "ffmpeg not found"
def test_detect_encoders_all_available(self):
"""Test detect_encoders when all hardware is available."""
with (
mock.patch.object(cache, "_test_encoder", return_value=(True, "")),
mock.patch.object(cache, "VAAPI_DEVICE", "/dev/dri/renderD128"),
mock.patch.object(cache, "LIBVA_DRIVER", "i965"),
mock.patch.object(cache, "DRI_PATH", "/usr/lib/x86_64-linux-gnu/dri"),
):
result = cache.detect_encoders()
assert result == {
"nvenc": True,
"amf": True,
"qsv": True,
"vaapi": True,
"vaapi_baseline_only": False,
}
def test_detect_encoders_none_available(self):
"""Test detect_encoders when no hardware is available."""
with mock.patch.object(cache, "_test_encoder", return_value=(False, "not found")):
result = cache.detect_encoders()
assert result == {
"nvenc": False,
"amf": False,
"qsv": False,
"vaapi": False,
}
def test_detect_encoders_partial(self):
"""Test detect_encoders with mixed hardware availability."""
def mock_test(cmd, timeout=5, env=None):
# Return True only for VAAPI
if "h264_vaapi" in cmd:
return True, ""
return False, "not available"
with (
mock.patch.object(cache, "_test_encoder", side_effect=mock_test),
mock.patch.object(cache, "VAAPI_DEVICE", "/dev/dri/renderD128"),
mock.patch.object(cache, "LIBVA_DRIVER", "i965"),
mock.patch.object(cache, "DRI_PATH", "/usr/lib/x86_64-linux-gnu/dri"),
):
result = cache.detect_encoders()
assert result["nvenc"] is False
assert result["amf"] is False
assert result["qsv"] is False
assert result["vaapi"] is True
def test_detect_encoders_nvenc_only(self):
"""Test detect_encoders when only NVENC is available."""
def mock_test(cmd, timeout=5, env=None):
if "h264_nvenc" in cmd:
return True, ""
return False, "not available"
with mock.patch.object(cache, "_test_encoder", side_effect=mock_test):
result = cache.detect_encoders()
assert result["nvenc"] is True
assert result["amf"] is False
assert result["qsv"] is False
assert result["vaapi"] is False
def test_detect_encoders_vaapi_command_structure(self):
"""Test detect_encoders passes correct VAAPI command structure when GPU detected."""
captured_cmds = []
captured_envs = []
def capture_cmd(cmd, timeout=5, env=None):
captured_cmds.append(cmd)
captured_envs.append(env)
return False, "test"
# Mock auto-detected VAAPI device
with (
mock.patch.object(cache, "_test_encoder", side_effect=capture_cmd),
mock.patch.object(cache, "VAAPI_DEVICE", "/dev/dri/renderD128"),
mock.patch.object(cache, "LIBVA_DRIVER", "i965"),
mock.patch.object(cache, "DRI_PATH", "/usr/lib/x86_64-linux-gnu/dri"),
):
cache.detect_encoders()
# Find VAAPI commands (now 2: High profile first, then baseline fallback)
vaapi_cmds = [c for c in captured_cmds if "h264_vaapi" in c]
assert len(vaapi_cmds) == 2
# First command: High profile (default, no -profile:v)
assert "-init_hw_device" in vaapi_cmds[0]
assert "hwupload" in " ".join(vaapi_cmds[0])
assert "constrained_baseline" not in " ".join(vaapi_cmds[0])
# Second command: constrained_baseline fallback
assert "constrained_baseline" in " ".join(vaapi_cmds[1])
def test_detect_encoders_qsv_command_structure(self):
"""Test detect_encoders passes correct QSV command structure."""
captured_cmds = []
def capture_cmd(cmd, timeout=5, env=None):
captured_cmds.append(cmd)
return False, "test"
with mock.patch.object(cache, "_test_encoder", side_effect=capture_cmd):
cache.detect_encoders()
# Find QSV command
qsv_cmd = [c for c in captured_cmds if "h264_qsv" in c][0]
assert "-hwaccel" in qsv_cmd
assert "qsv" in qsv_cmd
assert "-hwaccel_output_format" in qsv_cmd
def test_refresh_encoders_updates_global(self):
"""Test refresh_encoders updates AVAILABLE_ENCODERS."""
original = cache.AVAILABLE_ENCODERS.copy()
with mock.patch.object(
cache,
"detect_encoders",
return_value={"nvenc": True, "amf": True, "qsv": True, "vaapi": True},
):
result = cache.refresh_encoders()
assert cache.AVAILABLE_ENCODERS == {
"nvenc": True,
"amf": True,
"qsv": True,
"vaapi": True,
}
assert result == cache.AVAILABLE_ENCODERS
# Restore original
cache.AVAILABLE_ENCODERS = original
def test_default_encoder_prefers_nvenc_with_vaapi(self):
"""Test _default_encoder prefers NVENC+VAAPI when both available."""
original = cache.AVAILABLE_ENCODERS.copy()
cache.AVAILABLE_ENCODERS = {
"nvenc": True,
"amf": True,
"qsv": True,
"vaapi": True,
}
try:
assert cache._default_encoder() == "nvenc+vaapi"
finally:
cache.AVAILABLE_ENCODERS = original
def test_default_encoder_nvenc_without_vaapi(self):
"""Test _default_encoder uses NVENC+software when VAAPI unavailable."""
original = cache.AVAILABLE_ENCODERS.copy()
cache.AVAILABLE_ENCODERS = {
"nvenc": True,
"amf": False,
"qsv": False,
"vaapi": False,
}
try:
assert cache._default_encoder() == "nvenc+software"
finally:
cache.AVAILABLE_ENCODERS = original
def test_default_encoder_falls_back_to_amf(self):
"""Test _default_encoder falls back to AMF when NVENC unavailable."""
original = cache.AVAILABLE_ENCODERS.copy()
cache.AVAILABLE_ENCODERS = {
"nvenc": False,
"amf": True,
"qsv": True,
"vaapi": True,
}
try:
assert cache._default_encoder() == "amf+vaapi"
finally:
cache.AVAILABLE_ENCODERS = original
def test_default_encoder_falls_back_to_qsv(self):
"""Test _default_encoder falls back to QSV when NVENC/AMF unavailable."""
original = cache.AVAILABLE_ENCODERS.copy()
cache.AVAILABLE_ENCODERS = {
"nvenc": False,
"amf": False,
"qsv": True,
"vaapi": True,
}
try:
assert cache._default_encoder() == "qsv"
finally:
cache.AVAILABLE_ENCODERS = original
def test_default_encoder_falls_back_to_vaapi(self):
"""Test _default_encoder falls back to VAAPI when NVENC/AMF/QSV unavailable."""
original = cache.AVAILABLE_ENCODERS.copy()
cache.AVAILABLE_ENCODERS = {
"nvenc": False,
"amf": False,
"qsv": False,
"vaapi": True,
}
try:
assert cache._default_encoder() == "vaapi"
finally:
cache.AVAILABLE_ENCODERS = original
def test_default_encoder_falls_back_to_software(self):
"""Test _default_encoder falls back to software as last resort."""
original = cache.AVAILABLE_ENCODERS.copy()
cache.AVAILABLE_ENCODERS = {
"nvenc": False,
"amf": False,
"qsv": False,
"vaapi": False,
}
try:
assert cache._default_encoder() == "software"
finally:
cache.AVAILABLE_ENCODERS = original
class TestLogoCache:
"""Tests for logo caching functions."""
def test_sanitize_name_removes_path_traversal(self):
assert ".." not in cache._sanitize_name("../../../etc/passwd")
assert "/" not in cache._sanitize_name("foo/bar")
assert "\\" not in cache._sanitize_name("foo\\bar")
def test_sanitize_name_keeps_safe_chars(self):
assert cache._sanitize_name("my-source_123") == "my-source_123"
assert cache._sanitize_name("Source Name") == "Source Name"
def test_sanitize_name_truncates_long_names(self):
long_name = "a" * 300
result = cache._sanitize_name(long_name)
assert len(result) == 224
def test_sanitize_name_empty_returns_default(self):
assert cache._sanitize_name("") == "default"
assert cache._sanitize_name("!!!") == "default"
def test_url_to_filename_extracts_name(self):
result = cache._url_to_filename("http://example.com/logos/channel1.png")
assert result.startswith("channel1_")
assert len(result) == len("channel1_") + 8 # name + underscore + 8 char hash
def test_url_to_filename_strips_extension(self):
result = cache._url_to_filename("http://example.com/logo.png")
assert not result.endswith(".png")
assert result.startswith("logo_")
def test_url_to_filename_hash_differs_by_url(self):
r1 = cache._url_to_filename("http://example.com/a/logo.png")
r2 = cache._url_to_filename("http://example.com/b/logo.png")
# Same base name but different hashes
assert r1.startswith("logo_")
assert r2.startswith("logo_")
assert r1 != r2
def test_url_to_filename_fallback_to_hash(self):
result = cache._url_to_filename("http://example.com/")
assert len(result) == 8 # Just the hash
def test_save_and_get_cached_logo(self, cache_module, tmp_path):
cache_module.LOGOS_DIR = tmp_path / "logos"
cache_module.LOGOS_DIR.mkdir()
# Save a logo
data = b"\x89PNG\r\n\x1a\n" + b"\x00" * 100 # Fake PNG
path = cache_module.save_logo(
"TestSource", "http://example.com/logo.png", data, "image/png"
)
assert path.exists()
assert path.suffix == ".png"
assert path.read_bytes() == data
# Get cached logo
cached = cache_module.get_cached_logo("TestSource", "http://example.com/logo.png")
assert cached == path
def test_get_cached_logo_returns_none_when_missing(self, cache_module, tmp_path):
cache_module.LOGOS_DIR = tmp_path / "logos"
cache_module.LOGOS_DIR.mkdir()
cached = cache_module.get_cached_logo("NoSource", "http://missing.com/logo.png")
assert cached is None
def test_get_cached_logo_expires(self, cache_module, tmp_path):
import time
cache_module.LOGOS_DIR = tmp_path / "logos"
cache_module.LOGOS_DIR.mkdir()
# Save a logo
data = b"\x89PNG" + b"\x00" * 100
path = cache_module.save_logo("TestSource", "http://example.com/old.png", data, "image/png")
# Backdate the file
old_time = time.time() - cache_module.LOGO_CACHE_TTL - 100
import os
os.utime(path, (old_time, old_time))
# Should be expired
cached = cache_module.get_cached_logo("TestSource", "http://example.com/old.png")
assert cached is None
assert not path.exists() # Should be deleted
def test_save_logo_content_type_mapping(self, cache_module, tmp_path):
cache_module.LOGOS_DIR = tmp_path / "logos"
cache_module.LOGOS_DIR.mkdir()
data = b"test"
assert cache_module.save_logo("s", "http://a.com/1", data, "image/jpeg").suffix == ".jpg"
assert cache_module.save_logo("s", "http://a.com/2", data, "image/gif").suffix == ".gif"
assert cache_module.save_logo("s", "http://a.com/3", data, "image/webp").suffix == ".webp"
assert cache_module.save_logo("s", "http://a.com/4", data, "image/svg+xml").suffix == ".svg"
assert cache_module.save_logo("s", "http://a.com/5", data, "unknown/type").suffix == ".png"
if __name__ == "__main__":
from testing import run_tests
run_tests(__file__)
================================================
FILE: docker-compose.yml
================================================
# Docker Compose for neTV
#
# Build:
# docker compose build # Default (optimized FFmpeg)
# FFMPEG_IMAGE=ubuntu:24.04 docker compose build # Alternative (apt FFmpeg)
#
# Run:
# docker compose up -d # Auto-detects hardware (Intel/AMD)
# docker compose --profile nvidia up -d # NVIDIA GPU (driver 580+, Turing+)
#
# NVIDIA with older drivers/GPUs (see README for driver/compute compatibility):
# FFMPEG_IMAGE=ghcr.io/jvdillon/netv-ffmpeg:cuda12.8 docker compose --profile nvidia up -d
#
# Local CUDA 12.4 FFmpeg build (from Dockerfile.ffmpeg):
# docker build --progress plain --build-arg NVIDIA=cuda:12.4 --build-arg FFMPEG_BASE_IMAGE=ubuntu:22.04 -f Dockerfile.ffmpeg -t netv-ffmpeg:cuda12.4 .
# FFMPEG_IMAGE=netv-ffmpeg:cuda12.4 docker compose --profile nvidia build
# FFMPEG_IMAGE=netv-ffmpeg:cuda12.4 docker compose --profile nvidia up -d
#
# No GPU or /dev/dri? Comment out the 'devices' section below.
services:
netv:
build:
context: .
args:
FFMPEG_IMAGE: ${FFMPEG_IMAGE:-ghcr.io/jvdillon/netv-ffmpeg:latest}
image: netv
container_name: netv
ports:
- "${NETV_PORT:-8000}:8000"
environment:
- NETV_PORT=8000
- NETV_HTTPS=${NETV_HTTPS:-}
- LOG_LEVEL=INFO # DEBUG for verbose logging
volumes:
- ./cache:/app/cache
- /etc/localtime:/etc/localtime:ro # Use host timezone for EPG
# For HTTPS, also mount your certificates:
# - /etc/letsencrypt:/etc/letsencrypt:ro
# Use system RAM instead of local storage for transcode files
# - /dev/shm:/tmp
restart: unless-stopped
# Hardware acceleration (Intel/AMD) - comment out if no /dev/dri
devices:
- /dev/dri:/dev/dri
# NVIDIA GPU: docker compose --profile nvidia up -d
netv-nvidia:
extends:
service: netv
container_name: netv
profiles:
- nvidia
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: all
capabilities: [gpu]
================================================
FILE: entrypoint-ai_upscale.sh
================================================
#!/bin/sh
set -e
# Entrypoint for AI Upscale image
#
# Same as base entrypoint, plus:
# - Auto-builds TensorRT engines on first start if missing
# Fix cache directory ownership
mkdir -p /app/cache
if [ "$(stat -c '%U' /app/cache)" != "netv" ]; then
chown -R netv:netv /app/cache 2>/dev/null || true
fi
# Ensure writable even on filesystems that ignore chown (e.g., some NAS mounts)
if ! gosu netv sh -c "touch /app/cache/.perm_test && rm /app/cache/.perm_test" 2>/dev/null; then
chmod -R u+rwX,g+rwX /app/cache 2>/dev/null || true
chmod g+s /app/cache 2>/dev/null || true
fi
# Final verification - warn if still not writable
if ! gosu netv sh -c "touch /app/cache/.perm_test && rm /app/cache/.perm_test" 2>/dev/null; then
echo "WARNING: /app/cache is not writable by netv user"
echo "Cache operations may fail. Check volume permissions."
fi
# Fix models directory ownership
mkdir -p /models
if [ "$(stat -c '%U' /models)" != "netv" ]; then
chown -R netv:netv /models 2>/dev/null || true
fi
if ! gosu netv sh -c "touch /models/.perm_test && rm /models/.perm_test" 2>/dev/null; then
chmod -R u+rwX,g+rwX /models 2>/dev/null || true
fi
if ! gosu netv sh -c "touch /models/.perm_test && rm /models/.perm_test" 2>/dev/null; then
echo "WARNING: /models is not writable by netv user"
echo "TensorRT engine caching may fail. Check volume permissions."
fi
# Add netv user to render device group (for VAAPI hardware encoding)
if [ -e /dev/dri/renderD128 ]; then
RENDER_GID=$(stat -c '%g' /dev/dri/renderD128)
RENDER_ADDED=false
if groupadd --gid "$RENDER_GID" hostrender 2>/dev/null; then
: # Created new group
fi
if usermod -aG hostrender netv 2>/dev/null; then
RENDER_ADDED=true
fi
if [ "$RENDER_ADDED" = "false" ]; then
echo "WARNING: Could not add netv to render group (GID $RENDER_GID)"
if [ "$RENDER_GID" = "65534" ]; then
echo " GID 65534 (nogroup) indicates Docker user namespace mapping issue."
echo " This is usually harmless - VAAPI may still work if container has device access."
echo " To fix: ensure 'render' group exists on host and user is in it, or use --privileged"
else
echo " VAAPI hardware encoding may not be available."
echo " To fix on host: sudo usermod -aG render \$USER (then restart Docker)"
fi
fi
fi
# Build TensorRT engines if missing (first run only)
# Builds both recommended models: 4x-compact (quality) and 2x-liveaction-span (fast)
if ! ls /models/4x-compact_*p_fp16.engine >/dev/null 2>&1; then
echo "========================================"
echo "AI Upscale: First start detected"
echo "========================================"
echo "Building TensorRT engines for your GPU..."
echo "Models: 4x-compact (quality), 2x-liveaction-span (fast)"
echo "This only happens once (cached in /models volume)."
echo ""
# Run as netv user so files have correct ownership
if ! gosu netv env MODEL_DIR=/models MODEL="recommended" /app/tools/install-ai_upscale.sh; then
echo "ERROR: Failed to build TensorRT engines"
echo "Check GPU compatibility and CUDA installation"
exit 1
fi
# Verify engines were created
if ! ls /models/4x-compact_*p_fp16.engine >/dev/null 2>&1; then
echo "ERROR: TensorRT engines not found after build"
echo "Build may have succeeded but produced no output"
exit 1
fi
fi
# Drop to netv user and run the app
exec gosu netv python3 main.py --port "${NETV_PORT:-8000}" ${NETV_HTTPS:+--https}
================================================
FILE: entrypoint.sh
================================================
#!/bin/sh
set -e
# Entrypoint: fix permissions and drop to netv user
#
# Handles two common Docker issues:
# 1. Bind-mounted ./cache owned by host user (permission denied)
# 2. /dev/dri/renderD128 GID mismatch (VAAPI unavailable)
# Fix cache directory ownership (skip if already correct to avoid slow recursive chown)
# Build/runtime note: this only applies to bind-mounted cache (e.g., NAS),
# not to image layers, so it does not affect build reproducibility.
mkdir -p /app/cache
if [ "$(stat -c '%U' /app/cache)" != "netv" ]; then
chown -R netv:netv /app/cache 2>/dev/null || true
fi
# Ensure writable even on filesystems that ignore chown (e.g., some NAS mounts)
if ! gosu netv sh -c "touch /app/cache/.perm_test && rm /app/cache/.perm_test" 2>/dev/null; then
chmod -R u+rwX,g+rwX /app/cache 2>/dev/null || true
chmod g+s /app/cache 2>/dev/null || true
fi
# Final verification - warn if still not writable
if ! gosu netv sh -c "touch /app/cache/.perm_test && rm /app/cache/.perm_test" 2>/dev/null; then
echo "WARNING: /app/cache is not writable by netv user"
echo "Cache operations may fail. Check volume permissions."
fi
mkdir -p /app/cache/users
if [ "$(stat -c '%U' /app/cache/users)" != "netv" ]; then
chown -R netv:netv /app/cache/users 2>/dev/null || true
fi
# Ensure writable even on filesystems that ignore chown (e.g., some NAS mounts)
if ! gosu netv sh -c "touch /app/cache/users/.perm_test && rm /app/cache/users/.perm_test" 2>/dev/null; then
chmod -R u+rwX,g+rwX /app/cache/users 2>/dev/null || true
chmod g+s /app/cache/users 2>/dev/null || true
fi
# Final verification - warn if still not writable
if ! gosu netv sh -c "touch /app/cache/users/.perm_test && rm /app/cache/users/.perm_test" 2>/dev/null; then
echo "WARNING: /app/cache/users is not writable by netv user"
echo "Cache operations may fail. Check volume permissions."
fi
# Add netv user to render device group (for VAAPI hardware encoding)
if [ -e /dev/dri/renderD128 ]; then
RENDER_GID=$(stat -c '%g' /dev/dri/renderD128)
RENDER_ADDED=false
if groupadd --gid "$RENDER_GID" hostrender 2>/dev/null; then
: # Created new group
fi
if usermod -aG hostrender netv 2>/dev/null; then
RENDER_ADDED=true
fi
if [ "$RENDER_ADDED" = "false" ]; then
echo "WARNING: Could not add netv to render group (GID $RENDER_GID)"
if [ "$RENDER_GID" = "65534" ]; then
echo " GID 65534 (nogroup) indicates Docker user namespace mapping issue."
echo " This is usually harmless - VAAPI may still work if container has device access."
echo " To fix: ensure 'render' group exists on host and user is in it, or use --privileged"
else
echo " VAAPI hardware encoding may not be available."
echo " To fix on host: sudo usermod -aG render \$USER (then restart Docker)"
fi
fi
fi
# Drop to netv user and run the app
exec gosu netv python3 main.py --port "${NETV_PORT:-8000}" ${NETV_HTTPS:+--https}
================================================
FILE: epg.py
================================================
"""EPG storage and XMLTV parsing."""
from __future__ import annotations
from dataclasses import dataclass
from datetime import UTC, datetime, timedelta, timezone
from pathlib import Path
import contextlib
import gzip
import logging
import re
import sqlite3
import threading
import time
import defusedxml.ElementTree as ET # Safe XML parsing
from util import safe_urlopen
log = logging.getLogger(__name__)
# =============================================================================
# Data Types
# =============================================================================
@dataclass(slots=True)
class Program:
channel_id: str
title: str
start: datetime
stop: datetime
desc: str = ""
source_id: str = ""
# =============================================================================
# SQLite Storage
# =============================================================================
_DB_PATH: Path | None = None
_local = threading.local()
def init(cache_dir: Path) -> None:
"""Initialize EPG database."""
global _DB_PATH
_DB_PATH = cache_dir / "epg.db"
conn = _get_conn()
conn.executescript("""
CREATE TABLE IF NOT EXISTS channels (
id TEXT PRIMARY KEY,
name TEXT,
source_id TEXT
);
CREATE TABLE IF NOT EXISTS icons (
channel_id TEXT PRIMARY KEY,
url TEXT
);
CREATE TABLE IF NOT EXISTS programs (
id INTEGER PRIMARY KEY,
channel_id TEXT,
title TEXT,
start_ts REAL,
stop_ts REAL,
desc TEXT,
source_id TEXT
);
CREATE INDEX IF NOT EXISTS idx_programs_channel_time
ON programs(channel_id, start_ts, stop_ts);
CREATE INDEX IF NOT EXISTS idx_programs_time
ON programs(start_ts);
""")
conn.commit()
def _get_conn() -> sqlite3.Connection:
"""Get thread-local database connection."""
if not hasattr(_local, "conn") or _local.conn is None:
if _DB_PATH is None:
raise RuntimeError("EPG database not initialized")
_local.conn = sqlite3.connect(_DB_PATH, timeout=30.0)
_local.conn.row_factory = sqlite3.Row
_local.conn.execute("PRAGMA journal_mode=WAL")
return _local.conn
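# Illustrative note (not part of the module): sqlite3 connections are bound to
# the thread that created them (check_same_thread defaults to True), so each
# worker thread lazily gets its own handle from the thread-local above, and WAL
# mode lets concurrent readers coexist with a single writer. A minimal sketch:
#
# import threading
#
# def _show_count() -> None:  # hypothetical helper, for illustration only
#     print(get_program_count())  # opens this thread's own connection lazily
#
# t = threading.Thread(target=_show_count)
# t.start(); t.join()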
def clear() -> None:
"""Clear all EPG data."""
conn = _get_conn()
conn.executescript("DELETE FROM programs; DELETE FROM channels; DELETE FROM icons;")
conn.commit()
def clear_source(source_id: str) -> None:
"""Clear EPG data for a specific source."""
conn = _get_conn()
conn.execute("DELETE FROM programs WHERE source_id = ?", (source_id,))
conn.execute("DELETE FROM channels WHERE source_id = ?", (source_id,))
conn.commit()
def insert_channel(channel_id: str, name: str, source_id: str) -> None:
"""Insert or update a channel."""
conn = _get_conn()
conn.execute(
"INSERT OR REPLACE INTO channels (id, name, source_id) VALUES (?, ?, ?)",
(channel_id, name, source_id),
)
def insert_icon(channel_id: str, url: str) -> None:
"""Insert or update a channel icon."""
conn = _get_conn()
conn.execute(
"INSERT OR REPLACE INTO icons (channel_id, url) VALUES (?, ?)",
(channel_id, url),
)
def insert_programs(programs: list[tuple[str, str, float, float, str, str]]) -> None:
"""Bulk insert programs. Each tuple: (channel_id, title, start_ts, stop_ts, desc, source_id)."""
conn = _get_conn()
conn.executemany(
"INSERT INTO programs (channel_id, title, start_ts, stop_ts, desc, source_id) VALUES (?, ?, ?, ?, ?, ?)",
programs,
)
def commit() -> None:
"""Commit current transaction."""
_get_conn().commit()
def get_icon(channel_id: str) -> str:
"""Get icon URL for a channel."""
conn = _get_conn()
row = conn.execute("SELECT url FROM icons WHERE channel_id = ?", (channel_id,)).fetchone()
return row["url"] if row else ""
def get_programs_in_range(
channel_id: str,
start: datetime,
end: datetime,
preferred_source_id: str = "",
) -> list[Program]:
"""Get programs for a channel within a time range."""
conn = _get_conn()
start_ts = start.timestamp()
end_ts = end.timestamp()
rows = conn.execute(
"""
SELECT channel_id, title, start_ts, stop_ts, desc, source_id
FROM programs
WHERE channel_id = ? AND stop_ts > ? AND start_ts < ?
ORDER BY start_ts
""",
(channel_id, start_ts, end_ts),
).fetchall()
programs = [
Program(
channel_id=row["channel_id"],
title=row["title"],
start=datetime.fromtimestamp(row["start_ts"], tz=UTC),
stop=datetime.fromtimestamp(row["stop_ts"], tz=UTC),
desc=row["desc"] or "",
source_id=row["source_id"] or "",
)
for row in rows
]
# Deduplicate overlapping programs, preferring the preferred source
# (delegates to the shared _dedupe_programs helper below; the name is
# resolved at call time, so the later definition is fine).
return _dedupe_programs(programs, preferred_source_id)
_MAX_IN_CLAUSE = 500 # SQLite limit is 999, stay well below
def _dedupe_programs(programs: list[Program], preferred_source_id: str) -> list[Program]:
"""Deduplicate overlapping programs, preferring the preferred source."""
if not preferred_source_id or len(programs) <= 1:
return programs
result: list[Program] = []
for p in programs:
dominated = False
for i, existing in enumerate(result):
# Check for overlap
if p.start < existing.stop and p.stop > existing.start:
# Prefer the preferred source
if p.source_id == preferred_source_id and existing.source_id != preferred_source_id:
result[i] = p
dominated = True
break
if not dominated:
result.append(p)
return sorted(result, key=lambda p: p.start)
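# Illustrative sketch of the dedup semantics (hypothetical data): given two
# listings for the same slot from different sources, the preferred source wins.
#
# from datetime import UTC, datetime, timedelta
# t0 = datetime(2024, 11, 30, 12, 0, tzinfo=UTC)
# a = Program("ch1", "From src1", t0, t0 + timedelta(hours=1), source_id="src1")
# b = Program("ch1", "From src2", t0, t0 + timedelta(hours=1), source_id="src2")
# _dedupe_programs([a, b], preferred_source_id="src2")
# # -> [b]: the overlapping src1 entry is replaced in place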
def get_programs_batch(
channel_ids: list[str],
start: datetime,
end: datetime,
preferred_sources: dict[str, str] | None = None,
) -> dict[str, list[Program]]:
"""Get programs for multiple channels in a single query.
Args:
channel_ids: List of EPG channel IDs to query
start: Start of time window
end: End of time window
preferred_sources: Optional dict mapping channel_id -> preferred source_id
for deduplication. If provided, overlapping programs from the preferred
source will be kept over programs from other sources.
"""
if not channel_ids:
return {}
conn = _get_conn()
start_ts = start.timestamp()
end_ts = end.timestamp()
result: dict[str, list[Program]] = {ch: [] for ch in channel_ids}
# Process in chunks to avoid huge IN clauses
for i in range(0, len(channel_ids), _MAX_IN_CLAUSE):
chunk = channel_ids[i : i + _MAX_IN_CLAUSE]
placeholders = ",".join("?" * len(chunk))
rows = conn.execute(
f"""
SELECT channel_id, title, start_ts, stop_ts, desc, source_id
FROM programs
WHERE channel_id IN ({placeholders}) AND stop_ts > ? AND start_ts < ?
ORDER BY channel_id, start_ts
""",
[*chunk, start_ts, end_ts],
).fetchall()
for row in rows:
result[row["channel_id"]].append(
Program(
channel_id=row["channel_id"],
title=row["title"],
start=datetime.fromtimestamp(row["start_ts"], tz=UTC),
stop=datetime.fromtimestamp(row["stop_ts"], tz=UTC),
desc=row["desc"] or "",
source_id=row["source_id"] or "",
)
)
# Deduplicate overlapping programs if preferred_sources provided
if preferred_sources:
for ch_id in result:
if ch_id in preferred_sources and result[ch_id]:
result[ch_id] = _dedupe_programs(result[ch_id], preferred_sources[ch_id])
channels_with_programs = sum(1 for progs in result.values() if progs)
log.debug(
"EPG batch query: requested %d channel IDs, found programs for %d",
len(channel_ids),
channels_with_programs,
)
return result
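# Illustrative usage (hypothetical channel IDs): fetch a 3-hour guide window
# for many channels in one round trip instead of one query per channel.
#
# from datetime import UTC, datetime, timedelta
# now = datetime.now(UTC)
# by_channel = get_programs_batch(
#     ["hbo.us", "cnn.us"],
#     now,
#     now + timedelta(hours=3),
#     preferred_sources={"hbo.us": "provider-a"},  # dedupe overlaps per channel
# )
# for ch_id, progs in by_channel.items():
#     print(ch_id, [p.title for p in progs])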
def get_icons_batch(channel_ids: list[str]) -> dict[str, str]:
"""Get icons for multiple channels in a single query."""
if not channel_ids:
return {}
conn = _get_conn()
result: dict[str, str] = {}
for i in range(0, len(channel_ids), _MAX_IN_CLAUSE):
chunk = channel_ids[i : i + _MAX_IN_CLAUSE]
placeholders = ",".join("?" * len(chunk))
rows = conn.execute(
f"SELECT channel_id, url FROM icons WHERE channel_id IN ({placeholders})",
chunk,
).fetchall()
for row in rows:
result[row["channel_id"]] = row["url"]
return result
def has_programs() -> bool:
"""Check if there are any programs in the database."""
conn = _get_conn()
row = conn.execute("SELECT 1 FROM programs LIMIT 1").fetchone()
return row is not None
def get_program_count() -> int:
"""Get total program count."""
conn = _get_conn()
row = conn.execute("SELECT COUNT(*) FROM programs").fetchone()
return row[0] if row else 0
def get_channel_count() -> int:
"""Get total channel count."""
conn = _get_conn()
row = conn.execute("SELECT COUNT(*) FROM channels").fetchone()
return row[0] if row else 0
def prune_old_programs(before: datetime) -> int:
"""Delete programs that ended before the given time. Returns count deleted."""
conn = _get_conn()
cursor = conn.execute("DELETE FROM programs WHERE stop_ts < ?", (before.timestamp(),))
conn.commit()
return cursor.rowcount
# =============================================================================
# XMLTV Parsing
# =============================================================================
def _parse_epg_time(s: str) -> datetime:
"""Parse XMLTV time format: 20241130120000 +0000 or 20241130120000+0530."""
s = s.replace(" ", "")
if len(s) >= 14:
dt = datetime.strptime(s[:14], "%Y%m%d%H%M%S")
if len(s) > 14:
tz_str = s[14:]
sign = -1 if tz_str[0] == "-" else 1
tz_hours = int(tz_str[1:3]) if len(tz_str) >= 3 else 0
tz_mins = int(tz_str[3:5]) if len(tz_str) >= 5 else 0
offset = timedelta(hours=tz_hours, minutes=tz_mins)
dt = dt.replace(tzinfo=timezone(sign * offset))
return dt
return datetime.now(UTC)
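# Worked examples of the formats handled above (values are illustrative):
# _parse_epg_time("20241130120000 +0000") -> 2024-11-30 12:00:00+00:00
# _parse_epg_time("20241130120000+0530")  -> 2024-11-30 12:00:00+05:30
# _parse_epg_time("20241130120000-0800")  -> 2024-11-30 12:00:00-08:00
# A bare "20241130120000" (no offset) yields a naive datetime, which a later
# .timestamp() call interprets in server-local time.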
def _sanitize_epg_xml(xml_str: str) -> str:
"""Try to fix corrupted EPG XML by extracting valid elements."""
channels = re.findall(r"<channel\s+[^>]*>.*?</channel>", xml_str, re.DOTALL)
programmes = re.findall(
r'<programme\s+start="[^"<>]+"\s+stop="[^"<>]+"\s+channel="[^"<>]+"[^>]*>.*?</programme>',
xml_str,
re.DOTALL,
)
log.info("Sanitized EPG: extracted %d channels, %d programmes", len(channels), len(programmes))
return '<?xml version="1.0"?>\n<tv>\n' + "\n".join(channels) + "\n" + "\n".join(programmes) + "\n</tv>"
def fetch_epg(
epg_url: str,
cache_dir: Path,
timeout: int = 120,
source_id: str = "",
user_agent: str | None = None,
) -> int:
"""Fetch and parse XMLTV EPG data directly into sqlite.
Args:
epg_url: URL of the XMLTV EPG feed
cache_dir: Directory for debug files if parsing fails
timeout: Request timeout in seconds
source_id: Source identifier for multi-source support
user_agent: User-Agent header to send. If None, uses default.
Returns:
Number of programs inserted.
"""
with safe_urlopen(epg_url, timeout=timeout, user_agent=user_agent) as resp:
content = resp.read()
with contextlib.suppress(Exception):
content = gzip.decompress(content)
xml_str = content.decode("utf-8")
try:
root = ET.fromstring(xml_str)
except ET.ParseError as e:
debug_file = cache_dir / f"epg_debug_{int(time.time())}.xml"
debug_file.write_text(xml_str)
log.warning("EPG parse failed (%s), attempting sanitization...", e)
try:
sanitized = _sanitize_epg_xml(xml_str)
root = ET.fromstring(sanitized)
log.info("Sanitized EPG parsed successfully")
except ET.ParseError as e2:
log.error("Sanitized EPG also failed: %s", e2)
raise
# Parse channels directly into sqlite
channel_ids: set[str] = set()
for ch in root.findall("channel"):
ch_id = ch.get("id", "")
channel_ids.add(ch_id)
name_el = ch.find("display-name")
name = name_el.text if name_el is not None and name_el.text else ch_id
insert_channel(ch_id, name, source_id)
icon_el = ch.find("icon")
if icon_el is not None:
insert_icon(ch_id, icon_el.get("src", ""))
# Parse programs in batches
batch: list[tuple[str, str, float, float, str, str]] = []
batch_size = 10000
program_count = 0
program_channel_ids: set[str] = set()
for prog in root.findall("programme"):
ch_id = prog.get("channel", "")
program_channel_ids.add(ch_id)
start_str = prog.get("start", "")
stop_str = prog.get("stop", "")
title_el = prog.find("title")
title = title_el.text if title_el is not None and title_el.text else "Unknown"
desc_el = prog.find("desc")
desc = desc_el.text if desc_el is not None and desc_el.text else ""
try:
start = _parse_epg_time(start_str)
stop = _parse_epg_time(stop_str)
except Exception:
continue
batch.append((ch_id, title, start.timestamp(), stop.timestamp(), desc, source_id))
program_count += 1
if len(batch) >= batch_size:
insert_programs(batch)
batch.clear()
if batch:
insert_programs(batch)
commit()
log.debug(
"EPG parsed: %d channels, %d unique program channel IDs, %d programs",
len(channel_ids),
len(program_channel_ids),
program_count,
)
return program_count
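# End-to-end sketch (hypothetical URL and source ID, not part of the module):
# a typical refresh cycle for one source using the functions above.
#
# from datetime import UTC, datetime, timedelta
# from pathlib import Path
#
# init(Path("./cache"))
# clear_source("src1")  # drop this source's stale rows first
# n = fetch_epg("https://example.com/xmltv.xml.gz", Path("./cache"), source_id="src1")
# prune_old_programs(datetime.now(UTC) - timedelta(days=1))
# print(n, "programs,", get_channel_count(), "channels")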
================================================
FILE: epg_test.py
================================================
"""Tests for epg.py - EPG storage and parsing."""
from __future__ import annotations
from datetime import UTC, datetime, timedelta
from pathlib import Path
import pytest
from epg import Program
import epg
@pytest.fixture
def db(tmp_path: Path):
"""Initialize EPG database in temp directory."""
epg.init(tmp_path)
yield epg
# Clear thread-local connection
if hasattr(epg._local, "conn"):
epg._local.conn.close()
epg._local.conn = None
class TestInit:
"""Tests for database initialization."""
def test_init_creates_tables(self, db):
conn = db._get_conn()
tables = conn.execute("SELECT name FROM sqlite_master WHERE type='table'").fetchall()
table_names = {t["name"] for t in tables}
assert "channels" in table_names
assert "icons" in table_names
assert "programs" in table_names
class TestChannels:
"""Tests for channel operations."""
def test_insert_channel(self, db):
db.insert_channel("ch1", "Channel One", "src1")
db.commit()
conn = db._get_conn()
row = conn.execute("SELECT * FROM channels WHERE id = ?", ("ch1",)).fetchone()
assert row["name"] == "Channel One"
assert row["source_id"] == "src1"
def test_insert_channel_upsert(self, db):
db.insert_channel("ch1", "Old Name", "src1")
db.insert_channel("ch1", "New Name", "src1")
db.commit()
conn = db._get_conn()
rows = conn.execute("SELECT * FROM channels WHERE id = ?", ("ch1",)).fetchall()
assert len(rows) == 1
assert rows[0]["name"] == "New Name"
class TestIcons:
"""Tests for icon operations."""
def test_insert_icon(self, db):
db.insert_icon("ch1", "http://example.com/icon.png")
db.commit()
result = db.get_icon("ch1")
assert result == "http://example.com/icon.png"
def test_get_icon_not_found(self, db):
result = db.get_icon("nonexistent")
assert result == ""
def test_get_icons_batch(self, db):
db.insert_icon("ch1", "http://example.com/1.png")
db.insert_icon("ch2", "http://example.com/2.png")
db.insert_icon("ch3", "http://example.com/3.png")
db.commit()
result = db.get_icons_batch(["ch1", "ch3"])
assert result == {
"ch1": "http://example.com/1.png",
"ch3": "http://example.com/3.png",
}
def test_get_icons_batch_empty(self, db):
result = db.get_icons_batch([])
assert result == {}
class TestPrograms:
"""Tests for program operations."""
def test_insert_programs(self, db):
now = datetime.now(UTC)
programs = [
(
"ch1",
"Show 1",
now.timestamp(),
(now + timedelta(hours=1)).timestamp(),
"Desc 1",
"src1",
),
(
"ch1",
"Show 2",
(now + timedelta(hours=1)).timestamp(),
(now + timedelta(hours=2)).timestamp(),
"Desc 2",
"src1",
),
]
db.insert_programs(programs)
db.commit()
count = db.get_program_count()
assert count == 2
def test_get_programs_in_range(self, db):
now = datetime.now(UTC).replace(minute=0, second=0, microsecond=0)
programs = [
(
"ch1",
"Show 1",
now.timestamp(),
(now + timedelta(hours=1)).timestamp(),
"Desc 1",
"src1",
),
(
"ch1",
"Show 2",
(now + timedelta(hours=1)).timestamp(),
(now + timedelta(hours=2)).timestamp(),
"Desc 2",
"src1",
),
(
"ch1",
"Show 3",
(now + timedelta(hours=2)).timestamp(),
(now + timedelta(hours=3)).timestamp(),
"Desc 3",
"src1",
),
]
db.insert_programs(programs)
db.commit()
# Query for middle hour
result = db.get_programs_in_range(
"ch1",
now + timedelta(minutes=30),
now + timedelta(hours=1, minutes=30),
)
assert len(result) == 2
assert result[0].title == "Show 1"
assert result[1].title == "Show 2"
def test_get_programs_in_range_empty(self, db):
now = datetime.now(UTC)
result = db.get_programs_in_range("ch1", now, now + timedelta(hours=1))
assert result == []
def test_get_programs_batch(self, db):
now = datetime.now(UTC).replace(minute=0, second=0, microsecond=0)
programs = [
("ch1", "Show A", now.timestamp(), (now + timedelta(hours=1)).timestamp(), "", "src1"),
("ch2", "Show B", now.timestamp(), (now + timedelta(hours=1)).timestamp(), "", "src1"),
]
db.insert_programs(programs)
db.commit()
result = db.get_programs_batch(
["ch1", "ch2", "ch3"],
now,
now + timedelta(hours=1),
)
assert len(result["ch1"]) == 1
assert len(result["ch2"]) == 1
assert len(result["ch3"]) == 0
assert result["ch1"][0].title == "Show A"
assert result["ch2"][0].title == "Show B"
def test_get_programs_batch_empty_channels(self, db):
result = db.get_programs_batch([], datetime.now(UTC), datetime.now(UTC))
assert result == {}
def test_has_programs_false(self, db):
assert db.has_programs() is False
def test_has_programs_true(self, db):
now = datetime.now(UTC)
db.insert_programs(
[("ch1", "Show", now.timestamp(), (now + timedelta(hours=1)).timestamp(), "", "src1")]
)
db.commit()
assert db.has_programs() is True
def test_get_program_count(self, db):
now = datetime.now(UTC)
assert db.get_program_count() == 0
db.insert_programs(
[
(
"ch1",
"Show 1",
now.timestamp(),
(now + timedelta(hours=1)).timestamp(),
"",
"src1",
),
(
"ch1",
"Show 2",
(now + timedelta(hours=1)).timestamp(),
(now + timedelta(hours=2)).timestamp(),
"",
"src1",
),
]
)
db.commit()
assert db.get_program_count() == 2
def test_get_channel_count(self, db):
assert db.get_channel_count() == 0
db.insert_channel("ch1", "Channel 1", "src1")
db.insert_channel("ch2", "Channel 2", "src1")
db.commit()
assert db.get_channel_count() == 2
class TestClear:
"""Tests for clear operations."""
def test_clear_all(self, db):
now = datetime.now(UTC)
db.insert_channel("ch1", "Channel 1", "src1")
db.insert_icon("ch1", "http://example.com/icon.png")
db.insert_programs(
[("ch1", "Show", now.timestamp(), (now + timedelta(hours=1)).timestamp(), "", "src1")]
)
db.commit()
db.clear()
assert db.get_channel_count() == 0
assert db.get_program_count() == 0
assert db.get_icon("ch1") == ""
def test_clear_source(self, db):
now = datetime.now(UTC)
db.insert_channel("ch1", "Channel 1", "src1")
db.insert_channel("ch2", "Channel 2", "src2")
db.insert_programs(
[
(
"ch1",
"Show 1",
now.timestamp(),
(now + timedelta(hours=1)).timestamp(),
"",
"src1",
),
(
"ch2",
"Show 2",
now.timestamp(),
(now + timedelta(hours=1)).timestamp(),
"",
"src2",
),
]
)
db.commit()
db.clear_source("src1")
assert db.get_channel_count() == 1
assert db.get_program_count() == 1
class TestPrune:
"""Tests for prune operations."""
def test_prune_old_programs(self, db):
now = datetime.now(UTC)
old = now - timedelta(days=2)
db.insert_programs(
[
(
"ch1",
"Old Show",
old.timestamp(),
(old + timedelta(hours=1)).timestamp(),
"",
"src1",
),
(
"ch1",
"New Show",
now.timestamp(),
(now + timedelta(hours=1)).timestamp(),
"",
"src1",
),
]
)
db.commit()
deleted = db.prune_old_programs(now - timedelta(days=1))
assert deleted == 1
assert db.get_program_count() == 1
class TestPreferredSource:
"""Tests for preferred source deduplication."""
def test_prefer_source_in_range(self, db):
now = datetime.now(UTC).replace(minute=0, second=0, microsecond=0)
# Two overlapping programs from different sources
db.insert_programs(
[
(
"ch1",
"From Src1",
now.timestamp(),
(now + timedelta(hours=1)).timestamp(),
"",
"src1",
),
(
"ch1",
"From Src2",
now.timestamp(),
(now + timedelta(hours=1)).timestamp(),
"",
"src2",
),
]
)
db.commit()
# Prefer src2
result = db.get_programs_in_range(
"ch1", now, now + timedelta(hours=1), preferred_source_id="src2"
)
assert len(result) == 1
assert result[0].title == "From Src2"
# Prefer src1
result = db.get_programs_in_range(
"ch1", now, now + timedelta(hours=1), preferred_source_id="src1"
)
assert len(result) == 1
assert result[0].title == "From Src1"
class TestProgram:
"""Tests for Program dataclass."""
def test_program_dataclass(self):
now = datetime.now(UTC)
p = Program(
channel_id="ch1",
title="Test Show",
start=now,
stop=now + timedelta(hours=1),
desc="Description",
source_id="src1",
)
assert p.channel_id == "ch1"
assert p.title == "Test Show"
assert p.desc == "Description"
assert p.source_id == "src1"
def test_program_defaults(self):
now = datetime.now(UTC)
p = Program(channel_id="ch1", title="Test", start=now, stop=now + timedelta(hours=1))
assert p.desc == ""
assert p.source_id == ""
if __name__ == "__main__":
from testing import run_tests
run_tests(__file__)
================================================
FILE: ffmpeg_command.py
================================================
"""FFmpeg command building and media probing."""
from __future__ import annotations
from collections.abc import Callable
from contextlib import suppress
from dataclasses import dataclass
from typing import Any, Literal
import json
import logging
import pathlib
import re
import subprocess
import tempfile
import threading
import time
# Import VAAPI auto-detection results (avoid circular import by importing constants only)
from cache import AVAILABLE_ENCODERS, VAAPI_DEVICE
log = logging.getLogger(__name__)
HwAccel = Literal[
"nvenc+vaapi", "nvenc+software", "amf+vaapi", "amf+software", "qsv", "vaapi", "software"
]
def _parse_hw(hw: HwAccel) -> tuple[str, str]:
"""Parse hw into (encoder, fallback). e.g. 'nvenc+vaapi' -> ('nvenc', 'vaapi')"""
if "+" in hw:
encoder, fallback = hw.split("+", 1)
return encoder, fallback
return hw, "software" # standalone options fallback to software
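# Worked examples:
# _parse_hw("nvenc+vaapi") -> ("nvenc", "vaapi")   # NVENC encode, VAAPI decode fallback
# _parse_hw("qsv")         -> ("qsv", "software")  # standalone option, software fallback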
# Timing constants
_HLS_SEGMENT_DURATION_SEC = 3.0 # Short segments for faster startup/seeking
_PROBE_CACHE_TTL_SEC = 3_600
_SERIES_PROBE_CACHE_TTL_SEC = 7 * 24 * 3_600 # 7 days
_PROBE_TIMEOUT_SEC = 30
# Segment file naming
SEG_PREFIX = "seg" # Segment files are named seg000.ts, seg001.ts, etc.
DEFAULT_LIVE_BUFFER_SECS = 30.0 # Default live buffer when DVR disabled
TEXT_SUBTITLE_CODECS = {
"subrip",
"ass",
"ssa",
"mov_text",
"webvtt",
"srt",
}
# User-Agent presets
_USER_AGENT_PRESETS = {
"vlc": "VLC/3.0.20 LibVLC/3.0.20",
"chrome": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
"tivimate": "TiviMate/4.7.0",
}
# NVDEC capabilities by minimum compute capability
# https://developer.nvidia.com/video-encode-and-decode-gpu-support-matrix-new
_NVDEC_MIN_COMPUTE: dict[str, float] = {
"h264": 5.0, # Maxwell+
"hevc": 6.0, # Pascal+ (HEVC 10-bit requires Pascal; Maxwell GM206 is edge case we ignore)
"av1": 8.0, # Ampere+
}
# VAAPI/QSV: static conservative lists (unlike NVIDIA, no clean runtime probe available).
# Could parse `vainfo` output, but format varies by driver (i965 vs iHD vs radeonsi).
# These codecs are nearly universal on any GPU from the last decade.
_VAAPI_SAFE_CODECS = {"h264", "hevc", "mpeg2video", "vp8", "vp9", "vc1", "av1"}
_QSV_SAFE_CODECS = {"h264", "hevc", "mpeg2video", "vp9", "vc1", "av1"}
# Max resolution height by setting
_MAX_RES_HEIGHT: dict[str, int] = {
"4k": 2160,
"1080p": 1080,
"720p": 720,
"480p": 480,
}
# Quality presets -> QP/CRF values (lower = higher quality)
_QUALITY_QP: dict[str, int] = {"high": 20, "medium": 28, "low": 35}
_QUALITY_CRF: dict[str, int] = {"high": 20, "medium": 26, "low": 32}
# Module state
_probe_lock = threading.Lock()
_probe_cache: dict[str, tuple[float, MediaInfo | None, list[SubtitleStream]]] = {}
_series_probe_cache: dict[int, dict[str, Any]] = {}
_gpu_nvdec_codecs: set[str] | None = None # None = not probed yet
_has_libplacebo: bool | None = None # None = not probed yet
_load_settings: Callable[[], dict[str, Any]] = dict
# Super-resolution configuration (set by init())
# Directory containing TensorRT engines: {model}_{height}p_fp16.engine
_sr_engine_dir: str = ""
# Use old "cache" if it exists (backwards compat), otherwise ".cache"
_OLD_CACHE = pathlib.Path(__file__).parent / "cache"
_CACHE_DIR = _OLD_CACHE if _OLD_CACHE.exists() else pathlib.Path(__file__).parent / ".cache"
_SERIES_PROBE_CACHE_FILE = _CACHE_DIR / "series_probe_cache.json"
_LANG_NAMES = {
"eng": "English",
"spa": "Spanish",
"fre": "French",
"ger": "German",
"por": "Portuguese",
"ita": "Italian",
"jpn": "Japanese",
"kor": "Korean",
"chi": "Chinese",
"ara": "Arabic",
"rus": "Russian",
"und": "Unknown",
}
@dataclass(slots=True)
class SubtitleStream:
index: int
lang: str
name: str
@dataclass(slots=True)
class MediaInfo:
video_codec: str
audio_codec: str
pix_fmt: str
audio_channels: int = 0
audio_sample_rate: int = 0
audio_profile: str = "" # e.g. "LC", "HE-AAC", "HE-AACv2"
subtitle_codecs: list[str] | None = None
duration: float = 0.0
height: int = 0
video_bitrate: int = 0 # bits per second, 0 if unknown
interlaced: bool = False # True if field_order indicates interlaced
is_10bit: bool = False # True if pix_fmt indicates 10-bit color
is_hdr: bool = False # True if color transfer indicates HDR
is_hls: bool = False # True if format is HLS (for input options)
def init(
load_settings: Callable[[], dict[str, Any]],
sr_engine_dir: str = "",
) -> None:
"""Initialize module with settings loader and optional AI Upscale config."""
global _load_settings, _sr_engine_dir
_load_settings = load_settings
_sr_engine_dir = sr_engine_dir
_load_series_probe_cache()
if _sr_engine_dir:
log.info("AI Upscale enabled: engine_dir=%s", _sr_engine_dir)
def get_settings() -> dict[str, Any]:
"""Get current settings."""
return _load_settings()
def get_ffmpeg_env() -> dict[str, str] | None:
"""Get environment for ffmpeg subprocess. Returns None (ffmpeg has libtorch via rpath)."""
# ffmpeg is built with -Wl,-rpath pointing to libtorch, so no LD_LIBRARY_PATH needed
return None
def _find_sr_engine(model_name: str, source_height: int) -> tuple[str, int, int, int] | None:
"""Find the best matching SR engine file for the given model and resolution.
Returns (engine_path, input_height, input_width, scale_factor) or None if not found.
"""
engine_dir = pathlib.Path(_sr_engine_dir)
if not engine_dir.exists():
return None
# Find all engines for this model
# Engine naming: {model}_{height}p_fp16.engine
engines: list[tuple[int, pathlib.Path]] = []
for engine in engine_dir.glob(f"{model_name}_*p_fp16.engine"):
# Extract height from filename
name = engine.stem # e.g., "2x-liveaction-span_1080p_fp16"
parts = name.rsplit("_", 2)
if len(parts) >= 3:
height_str = parts[1].rstrip("p")
if height_str.isdigit():
engines.append((int(height_str), engine))
if not engines:
return None
# Determine scale factor from model name prefix (e.g., "2x-", "4x-")
scale_match = re.match(r"^(\d+)x-", model_name)
if scale_match:
scale = int(scale_match.group(1))
elif model_name == "realesrgan":
# Legacy model name - was 4x
scale = 4
else:
log.error(
"SR: cannot determine scale from model name: %s (expected Nx- prefix)", model_name
)
return None
# Sort by height ascending
engines.sort(key=lambda x: x[0])
# Select appropriate engine based on source height
if source_height <= 0:
# Probe failed - use highest resolution engine
engine_height, engine_path = engines[-1]
log.warning("SR: probe failed, using %dp engine", engine_height)
else:
# Find engine closest to but >= source height, or use largest if source is bigger
engine_height, engine_path = engines[-1] # default to largest
for h, p in engines:
if h >= source_height:
engine_height, engine_path = h, p
break
# Calculate width assuming 16:9 aspect ratio, rounded to multiple of 8
engine_width = ((engine_height * 16 // 9) + 7) // 8 * 8
return str(engine_path), engine_height, engine_width, scale
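# Illustrative selection (hypothetical engine dir and files): assuming
# _sr_engine_dir = "/models" containing 2x-liveaction-span_720p_fp16.engine and
# 2x-liveaction-span_1080p_fp16.engine, a 576p source picks the smallest engine
# whose input height covers it:
#
# _find_sr_engine("2x-liveaction-span", 576)
# # -> ("/models/2x-liveaction-span_720p_fp16.engine", 720, 1280, 2)
#
# A 1440p source (no engine >= 1440) and a failed probe (height 0) both fall
# back to the largest engine; width is 16:9 of the height, rounded to a
# multiple of 8, and the "2x-" prefix yields the scale factor.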
def _build_sr_filter(source_height: int, target_height: int) -> str:
"""Build AI Upscale filter string if needed. Returns empty string if disabled.
SR is controlled by sr_model setting - if a model is selected, SR is applied
when source height < target height.
"""
if not _sr_engine_dir:
return ""
# Get selected model from settings
settings = _load_settings()
model_name = settings.get("sr_model", "")
if not model_name:
return "" # SR disabled (Off selected)
# Find engine for this model and resolution
engine_info = _find_sr_engine(model_name, source_height)
if not engine_info:
log.warning("SR: no engine found for model=%s, source=%dp", model_name, source_height)
return ""
engine_path, engine_height, engine_width, scale = engine_info
# Apply SR when source resolution is below target (upscaling scenario)
if target_height and source_height >= target_height:
log.info(
"SR: skipping %s - source %dp >= target %dp", model_name, source_height, target_height
)
return ""
log.info(
"SR: applying %s (%dx) to %dp -> %dp",
model_name,
scale,
source_height,
target_height or (source_height * scale),
)
# Build SR filter chain:
# 1. Scale to engine's expected input size (preserving aspect with padding if needed)
# 2. Convert to RGB (model expects 3-channel RGB input)
# 3. hwupload to GPU (critical for performance - keeps data on GPU)
# 4. Apply SR via TensorRT dnn_processing (outputs Nx resolution on GPU)
# 5. Scale down on GPU to target resolution
sr_filter = (
f"scale={engine_width}:{engine_height}:force_original_aspect_ratio=decrease,"
f"pad={engine_width}:{engine_height}:(ow-iw)/2:(oh-ih)/2,"
f"format=rgb24,"
f"hwupload,"
f"dnn_processing=dnn_backend=tensorrt:model={engine_path}"
)
if target_height:
# After dnn_processing, data is on GPU - use scale_cuda with explicit params
sr_filter += f",scale_cuda=w=-2:h={target_height}"
return sr_filter
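# Illustrative output (hypothetical model and paths): for a 720p source, a
# 1080p target, and a 4x-compact engine built for 720p input, the chain above
# yields (shown wrapped here; the real string has no line breaks):
#
#   scale=1280:720:force_original_aspect_ratio=decrease,
#   pad=1280:720:(ow-iw)/2:(oh-ih)/2,format=rgb24,hwupload,
#   dnn_processing=dnn_backend=tensorrt:model=/models/4x-compact_720p_fp16.engine,
#   scale_cuda=w=-2:h=1080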
def get_hls_segment_duration() -> float:
"""Get HLS segment duration in seconds."""
return _HLS_SEGMENT_DURATION_SEC
# ===========================================================================
# GPU Detection
# ===========================================================================
def _get_gpu_nvdec_codecs() -> set[str]:
"""Get supported NVDEC codecs, probing GPU on first call."""
global _gpu_nvdec_codecs
if _gpu_nvdec_codecs is not None:
return _gpu_nvdec_codecs
_gpu_nvdec_codecs = set()
try:
result = subprocess.run(
["nvidia-smi", "--query-gpu=name,compute_cap", "--format=csv,noheader"],
capture_output=True,
text=True,
timeout=5,
)
if result.returncode != 0:
log.info("No NVIDIA GPU detected")
return _gpu_nvdec_codecs
# Parse "NVIDIA GeForce GTX TITAN X, 5.2"
line = result.stdout.strip().split("\n")[0]
parts = line.rsplit(",", 1)
if len(parts) != 2:
return _gpu_nvdec_codecs
gpu_name = parts[0].strip()
compute_cap = float(parts[1].strip())
_gpu_nvdec_codecs = {
codec for codec, min_cap in _NVDEC_MIN_COMPUTE.items() if compute_cap >= min_cap
}
log.info(
"GPU: %s (compute %.1f) NVDEC: %s",
gpu_name,
compute_cap,
_gpu_nvdec_codecs or "none",
)
except Exception as e:
log.debug("GPU probe failed: %s", e)
return _gpu_nvdec_codecs
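# Worked examples of the compute-capability gating above:
#   5.2 (Maxwell, e.g. GTX TITAN X) -> {"h264"}
#   6.1 (Pascal,  e.g. GTX 1080)    -> {"h264", "hevc"}
#   8.6 (Ampere,  e.g. RTX 3080)    -> {"h264", "hevc", "av1"}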
def _has_libplacebo_filter() -> bool:
"""Check if FFmpeg has libplacebo filter available (for GPU HDR tone mapping)."""
global _has_libplacebo
if _has_libplacebo is not None:
return _has_libplacebo
_has_libplacebo = False
try:
result = subprocess.run(
["ffmpeg", "-filters"],
capture_output=True,
text=True,
timeout=5,
)
_has_libplacebo = "libplacebo" in result.stdout
log.info("libplacebo filter available: %s", _has_libplacebo)
except Exception as e:
log.debug("libplacebo probe failed: %s", e)
return _has_libplacebo
# ===========================================================================
# User-Agent
# ===========================================================================
def get_user_agent() -> str | None:
"""Get user-agent string from settings, or None to use FFmpeg default."""
settings = _load_settings()
preset = settings.get("user_agent_preset", "default")
if preset == "default":
return None
if preset == "custom":
return settings.get("user_agent_custom") or None
return _USER_AGENT_PRESETS.get(preset)
# ===========================================================================
# Transcode Directory
# ===========================================================================
def get_transcode_dir() -> pathlib.Path:
"""Get the transcode output directory. Falls back to system temp if not set or inaccessible."""
custom_dir = _load_settings().get("transcode_dir", "")
if custom_dir:
path = pathlib.Path(custom_dir)
try:
path.mkdir(parents=True, exist_ok=True)
return path
except (PermissionError, OSError) as e:
log.warning("Transcode dir %s inaccessible (%s), using temp dir", custom_dir, e)
return pathlib.Path(tempfile.gettempdir())
# ===========================================================================
# Series Probe Cache Persistence
# ===========================================================================
def _load_series_probe_cache() -> None:
"""Load series probe cache from disk."""
if not _SERIES_PROBE_CACHE_FILE.exists():
return
try:
data = json.loads(_SERIES_PROBE_CACHE_FILE.read_text())
count = 0
with _probe_lock:
for sid_str, series_data in data.items():
sid = int(sid_str)
if sid not in _series_probe_cache:
_series_probe_cache[sid] = {
"name": series_data.get("name", ""),
"mru": series_data.get("mru"),
"episodes": {},
}
else:
_series_probe_cache[sid].setdefault("name", series_data.get("name", ""))
_series_probe_cache[sid].setdefault("mru", series_data.get("mru"))
_series_probe_cache[sid].setdefault("episodes", {})
for eid_str, entry in series_data.get("episodes", {}).items():
eid = int(eid_str)
if eid in _series_probe_cache[sid]["episodes"]:
continue
# Use .get() for all fields to handle corrupt/incomplete cache
video_codec = entry.get("video_codec", "")
if not video_codec:
continue # Skip entries without video codec
media_info = MediaInfo(
video_codec=video_codec,
audio_codec=entry.get("audio_codec", ""),
pix_fmt=entry.get("pix_fmt", ""),
audio_channels=entry.get("audio_channels", 0),
audio_sample_rate=entry.get("audio_sample_rate", 0),
audio_profile=entry.get("audio_profile", ""),
subtitle_codecs=entry.get("subtitle_codecs"),
duration=entry.get("duration", 0),
height=entry.get("height", 0),
video_bitrate=entry.get("video_bitrate", 0),
interlaced=entry.get("interlaced", False),
is_10bit=entry.get("is_10bit", False),
is_hdr=entry.get("is_hdr", False),
is_hls=entry.get("is_hls", False),
)
subs = [
SubtitleStream(s["index"], s.get("lang", "und"), s.get("name", ""))
for s in entry.get("subtitles", [])
]
_series_probe_cache[sid]["episodes"][eid] = (
entry.get("time", 0),
media_info,
subs,
)
count += 1
log.info("Loaded %d series probe cache entries", count)
except Exception as e:
log.warning("Failed to load series probe cache: %s", e)
def _save_series_probe_cache() -> None:
"""Save series probe cache to disk."""
with _probe_lock:
data: dict[str, dict[str, Any]] = {}
for sid, series_data in _series_probe_cache.items():
episodes = series_data.get("episodes", {})
data[str(sid)] = {
"name": series_data.get("name", ""),
"mru": series_data.get("mru"),
"episodes": {},
}
for eid, (cache_time, media_info, subs) in episodes.items():
if media_info is None:
continue
data[str(sid)]["episodes"][str(eid)] = {
"time": cache_time,
"video_codec": media_info.video_codec,
"audio_codec": media_info.audio_codec,
"pix_fmt": media_info.pix_fmt,
"audio_channels": media_info.audio_channels,
"audio_sample_rate": media_info.audio_sample_rate,
"subtitle_codecs": media_info.subtitle_codecs,
"duration": media_info.duration,
"height": media_info.height,
"video_bitrate": media_info.video_bitrate,
"interlaced": media_info.interlaced,
"is_10bit": media_info.is_10bit,
"is_hdr": media_info.is_hdr,
"subtitles": [{"index": s.index, "lang": s.lang, "name": s.name} for s in subs],
}
try:
_SERIES_PROBE_CACHE_FILE.parent.mkdir(parents=True, exist_ok=True)
_SERIES_PROBE_CACHE_FILE.write_text(json.dumps(data, indent=2))
except Exception as e:
log.warning("Failed to save series probe cache: %s", e)
# ===========================================================================
# Probe Cache Management
# ===========================================================================
def get_series_probe_cache_stats() -> list[dict[str, Any]]:
"""Get stats about cached series probes for settings UI."""
with _probe_lock:
log.info(
"get_series_probe_cache_stats: cache has %d series: %s",
len(_series_probe_cache),
list(_series_probe_cache.keys()),
)
result = []
for series_id, series_data in _series_probe_cache.items():
episodes = series_data.get("episodes", {})
if not episodes:
continue
# Get most recent entry for display info
most_recent = max(episodes.values(), key=lambda x: x[0])
_, media_info, subs = most_recent
if media_info is None:
continue
# Build episode list
episode_list = []
for eid, (_, emedia, esubs) in episodes.items():
if emedia:
episode_list.append(
{
"episode_id": eid,
"duration": emedia.duration,
"subtitle_count": len(esubs),
}
)
result.append(
{
"series_id": series_id,
"name": series_data.get("name", ""),
"mru": series_data.get("mru"),
"episode_count": len(episodes),
"video_codec": media_info.video_codec,
"audio_codec": media_info.audio_codec,
"subtitle_count": len(subs),
"episodes": sorted(episode_list, key=lambda x: x["episode_id"]),
}
)
return sorted(result, key=lambda x: x.get("name") or str(x["series_id"]))
def clear_all_probe_cache() -> int:
"""Clear all probe caches. Returns count of entries cleared."""
with _probe_lock:
url_count = len(_probe_cache)
series_count = sum(len(s.get("episodes", {})) for s in _series_probe_cache.values())
_probe_cache.clear()
_series_probe_cache.clear()
_save_series_probe_cache()
log.info("Cleared probe cache: %d URL entries, %d series entries", url_count, series_count)
return url_count + series_count
def invalidate_series_probe_cache(series_id: int, episode_id: int | None = None) -> None:
"""Invalidate cached probe for series/episode.
If episode_id is None, clears entire series. Otherwise clears just that episode.
"""
with _probe_lock:
if series_id not in _series_probe_cache:
return
if episode_id is None:
del _series_probe_cache[series_id]
log.info("Cleared probe cache for series=%d", series_id)
else:
series_data = _series_probe_cache[series_id]
episodes = series_data.get("episodes", {})
if episode_id in episodes:
del episodes[episode_id]
log.info(
"Cleared probe cache for series=%d episode=%d",
series_id,
episode_id,
)
_save_series_probe_cache()
def clear_series_mru(series_id: int) -> None:
"""Clear only the MRU for a series, keeping episode cache intact."""
with _probe_lock:
if series_id not in _series_probe_cache:
return
if "mru" in _series_probe_cache[series_id]:
del _series_probe_cache[series_id]["mru"]
log.info("Cleared MRU for series=%d", series_id)
_save_series_probe_cache()
def restore_probe_cache_entry(
url: str,
media_info: MediaInfo,
subs: list[SubtitleStream],
series_id: int | None = None,
episode_id: int | None = None,
) -> None:
"""Restore a probe cache entry (used during session recovery)."""
now = time.time()
with _probe_lock:
if url not in _probe_cache:
_probe_cache[url] = (now, media_info, subs)
if series_id is not None:
if series_id not in _series_probe_cache:
_series_probe_cache[series_id] = {"name": "", "episodes": {}}
_series_probe_cache[series_id].setdefault("episodes", {})
eid = episode_id or 0
if eid not in _series_probe_cache[series_id]["episodes"]:
_series_probe_cache[series_id]["episodes"][eid] = (now, media_info, subs)
# ===========================================================================
# Media Probing
# ===========================================================================
def _lang_display_name(code: str) -> str:
return _LANG_NAMES.get(code, code.upper())
def resolve_hls_master_playlist(url: str) -> str:
"""Resolve HLS master playlist to highest bandwidth variant URL.
If the URL points to an HLS master playlist (contains #EXT-X-STREAM-INF),
this fetches and parses it to find the variant with the highest bandwidth.
Returns the resolved variant URL, or the original URL if not a master playlist
or on any error.
"""
from urllib.parse import urljoin
import urllib.request
if not url.endswith(".m3u8") and ".m3u8?" not in url:
return url # Not an m3u8, return as-is
try:
req = urllib.request.Request(url)
user_agent = get_user_agent()
if user_agent:
req.add_header("User-Agent", user_agent)
with urllib.request.urlopen(req, timeout=10) as response:
content = response.read().decode("utf-8", errors="replace")
# Check if this is a master playlist (has #EXT-X-STREAM-INF)
if "#EXT-X-STREAM-INF" not in content:
return url # Not a master playlist
# Parse variants: each #EXT-X-STREAM-INF is followed by a URL line
lines = content.strip().split("\n")
variants: list[tuple[int, str]] = []
for i, line in enumerate(lines):
if line.startswith("#EXT-X-STREAM-INF:"):
# Extract BANDWIDTH from the tag
bandwidth = 0
for attr in line.split(":")[1].split(","):
if attr.startswith("BANDWIDTH="):
with suppress(ValueError):
bandwidth = int(attr.split("=")[1])
break
# Next non-comment line is the variant URL
for j in range(i + 1, len(lines)):
variant_line = lines[j].strip()
if variant_line and not variant_line.startswith("#"):
# Resolve relative URL
variant_url = urljoin(url, variant_line)
variants.append((bandwidth, variant_url))
break
if not variants:
log.warning("HLS master playlist has no variants: %s", url[:80])
return url
# Select highest bandwidth variant
variants.sort(key=lambda x: x[0], reverse=True)
best_bandwidth, best_url = variants[0]
log.info(
"HLS master playlist resolved: %d variants, selected %d bps: %s",
len(variants),
best_bandwidth,
best_url[:80],
)
return best_url
except Exception as e:
log.warning("Failed to resolve HLS master playlist %s: %s", url[:80], e)
return url
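# Illustrative input (hypothetical playlist): given this master playlist served
# at https://example.com/live/master.m3u8 ...
#
#   #EXTM3U
#   #EXT-X-STREAM-INF:BANDWIDTH=1200000,RESOLUTION=1280x720
#   720p/index.m3u8
#   #EXT-X-STREAM-INF:BANDWIDTH=5000000,RESOLUTION=1920x1080
#   1080p/index.m3u8
#
# ... the function returns the highest-bandwidth variant resolved against the
# master URL: https://example.com/live/1080p/index.m3u8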
def probe_media(
url: str,
series_id: int | None = None,
episode_id: int | None = None,
series_name: str = "",
) -> tuple[MediaInfo | None, list[SubtitleStream]]:
"""Probe media, returns (media_info, subtitles)."""
# Check series/episode cache first
cache_hit_result: tuple[MediaInfo, list[SubtitleStream]] | None = None
save_mru = False
if series_id is not None:
with _probe_lock:
series_data = _series_probe_cache.get(series_id)
if series_data:
episodes = series_data.get("episodes", {})
mru_eid = series_data.get("mru")
# Try exact episode first
if episode_id is not None and episode_id in episodes:
cache_time, media_info, subtitles = episodes[episode_id]
if time.time() - cache_time < _SERIES_PROBE_CACHE_TTL_SEC:
# Update MRU to this episode
if series_data.get("mru") != episode_id:
series_data["mru"] = episode_id
save_mru = True
log.info(
"Probe cache hit for series=%d episode=%d",
series_id,
episode_id,
)
cache_hit_result = (media_info, subtitles)
# Fall back to MRU if set
elif mru_eid is not None and mru_eid in episodes:
cache_time, media_info, subtitles = episodes[mru_eid]
if time.time() - cache_time < _SERIES_PROBE_CACHE_TTL_SEC:
log.info(
"Probe cache hit for series=%d (fallback from mru=%d)",
series_id,
mru_eid,
)
cache_hit_result = (media_info, subtitles)
# Save MRU update outside the lock to avoid deadlock
if save_mru:
_save_series_probe_cache()
if cache_hit_result:
return cache_hit_result
# Check URL cache (for movies, or series cache miss)
with _probe_lock:
cached = _probe_cache.get(url)
if cached:
cache_time, media_info, subtitles = cached
if time.time() - cache_time < _PROBE_CACHE_TTL_SEC:
log.info("Probe cache hit for %s", url[:50])
return media_info, subtitles
log.info(
"Probe cache miss for %s (series=%s, episode=%s)",
url[:50],
series_id,
episode_id,
)
# Build base probe command
# MPEG-TS streams (HDHomeRun, live TV) need ~1MB to reach first keyframe
# which contains the sequence header with dimensions. GOP at 15Mbps = ~1-2MB.
base_cmd = [
"ffprobe",
"-probesize",
"1000000", # Had to increase for HDHomerun; was 50000.
"-analyzeduration",
"1500000", # Had to increase for HDHomerun; was 500000.
"-v",
"quiet",
"-print_format",
"json",
"-show_streams",
"-show_format",
]
user_agent = get_user_agent()
if user_agent:
base_cmd.extend(["-user_agent", user_agent])
# Try probe without forcing HLS first, retry with HLS options if it fails
is_hls = False
data = None
for force_hls in (False, True):
try:
cmd = base_cmd.copy()
if force_hls:
cmd.extend(["-f", "hls", "-extension_picky", "0"])
cmd.append(url)
log.info("Probing%s: %s", " (HLS mode)" if force_hls else "", " ".join(cmd))
result = subprocess.run(
cmd,
check=False,
capture_output=True,
text=True,
timeout=_PROBE_TIMEOUT_SEC,
)
if result.returncode == 0:
data = json.loads(result.stdout)
# Check detected format or if we forced HLS
format_name = data.get("format", {}).get("format_name", "").lower()
is_hls = force_hls or "hls" in format_name
break
except Exception as e:
log.warning("Probe failed%s: %s", " (HLS mode)" if force_hls else "", e)
continue
if data is None:
return None, []
video_codec = audio_codec = pix_fmt = audio_profile = ""
audio_channels = audio_sample_rate = 0
subtitle_codecs: list[str] = []
subtitles: list[SubtitleStream] = []
height = 0
video_bitrate = 0
interlaced = False
is_10bit = False
is_hdr = False
for stream in data.get("streams", []):
codec = stream.get("codec_name", "").lower()
codec_type = stream.get("codec_type", "")
if codec_type == "video" and not video_codec:
video_codec = codec
pix_fmt = stream.get("pix_fmt", "")
height = stream.get("height", 0) or 0
# Detect interlacing from field_order (tt, bb, tb, bt = interlaced)
field_order = stream.get("field_order", "").lower()
interlaced = field_order in ("tt", "bb", "tb", "bt")
# Detect 10-bit from pix_fmt (e.g. yuv420p10le, p010le)
# Check for "p10" or "10le/10be" to avoid false positive on yuv410p
is_10bit = "p10" in pix_fmt or "10le" in pix_fmt or "10be" in pix_fmt
# Detect HDR from color_transfer (PQ = smpte2084, HLG = arib-std-b67)
color_transfer = stream.get("color_transfer", "").lower()
is_hdr = color_transfer in ("smpte2084", "arib-std-b67")
# Try to get bitrate from stream, fall back to format
with suppress(ValueError, TypeError):
video_bitrate = int(stream.get("bit_rate", 0) or 0)
elif codec_type == "audio" and not audio_codec:
audio_codec = codec
audio_channels = stream.get("channels", 0)
audio_sample_rate = int(stream.get("sample_rate", 0) or 0)
audio_profile = stream.get("profile", "")
elif codec_type == "subtitle":
subtitle_codecs.append(codec)
if codec in TEXT_SUBTITLE_CODECS:
idx = stream.get("index")
if idx is not None:
tags = stream.get("tags", {})
lang = tags.get("language", "und").lower()
name = tags.get("name") or tags.get("title") or _lang_display_name(lang)
subtitles.append(
SubtitleStream(
index=idx,
lang=lang,
name=name,
)
)
duration = 0.0
fmt = data.get("format", {})
if fmt.get("duration"):
with suppress(ValueError, TypeError):
duration = float(fmt["duration"])
# Fall back to format bitrate if stream bitrate unavailable (common for MKV)
if not video_bitrate and fmt.get("bit_rate"):
with suppress(ValueError, TypeError):
video_bitrate = int(fmt["bit_rate"])
if not video_codec:
return None, []
media_info = MediaInfo(
video_codec=video_codec,
audio_codec=audio_codec,
pix_fmt=pix_fmt,
audio_channels=audio_channels,
audio_sample_rate=audio_sample_rate,
audio_profile=audio_profile,
subtitle_codecs=subtitle_codecs or None,
duration=duration,
height=height,
video_bitrate=video_bitrate,
interlaced=interlaced,
is_10bit=is_10bit,
is_hdr=is_hdr,
is_hls=is_hls,
)
# Only cache if we got valid video info (height > 0)
if height <= 0:
log.warning("Probe returned invalid height=%d, not caching: %s", height, url[:80])
return media_info, subtitles
with _probe_lock:
_probe_cache[url] = (time.time(), media_info, subtitles)
# Cache by series_id/episode_id if provided
if series_id is not None:
if series_id not in _series_probe_cache:
_series_probe_cache[series_id] = {"name": series_name, "episodes": {}}
elif not _series_probe_cache[series_id].get("name") and series_name:
_series_probe_cache[series_id]["name"] = series_name
eid = episode_id if episode_id is not None else 0
_series_probe_cache[series_id].setdefault("episodes", {})[eid] = (
time.time(),
media_info,
subtitles,
)
# Set MRU to this episode
old_mru = _series_probe_cache[series_id].get("mru")
_series_probe_cache[series_id]["mru"] = eid
log.info(
"Probe cached: series=%s episode=%s, mru changed from %s to %s",
series_id,
eid,
old_mru,
eid,
)
if series_id is not None:
_save_series_probe_cache()
return media_info, subtitles
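# Illustrative usage (hypothetical URL): probe once, then decide whether the
# stream can be stream-copied or needs a transcode.
#
# info, subs = probe_media("http://example.com/vod/movie.mkv")
# if info is not None:
#     needs_transcode = info.video_codec != "h264" or info.interlaced or info.is_hdr
#     print(info.height, info.duration, [s.lang for s in subs], needs_transcode)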
# ===========================================================================
# FFmpeg Command Building
# ===========================================================================
def _build_video_args(
*,
copy_video: bool,
hw: HwAccel,
deinterlace: bool,
use_hw_pipeline: bool,
max_resolution: str,
quality: str,
is_hdr: bool = False,
source_height: int = 0,
) -> tuple[list[str], list[str]]:
"""Build video args. Returns (pre_input_args, post_input_args)."""
if copy_video:
return [], ["-c:v", "copy"]
# Parse hw into encoder and fallback
enc_type, fallback = _parse_hw(hw)
max_h = _MAX_RES_HEIGHT.get(max_resolution)
# Check if SR should be applied (discrete GPUs only)
sr_filter = ""
sr_model = _load_settings().get("sr_model", "")
if sr_model and enc_type in ("nvenc", "amf") and _sr_engine_dir:
sr_filter = _build_sr_filter(source_height, max_h or 0)
# SR requires CPU frames, so disable hw pipeline when SR active
if sr_filter:
use_hw_pipeline = False
# Fall back gracefully if VAAPI is needed but no device was detected
needs_vaapi = enc_type == "vaapi" or fallback == "vaapi"
if needs_vaapi and not VAAPI_DEVICE:
if enc_type == "vaapi":
# Pure VAAPI encoder requested but not available - fall back to software
log.warning("VAAPI unavailable (no Intel/AMD GPU), falling back to software encoding")
enc_type = "software"
else:
# VAAPI fallback requested but not available - use software decode instead
log.warning("VAAPI fallback unavailable (no Intel/AMD GPU), using software decode")
fallback = "software"
# Height expr for scale filter (scale down only, -2 keeps width divisible by 2)
h = f"min(ih\\,{max_h})" if max_h else None
qp = _QUALITY_QP.get(quality, 28)
if enc_type == "nvenc":
if use_hw_pipeline:
# CUDA decode path
pre = [
"-hwaccel",
"cuda",
"-hwaccel_output_format",
"cuda",
"-extra_hw_frames",
"3",
]
scale = f"scale_cuda=-2:{h}:format=nv12" if h else "scale_cuda=format=nv12"
deint = "yadif_cuda=0," if deinterlace else "" # mode=0 keeps original framerate
# HDR tone mapping: prefer libplacebo (Vulkan GPU), fall back to CPU zscale+tonemap
if is_hdr:
if _has_libplacebo_filter():
tonemap = "hwdownload,format=p010le,libplacebo=tonemapping=hable:colorspace=bt709:color_primaries=bt709:color_trc=bt709,format=nv12,hwupload_cuda,"
else:
tonemap = "hwdownload,format=p010le,zscale=t=linear:npl=100,format=gbrpf32le,zscale=p=bt709,tonemap=hable:desat=0,zscale=t=bt709:m=bt709:r=tv,format=nv12,hwupload_cuda,"
else:
tonemap = ""
vf = f"{deint}{tonemap}{scale}"
elif fallback == "vaapi":
# VAAPI decode + VAAPI filters + hwdownload + hwupload_cuda for NVENC
pre = [
"-hwaccel",
"vaapi",
"-hwaccel_output_format",
"vaapi",
"-hwaccel_device",
VAAPI_DEVICE,
]
scale = f"scale_vaapi=w=-2:h={h}:format=nv12" if h else "scale_vaapi=format=nv12"
tonemap = "tonemap_vaapi=format=nv12:t=bt709:m=bt709:p=bt709," if is_hdr else ""
deint = "deinterlace_vaapi," if deinterlace else ""
vf = f"{deint}{tonemap}{scale},hwdownload,format=nv12,hwupload_cuda"
else:
# Software decode, upload to GPU for scaling/encoding
pre = []
scale = f"scale_cuda=-2:{h}:format=nv12" if h else "scale_cuda=format=nv12"
# HDR tone mapping: prefer libplacebo (Vulkan GPU), fall back to CPU zscale+tonemap
# Deinterlace before tonemap (CPU yadif) for consistency with hw decode path
if sr_filter:
# SR path: CPU decode -> deinterlace -> SR (GPU) -> encode
# SR filter ends with scale_cuda, outputs CUDA frames ready for nvenc
# Need init_hw_device for TensorRT dnn_processing to use GPU
pre = ["-init_hw_device", "cuda=cu", "-filter_hw_device", "cu"]
deint = "yadif=0," if deinterlace else ""
vf = f"{deint}{sr_filter}"
elif is_hdr:
deint = "yadif=0," if deinterlace else "" # CPU deinterlace before tonemap
if _has_libplacebo_filter():
tonemap = "libplacebo=tonemapping=hable:colorspace=bt709:color_primaries=bt709:color_trc=bt709,format=nv12,hwupload_cuda,"
else:
tonemap = "zscale=t=linear:npl=100,format=gbrpf32le,zscale=p=bt709,tonemap=hable:desat=0,zscale=t=bt709:m=bt709:r=tv,format=nv12,hwupload_cuda,"
vf = f"{deint}{tonemap}{scale}"
else:
deint = "yadif_cuda=0," if deinterlace else "" # GPU deinterlace after upload
tonemap = "format=nv12,hwupload_cuda,"
vf = f"{tonemap}{deint}{scale}"
preset = "p4" if deinterlace or sr_filter else "p2"
encoder = "h264_nvenc"
# Lookahead for better quality, B-frames for compression, AQ for adaptive quantization
enc_opts = [
"-preset",
preset,
"-rc",
"constqp",
"-qp",
str(qp),
"-rc-lookahead",
"32",
"-bf",
"3",
"-spatial-aq",
"1",
"-temporal-aq",
"1",
]
elif enc_type == "amf":
# AMF has no hardware decode - always uses fallback for decode/filter
if fallback == "vaapi":
# VAAPI decode + VAAPI filters + hwdownload for AMF encode
pre = [
"-hwaccel",
"vaapi",
"-hwaccel_output_format",
"vaapi",
"-hwaccel_device",
VAAPI_DEVICE,
]
scale = f"scale_vaapi=w=-2:h={h}:format=nv12" if h else "scale_vaapi=format=nv12"
tonemap = "tonemap_vaapi=format=nv12:t=bt709:m=bt709:p=bt709," if is_hdr else ""
deint = "deinterlace_vaapi," if deinterlace else ""
vf = f"{deint}{tonemap}{scale},hwdownload,format=nv12"
else:
# Software decode + software filters
pre = []
if is_hdr:
tonemap = "zscale=t=linear:npl=100,format=gbrpf32le,zscale=p=bt709,tonemap=hable:desat=0,zscale=t=bt709:m=bt709:r=tv,format=nv12,"
else:
tonemap = ""
deint = "yadif=0," if deinterlace else ""
scale = f"scale=-2:{h}" if h else ""
vf = f"{deint}{tonemap}{scale},format=nv12".strip(",").replace(",,", ",")
encoder = "h264_amf"
enc_opts = [
"-rc",
"cqp",
"-qp_i",
str(qp),
"-qp_p",
str(qp),
"-quality",
"balanced",
]
elif enc_type == "vaapi":
if use_
================================================
SYMBOL INDEX (917 symbols across 31 files)
================================================
FILE: auth.py
function _get_settings_file (line 24) | def _get_settings_file() -> pathlib.Path:
function _get_secret_key (line 29) | def _get_secret_key() -> str:
function _hash_password (line 41) | def _hash_password(password: str, salt: str | None = None) -> str:
function _verify_hashed_password (line 49) | def _verify_hashed_password(password: str, hashed: str) -> bool:
function _get_users (line 57) | def _get_users() -> dict[str, dict[str, Any]]:
function get_all_usernames (line 69) | def get_all_usernames() -> list[str]:
function is_setup_required (line 74) | def is_setup_required() -> bool:
function create_user (line 79) | def create_user(username: str, password: str, admin: bool = False) -> None:
function _ensure_one_admin (line 97) | def _ensure_one_admin(users: dict[str, dict[str, Any]]) -> None:
function delete_user (line 104) | def delete_user(username: str) -> bool:
function verify_password (line 120) | def verify_password(username: str, password: str) -> bool:
function change_password (line 129) | def change_password(username: str, new_password: str) -> bool:
function is_admin (line 144) | def is_admin(username: str) -> bool:
function set_admin (line 151) | def set_admin(username: str, admin: bool) -> bool:
function get_users_with_admin (line 167) | def get_users_with_admin() -> list[dict[str, Any]]:
function get_user_limits (line 181) | def get_user_limits(username: str) -> dict[str, Any]:
function set_user_limits (line 191) | def set_user_limits(
function create_token (line 213) | def create_token(payload: dict[str, Any]) -> str:
function verify_token (line 221) | def verify_token(token: str) -> dict[str, Any] | None:
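The token helpers at the end of this module form a self-contained signed-token flow. A minimal usage sketch follows, assuming only the signatures listed above; the "user" and "exp" payload keys are assumptions (the expired-token test below implies an expiry field, but its name is not shown in the index).

import time

import auth

# Assumed flow: verify credentials, mint a signed token, round-trip it.
# Payload keys ("user", "exp") are illustrative, not confirmed by the index.
if auth.verify_password("alice", "s3cret"):
    token = auth.create_token({"user": "alice", "exp": time.time() + 3600})
    payload = auth.verify_token(token)  # dict on success, None if invalid/expired
    assert payload is not None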
FILE: auth_test.py
function auth_module (line 14) | def auth_module(tmp_path: Path):
class TestPasswordHashing (line 29) | class TestPasswordHashing:
method test_hash_password_creates_salt (line 30) | def test_hash_password_creates_salt(self, auth_module):
method test_hash_password_with_salt_deterministic (line 37) | def test_hash_password_with_salt_deterministic(self, auth_module):
method test_verify_hashed_password_correct (line 43) | def test_verify_hashed_password_correct(self, auth_module):
method test_verify_hashed_password_wrong (line 47) | def test_verify_hashed_password_wrong(self, auth_module):
class TestUserManagement (line 52) | class TestUserManagement:
method test_is_setup_required_no_users (line 53) | def test_is_setup_required_no_users(self, auth_module):
method test_create_user_and_verify (line 56) | def test_create_user_and_verify(self, auth_module):
class TestTokens (line 64) | class TestTokens:
method test_create_and_verify_token (line 65) | def test_create_and_verify_token(self, auth_module):
method test_token_format (line 74) | def test_token_format(self, auth_module):
method test_invalid_token_rejected (line 80) | def test_invalid_token_rejected(self, auth_module):
method test_tampered_token_rejected (line 85) | def test_tampered_token_rejected(self, auth_module):
method test_expired_token_rejected (line 92) | def test_expired_token_rejected(self, auth_module):
class TestSecretKey (line 99) | class TestSecretKey:
method test_get_secret_key_generates_and_persists (line 100) | def test_get_secret_key_generates_and_persists(self, auth_module):
FILE: cache.py
function _get_gpu_vendor (line 27) | def _get_gpu_vendor() -> str | None:
function _detect_vaapi_device (line 54) | def _detect_vaapi_device() -> str | None:
function _detect_libva_driver (line 60) | def _detect_libva_driver() -> str | None:
function _detect_dri_path (line 75) | def _detect_dri_path() -> str | None:
function _parse_json_file (line 124) | def _parse_json_file(path: str) -> tuple[Any, float] | None:
function load_file_cache (line 134) | def load_file_cache(name: str, use_process: bool = False) -> tuple[Any, ...
function save_file_cache (line 157) | def save_file_cache(name: str, data: Any) -> None:
function clear_all_caches (line 163) | def clear_all_caches() -> None:
function clear_all_file_caches (line 172) | def clear_all_file_caches() -> int:
function get_cache (line 186) | def get_cache() -> dict[str, Any]:
function get_cache_lock (line 191) | def get_cache_lock() -> threading.Lock:
function _sanitize_name (line 196) | def _sanitize_name(name: str) -> str:
function _url_to_filename (line 204) | def _url_to_filename(url: str) -> str:
function get_cached_logo (line 222) | def get_cached_logo(source_name: str, url: str) -> pathlib.Path | None:
function save_logo (line 241) | def save_logo(source_name: str, url: str, data: bytes, content_type: str...
function get_cached_info (line 264) | def get_cached_info(cache_key: str, fetch_fn: Callable[[], Any], force: ...
function _test_encoder (line 311) | def _test_encoder(cmd: list[str], timeout: int = 5, env: dict | None = N...
function detect_encoders (line 337) | def detect_encoders() -> dict[str, bool]:
function refresh_encoders (line 438) | def refresh_encoders() -> dict[str, bool]:
function _default_encoder (line 445) | def _default_encoder() -> str:
class Source (line 463) | class Source:
function load_server_settings (line 478) | def load_server_settings() -> dict[str, Any]:
function save_server_settings (line 512) | def save_server_settings(settings: dict[str, Any]) -> None:
function _validate_username (line 517) | def _validate_username(username: str) -> None:
function load_user_settings (line 529) | def load_user_settings(username: str) -> dict[str, Any]:
function save_user_settings (line 547) | def save_user_settings(username: str, settings: dict[str, Any]) -> None:
function get_watch_position (line 555) | def get_watch_position(username: str, stream_url: str) -> dict[str, Any]...
function save_watch_position (line 570) | def save_watch_position(username: str, stream_url: str, position: float,...
function get_sources (line 586) | def get_sources() -> list[Source]:
function update_source_epg_url (line 592) | def update_source_epg_url(source_id: str, epg_url: str) -> None:
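get_cached_info pairs an in-memory cache with the file cache and only invokes fetch_fn on a miss; the force flag (exercised by test_get_cached_info_force_bypasses_memory below) bypasses the memory layer. A sketch under those assumptions; the cache key and fetch function are placeholders, and the exact return shape is not shown in the index.

import cache

def fetch_channels() -> list[dict]:
    # Placeholder for an expensive upstream fetch.
    return [{"name": "News 24"}]

# First call misses and runs fetch_channels; the second is served from memory;
# force=True re-fetches. The return value is assumed to be the fetched data.
data = cache.get_cached_info("channels:demo", fetch_channels)
data = cache.get_cached_info("channels:demo", fetch_channels)
data = cache.get_cached_info("channels:demo", fetch_channels, force=True)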
FILE: cache_test.py
function cache_module (line 16) | def cache_module(tmp_path: Path):
class TestFileCache (line 40) | class TestFileCache:
method test_save_and_load_file_cache (line 41) | def test_save_and_load_file_cache(self, cache_module):
method test_load_nonexistent_cache (line 49) | def test_load_nonexistent_cache(self, cache_module):
method test_load_corrupted_cache (line 52) | def test_load_corrupted_cache(self, cache_module):
class TestMemoryCache (line 58) | class TestMemoryCache:
method test_get_cache_returns_reference (line 59) | def test_get_cache_returns_reference(self, cache_module):
method test_clear_all_caches_preserves_epg (line 64) | def test_clear_all_caches_preserves_epg(self, cache_module):
class TestCachedInfo (line 73) | class TestCachedInfo:
method test_get_cached_info_calls_fetch (line 74) | def test_get_cached_info_calls_fetch(self, cache_module):
method test_get_cached_info_uses_memory_cache (line 80) | def test_get_cached_info_uses_memory_cache(self, cache_module):
method test_get_cached_info_force_bypasses_memory (line 87) | def test_get_cached_info_force_bypasses_memory(self, cache_module):
class TestSettings (line 94) | class TestSettings:
method test_load_settings_defaults (line 95) | def test_load_settings_defaults(self, cache_module):
method test_save_and_load_settings (line 110) | def test_save_and_load_settings(self, cache_module):
class TestUserSettings (line 118) | class TestUserSettings:
method test_load_user_settings_defaults (line 119) | def test_load_user_settings_defaults(self, cache_module):
method test_save_and_load_user_settings (line 125) | def test_save_and_load_user_settings(self, cache_module):
method test_watch_position_save_and_get (line 132) | def test_watch_position_save_and_get(self, cache_module):
method test_watch_position_resets_at_95_percent (line 139) | def test_watch_position_resets_at_95_percent(self, cache_module):
class TestSource (line 146) | class TestSource:
method test_source_dataclass (line 147) | def test_source_dataclass(self, cache_module):
method test_get_sources_empty (line 159) | def test_get_sources_empty(self, cache_module):
method test_get_sources_from_settings (line 163) | def test_get_sources_from_settings(self, cache_module):
class TestUpdateSourceEpgUrl (line 181) | class TestUpdateSourceEpgUrl:
method test_update_source_epg_url (line 182) | def test_update_source_epg_url(self, cache_module):
method test_update_source_epg_url_not_overwrite (line 189) | def test_update_source_epg_url_not_overwrite(self, cache_module):
method test_update_source_epg_url_empty_noop (line 206) | def test_update_source_epg_url_empty_noop(self, cache_module):
class TestEncoderDetection (line 214) | class TestEncoderDetection:
method test_test_encoder_success (line 217) | def test_test_encoder_success(self):
method test_test_encoder_failure (line 226) | def test_test_encoder_failure(self):
method test_test_encoder_timeout (line 234) | def test_test_encoder_timeout(self):
method test_test_encoder_exception (line 242) | def test_test_encoder_exception(self):
method test_detect_encoders_all_available (line 250) | def test_detect_encoders_all_available(self):
method test_detect_encoders_none_available (line 267) | def test_detect_encoders_none_available(self):
method test_detect_encoders_partial (line 278) | def test_detect_encoders_partial(self):
method test_detect_encoders_nvenc_only (line 299) | def test_detect_encoders_nvenc_only(self):
method test_detect_encoders_vaapi_command_structure (line 314) | def test_detect_encoders_vaapi_command_structure(self):
method test_detect_encoders_qsv_command_structure (line 343) | def test_detect_encoders_qsv_command_structure(self):
method test_refresh_encoders_updates_global (line 360) | def test_refresh_encoders_updates_global(self):
method test_default_encoder_prefers_nvenc_with_vaapi (line 381) | def test_default_encoder_prefers_nvenc_with_vaapi(self):
method test_default_encoder_nvenc_without_vaapi (line 395) | def test_default_encoder_nvenc_without_vaapi(self):
method test_default_encoder_falls_back_to_amf (line 409) | def test_default_encoder_falls_back_to_amf(self):
method test_default_encoder_falls_back_to_qsv (line 423) | def test_default_encoder_falls_back_to_qsv(self):
method test_default_encoder_falls_back_to_vaapi (line 437) | def test_default_encoder_falls_back_to_vaapi(self):
method test_default_encoder_falls_back_to_software (line 451) | def test_default_encoder_falls_back_to_software(self):
class TestLogoCache (line 466) | class TestLogoCache:
method test_sanitize_name_removes_path_traversal (line 469) | def test_sanitize_name_removes_path_traversal(self):
method test_sanitize_name_keeps_safe_chars (line 474) | def test_sanitize_name_keeps_safe_chars(self):
method test_sanitize_name_truncates_long_names (line 478) | def test_sanitize_name_truncates_long_names(self):
method test_sanitize_name_empty_returns_default (line 483) | def test_sanitize_name_empty_returns_default(self):
method test_url_to_filename_extracts_name (line 487) | def test_url_to_filename_extracts_name(self):
method test_url_to_filename_strips_extension (line 492) | def test_url_to_filename_strips_extension(self):
method test_url_to_filename_hash_differs_by_url (line 497) | def test_url_to_filename_hash_differs_by_url(self):
method test_url_to_filename_fallback_to_hash (line 505) | def test_url_to_filename_fallback_to_hash(self):
method test_save_and_get_cached_logo (line 509) | def test_save_and_get_cached_logo(self, cache_module, tmp_path):
method test_get_cached_logo_returns_none_when_missing (line 526) | def test_get_cached_logo_returns_none_when_missing(self, cache_module,...
method test_get_cached_logo_expires (line 533) | def test_get_cached_logo_expires(self, cache_module, tmp_path):
method test_save_logo_content_type_mapping (line 554) | def test_save_logo_content_type_mapping(self, cache_module, tmp_path):
FILE: epg.py
class Program (line 31) | class Program:
function init (line 48) | def init(cache_dir: Path) -> None:
function _get_conn (line 80) | def _get_conn() -> sqlite3.Connection:
function clear (line 91) | def clear() -> None:
function clear_source (line 98) | def clear_source(source_id: str) -> None:
function insert_channel (line 106) | def insert_channel(channel_id: str, name: str, source_id: str) -> None:
function insert_icon (line 115) | def insert_icon(channel_id: str, url: str) -> None:
function insert_programs (line 124) | def insert_programs(programs: list[tuple[str, str, float, float, str, st...
function commit (line 133) | def commit() -> None:
function get_icon (line 138) | def get_icon(channel_id: str) -> str:
function get_programs_in_range (line 145) | def get_programs_in_range(
function _dedupe_programs (line 199) | def _dedupe_programs(programs: list[Program], preferred_source_id: str) ...
function get_programs_batch (line 219) | def get_programs_batch(
function get_icons_batch (line 282) | def get_icons_batch(channel_ids: list[str]) -> dict[str, str]:
function has_programs (line 300) | def has_programs() -> bool:
function get_program_count (line 307) | def get_program_count() -> int:
function get_channel_count (line 314) | def get_channel_count() -> int:
function prune_old_programs (line 321) | def prune_old_programs(before: datetime) -> int:
function _parse_epg_time (line 334) | def _parse_epg_time(s: str) -> datetime:
function _sanitize_epg_xml (line 350) | def _sanitize_epg_xml(xml_str: str) -> str:
function fetch_epg (line 362) | def fetch_epg(
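The module reads as a thin sqlite layer: init opens the store, the insert_* functions stage rows, commit flushes, and the get_* helpers query. A minimal walk-through using only calls whose signatures are fully visible above; the path and channel values are placeholders, and the empty-string fallback for get_icon is an assumption based on test_get_icon_not_found below.

from pathlib import Path

import epg

epg.init(Path("./cache"))  # open/create the sqlite store under the cache dir
epg.insert_channel("bbc1.uk", "BBC One", "source1")
epg.insert_icon("bbc1.uk", "https://example.com/bbc1.png")
epg.commit()

print(epg.get_icon("bbc1.uk"))   # icon URL; assumed "" when unknown
print(epg.get_channel_count())   # 1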
FILE: epg_test.py
function db (line 16) | def db(tmp_path: Path):
class TestInit (line 26) | class TestInit:
method test_init_creates_tables (line 29) | def test_init_creates_tables(self, db):
class TestChannels (line 38) | class TestChannels:
method test_insert_channel (line 41) | def test_insert_channel(self, db):
method test_insert_channel_upsert (line 50) | def test_insert_channel_upsert(self, db):
class TestIcons (line 61) | class TestIcons:
method test_insert_icon (line 64) | def test_insert_icon(self, db):
method test_get_icon_not_found (line 71) | def test_get_icon_not_found(self, db):
method test_get_icons_batch (line 75) | def test_get_icons_batch(self, db):
method test_get_icons_batch_empty (line 87) | def test_get_icons_batch_empty(self, db):
class TestPrograms (line 92) | class TestPrograms:
method test_insert_programs (line 95) | def test_insert_programs(self, db):
method test_get_programs_in_range (line 121) | def test_get_programs_in_range(self, db):
method test_get_programs_in_range_empty (line 162) | def test_get_programs_in_range_empty(self, db):
method test_get_programs_batch (line 167) | def test_get_programs_batch(self, db):
method test_get_programs_batch_empty_channels (line 187) | def test_get_programs_batch_empty_channels(self, db):
method test_has_programs_false (line 191) | def test_has_programs_false(self, db):
method test_has_programs_true (line 194) | def test_has_programs_true(self, db):
method test_get_program_count (line 202) | def test_get_program_count(self, db):
method test_get_channel_count (line 230) | def test_get_channel_count(self, db):
class TestClear (line 240) | class TestClear:
method test_clear_all (line 243) | def test_clear_all(self, db):
method test_clear_source (line 258) | def test_clear_source(self, db):
class TestPrune (line 290) | class TestPrune:
method test_prune_old_programs (line 293) | def test_prune_old_programs(self, db):
class TestPreferredSource (line 324) | class TestPreferredSource:
method test_prefer_source_in_range (line 327) | def test_prefer_source_in_range(self, db):
class TestProgram (line 367) | class TestProgram:
method test_program_dataclass (line 370) | def test_program_dataclass(self):
method test_program_defaults (line 385) | def test_program_defaults(self):
FILE: ffmpeg_command.py
function _parse_hw (line 30) | def _parse_hw(hw: HwAccel) -> tuple[str, str]:
class SubtitleStream (line 124) | class SubtitleStream:
class MediaInfo (line 131) | class MediaInfo:
function init (line 148) | def init(
function get_settings (line 161) | def get_settings() -> dict[str, Any]:
function get_ffmpeg_env (line 166) | def get_ffmpeg_env() -> dict[str, str] | None:
function _find_sr_engine (line 172) | def _find_sr_engine(model_name: str, source_height: int) -> tuple[str, i...
function _build_sr_filter (line 233) | def _build_sr_filter(source_height: int, target_height: int) -> str:
function get_hls_segment_duration (line 290) | def get_hls_segment_duration() -> float:
function _get_gpu_nvdec_codecs (line 300) | def _get_gpu_nvdec_codecs() -> set[str]:
function _has_libplacebo_filter (line 337) | def _has_libplacebo_filter() -> bool:
function get_user_agent (line 362) | def get_user_agent() -> str | None:
function get_transcode_dir (line 378) | def get_transcode_dir() -> pathlib.Path:
function _load_series_probe_cache (line 396) | def _load_series_probe_cache() -> None:
function _save_series_probe_cache (line 454) | def _save_series_probe_cache() -> None:
function get_series_probe_cache_stats (line 496) | def get_series_probe_cache_stats() -> list[dict[str, Any]]:
function clear_all_probe_cache (line 540) | def clear_all_probe_cache() -> int:
function invalidate_series_probe_cache (line 552) | def invalidate_series_probe_cache(series_id: int, episode_id: int | None...
function clear_series_mru (line 576) | def clear_series_mru(series_id: int) -> None:
function restore_probe_cache_entry (line 587) | def restore_probe_cache_entry(
function _lang_display_name (line 613) | def _lang_display_name(code: str) -> str:
function resolve_hls_master_playlist (line 617) | def resolve_hls_master_playlist(url: str) -> str:
function probe_media (line 685) | def probe_media(
function _build_video_args (line 913) | def _build_video_args(
function _build_audio_args (line 1170) | def _build_audio_args(*, copy_audio: bool, audio_sample_rate: int) -> li...
function get_live_hls_list_size (line 1178) | def get_live_hls_list_size() -> int:
function build_hls_ffmpeg_cmd (line 1188) | def build_hls_ffmpeg_cmd(
FILE: ffmpeg_command_test.py
function mock_vaapi_device (line 33) | def mock_vaapi_device():
class FakeMediaInfo (line 39) | class FakeMediaInfo:
method __init__ (line 42) | def __init__(
class TestBuildVideoArgs (line 74) | class TestBuildVideoArgs:
method test_all_hw_combinations (line 83) | def test_all_hw_combinations(self, hw: HwAccel, deinterlace: bool, max...
method test_copy_video (line 114) | def test_copy_video(self, hw: HwAccel):
method test_nvenc_hw_pipeline_filters (line 127) | def test_nvenc_hw_pipeline_filters(self):
method test_nvenc_sw_fallback_filters (line 142) | def test_nvenc_sw_fallback_filters(self):
method test_vaapi_filters (line 159) | def test_vaapi_filters(self):
method test_qsv_filters (line 173) | def test_qsv_filters(self):
method test_software_filters (line 187) | def test_software_filters(self):
method test_quality_presets (line 204) | def test_quality_presets(self, quality: str, expected_qp: str):
method test_invalid_hw_raises (line 216) | def test_invalid_hw_raises(self):
method test_nvenc_hdr_with_libplacebo (line 229) | def test_nvenc_hdr_with_libplacebo(self, mock_placebo):
method test_nvenc_hdr_zscale_fallback (line 248) | def test_nvenc_hdr_zscale_fallback(self, mock_placebo):
method test_nvenc_hdr_deinterlace_order (line 265) | def test_nvenc_hdr_deinterlace_order(self, mock_placebo):
method test_nvenc_sw_hdr_deinterlace_order (line 283) | def test_nvenc_sw_hdr_deinterlace_order(self, mock_placebo):
method test_vaapi_hdr_tonemap (line 301) | def test_vaapi_hdr_tonemap(self):
class TestBuildAudioArgs (line 321) | class TestBuildAudioArgs:
method test_copy_audio (line 324) | def test_copy_audio(self):
method test_sample_rates (line 338) | def test_sample_rates(self, sample_rate: int, expected: str):
class TestBuildHlsFfmpegCmd (line 350) | class TestBuildHlsFfmpegCmd:
method test_command_structure (line 358) | def test_command_structure(self, hw: HwAccel, is_vod: bool):
method test_vod_hls_flags (line 384) | def test_vod_hls_flags(self):
method test_live_hls_flags (line 392) | def test_live_hls_flags(self):
method test_copy_video_with_compatible_media (line 399) | def test_copy_video_with_compatible_media(self):
method test_no_copy_for_10bit (line 414) | def test_no_copy_for_10bit(self):
method test_no_copy_when_scaling_needed (line 427) | def test_no_copy_when_scaling_needed(self):
method test_user_agent (line 440) | def test_user_agent(self):
method test_probe_args_without_media_info (line 451) | def test_probe_args_without_media_info(self):
method test_no_probe_args_with_media_info (line 457) | def test_no_probe_args_with_media_info(self):
method test_subtitle_extraction (line 463) | def test_subtitle_extraction(self):
class TestAspectRatioHandling (line 482) | class TestAspectRatioHandling:
method test_scaling_decisions (line 496) | def test_scaling_decisions(self, input_height: int, max_res: str, shou...
class TestGpuDetection (line 519) | class TestGpuDetection:
method test_nvidia_gpu_detected (line 522) | def test_nvidia_gpu_detected(self):
method test_no_nvidia_gpu (line 538) | def test_no_nvidia_gpu(self):
method test_older_nvidia_gpu (line 551) | def test_older_nvidia_gpu(self):
class TestUserAgent (line 573) | class TestUserAgent:
method test_default_user_agent (line 576) | def test_default_user_agent(self):
method test_vlc_user_agent (line 581) | def test_vlc_user_agent(self):
method test_chrome_user_agent (line 588) | def test_chrome_user_agent(self):
method test_custom_user_agent (line 595) | def test_custom_user_agent(self):
method test_custom_empty_returns_none (line 603) | def test_custom_empty_returns_none(self):
class TestTranscodeDir (line 617) | class TestTranscodeDir:
method test_default_transcode_dir (line 620) | def test_default_transcode_dir(self):
method test_custom_transcode_dir (line 626) | def test_custom_transcode_dir(self, tmp_path):
class TestHlsListSize (line 642) | class TestHlsListSize:
method test_default_list_size (line 645) | def test_default_list_size(self):
method test_dvr_enabled_list_size (line 650) | def test_dvr_enabled_list_size(self):
class TestProbeMedia (line 662) | class TestProbeMedia:
method test_probe_success (line 665) | def test_probe_success(self):
method test_probe_interlaced_detection (line 707) | def test_probe_interlaced_detection(self):
method test_probe_10bit_detection (line 751) | def test_probe_10bit_detection(self, pix_fmt: str, expected: bool):
method test_probe_hdr_detection (line 792) | def test_probe_hdr_detection(self, color_transfer: str, expected: bool):
method test_probe_failure (line 826) | def test_probe_failure(self):
method test_probe_cache_hit (line 844) | def test_probe_cache_hit(self):
method test_probe_extracts_subtitles (line 868) | def test_probe_extracts_subtitles(self):
class TestProbeCacheManagement (line 916) | class TestProbeCacheManagement:
method test_clear_all_probe_cache (line 919) | def test_clear_all_probe_cache(self):
method test_invalidate_series_probe_cache_entire_series (line 940) | def test_invalidate_series_probe_cache_entire_series(self):
method test_invalidate_series_probe_cache_single_episode (line 954) | def test_invalidate_series_probe_cache_single_episode(self):
method test_clear_series_mru (line 970) | def test_clear_series_mru(self):
method test_restore_probe_cache_entry (line 986) | def test_restore_probe_cache_entry(self):
method test_get_series_probe_cache_stats (line 1002) | def test_get_series_probe_cache_stats(self):
class TestResolveHlsMasterPlaylist (line 1026) | class TestResolveHlsMasterPlaylist:
method test_non_m3u8_url_returns_unchanged (line 1029) | def test_non_m3u8_url_returns_unchanged(self):
method test_master_playlist_selects_highest_bandwidth (line 1036) | def test_master_playlist_selects_highest_bandwidth(self):
method test_media_playlist_returns_unchanged (line 1060) | def test_media_playlist_returns_unchanged(self):
method test_fetch_error_returns_original_url (line 1084) | def test_fetch_error_returns_original_url(self):
method test_relative_url_resolved_correctly (line 1095) | def test_relative_url_resolved_correctly(self):
FILE: ffmpeg_session.py
class _DeadProcess (line 64) | class _DeadProcess:
method terminate (line 69) | def terminate(self) -> None:
method kill (line 72) | def kill(self) -> None:
function get_vod_cache_timeout (line 81) | def get_vod_cache_timeout() -> int:
function get_live_cache_timeout (line 86) | def get_live_cache_timeout() -> int:
function _is_process_alive (line 96) | def _is_process_alive(proc: Any) -> bool:
function is_session_valid (line 107) | def is_session_valid(session: dict[str, Any]) -> bool:
function _kill_process (line 133) | def _kill_process(proc: Any) -> bool:
function stop_session (line 155) | def stop_session(session_id: str, force: bool = False) -> None:
function cleanup_expired_sessions (line 193) | def cleanup_expired_sessions() -> None:
function shutdown (line 205) | def shutdown() -> None:
function get_user_sessions (line 220) | def get_user_sessions(username: str) -> list[tuple[str, dict[str, Any]]]:
function get_source_sessions (line 229) | def get_source_sessions(source_id: str) -> list[tuple[str, dict[str, Any...
function enforce_stream_limits (line 238) | def enforce_stream_limits(
function cleanup_and_recover_sessions (line 290) | def cleanup_and_recover_sessions() -> None:
function _monitor_ffmpeg_stderr (line 419) | async def _monitor_ffmpeg_stderr(
function _monitor_resume_ffmpeg (line 437) | async def _monitor_resume_ffmpeg(
function _monitor_seek_ffmpeg (line 461) | async def _monitor_seek_ffmpeg(
function _spawn_background_task (line 475) | def _spawn_background_task(coro: Any) -> None:
function _wait_for_playlist (line 486) | async def _wait_for_playlist(
function _calc_hls_duration (line 514) | def _calc_hls_duration(playlist_path: pathlib.Path, segment_count: int) ...
function _build_subtitle_tracks (line 523) | def _build_subtitle_tracks(
function _regenerate_playlist (line 540) | def _regenerate_playlist(output_dir: pathlib.Path, start_segment: int) -...
class _SessionSnapshot (line 581) | class _SessionSnapshot:
function _get_session_snapshot (line 591) | def _get_session_snapshot(session_id: str) -> _SessionSnapshot | None:
function _update_session_process (line 607) | def _update_session_process(session_id: str, process: Any) -> bool:
function _build_session_response (line 617) | def _build_session_response(
function _get_existing_session (line 639) | def _get_existing_session(url: str) -> tuple[str | None, bool, float]:
function _handle_existing_vod_session (line 655) | async def _handle_existing_vod_session(
function _try_reuse_session (line 767) | async def _try_reuse_session(
function _cleanup_invalid_session (line 795) | def _cleanup_invalid_session(url: str, session_id: str) -> None:
function _do_start_transcode (line 807) | async def _do_start_transcode(
function start_transcode (line 984) | async def start_transcode(
function get_session (line 1054) | def get_session(session_id: str) -> dict[str, Any] | None:
function touch_session (line 1061) | def touch_session(session_id: str) -> bool:
function get_session_progress (line 1071) | def get_session_progress(session_id: str) -> dict[str, Any] | None:
function clear_url_session (line 1088) | def clear_url_session(url: str) -> str | None:
class _SeekSessionInfo (line 1100) | class _SeekSessionInfo:
function _get_seek_session_info (line 1111) | def _get_seek_session_info(session_id: str) -> _SeekSessionInfo | None:
function _update_seek_session (line 1127) | def _update_seek_session(
function seek_transcode (line 1145) | async def seek_transcode(session_id: str, seek_time: float) -> dict[str,...
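The lifecycle implied by these names: start_transcode creates a session, clients heartbeat it via touch_session, and stop_session tears it down (with the VOD caching rules exercised by the tests below). A sketch of the synchronous surface only, since start_transcode's parameters are truncated in the index; the session id is a placeholder.

import ffmpeg_session

SESSION_ID = "abc123"  # placeholder; real ids come from start_transcode()

session = ffmpeg_session.get_session(SESSION_ID)  # dict or None
if session is not None:
    ffmpeg_session.touch_session(SESSION_ID)      # refresh the heartbeat
    progress = ffmpeg_session.get_session_progress(SESSION_ID)
ffmpeg_session.stop_session(SESSION_ID, force=True)  # kill ffmpeg, drop state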
FILE: ffmpeg_session_test.py
class FakeProcess (line 38) | class FakeProcess:
method __init__ (line 41) | def __init__(self, alive: bool = True, killed: bool = False):
method terminate (line 45) | def terminate(self) -> None:
method kill (line 50) | def kill(self) -> None:
function _clear_session_state (line 56) | def _clear_session_state():
class TestIsProcessAlive (line 68) | class TestIsProcessAlive:
method test_none_is_dead (line 71) | def test_none_is_dead(self):
method test_dead_process_placeholder (line 74) | def test_dead_process_placeholder(self):
method test_alive_process (line 77) | def test_alive_process(self):
method test_dead_process (line 81) | def test_dead_process(self):
class TestKillProcess (line 86) | class TestKillProcess:
method test_kill_alive_process (line 89) | def test_kill_alive_process(self):
method test_kill_already_dead (line 95) | def test_kill_already_dead(self):
class TestIsSessionValid (line 105) | class TestIsSessionValid:
method test_active_process_with_recent_heartbeat (line 108) | def test_active_process_with_recent_heartbeat(self):
method test_active_process_stale_heartbeat (line 118) | def test_active_process_stale_heartbeat(self):
method test_dead_process_live_session_no_cache (line 128) | def test_dead_process_live_session_no_cache(self):
method test_dead_process_vod_session_within_cache (line 139) | def test_dead_process_vod_session_within_cache(self):
method test_dead_process_vod_session_expired_cache (line 150) | def test_dead_process_vod_session_expired_cache(self):
method test_heartbeat_timeout_boundary (line 161) | def test_heartbeat_timeout_boundary(self):
method test_missing_last_access_uses_started (line 176) | def test_missing_last_access_uses_started(self):
class TestCacheTimeouts (line 191) | class TestCacheTimeouts:
method test_vod_cache_timeout_default (line 194) | def test_vod_cache_timeout_default(self):
method test_vod_cache_timeout_custom (line 199) | def test_vod_cache_timeout_custom(self):
method test_live_cache_timeout_default (line 204) | def test_live_cache_timeout_default(self):
method test_live_cache_timeout_custom (line 209) | def test_live_cache_timeout_custom(self):
class TestStopSession (line 220) | class TestStopSession:
method setup_method (line 223) | def setup_method(self):
method teardown_method (line 226) | def teardown_method(self):
method test_stop_nonexistent_session (line 229) | def test_stop_nonexistent_session(self):
method test_stop_session_force (line 233) | def test_stop_session_force(self):
method test_stop_session_skip_recent_vod (line 251) | def test_stop_session_skip_recent_vod(self):
method test_stop_session_skips_recent_live (line 268) | def test_stop_session_skips_recent_live(self):
method test_stop_session_multi_user_grace_period (line 288) | def test_stop_session_multi_user_grace_period(self):
method test_stop_session_caches_vod (line 313) | def test_stop_session_caches_vod(self):
class TestCleanupExpiredSessions (line 334) | class TestCleanupExpiredSessions:
method setup_method (line 337) | def setup_method(self):
method teardown_method (line 340) | def teardown_method(self):
method test_cleanup_removes_expired (line 343) | def test_cleanup_removes_expired(self):
method test_cleanup_keeps_valid (line 362) | def test_cleanup_keeps_valid(self):
class TestShutdown (line 380) | class TestShutdown:
method setup_method (line 383) | def setup_method(self):
method teardown_method (line 386) | def teardown_method(self):
method test_shutdown_kills_all_processes (line 389) | def test_shutdown_kills_all_processes(self):
class TestGetUserSessions (line 411) | class TestGetUserSessions:
method setup_method (line 414) | def setup_method(self):
method teardown_method (line 417) | def teardown_method(self):
method test_get_user_sessions_filters_by_username (line 420) | def test_get_user_sessions_filters_by_username(self):
method test_get_user_sessions_empty (line 432) | def test_get_user_sessions_empty(self):
class TestGetSourceSessions (line 438) | class TestGetSourceSessions:
method setup_method (line 441) | def setup_method(self):
method teardown_method (line 444) | def teardown_method(self):
method test_get_source_sessions_filters_by_source (line 447) | def test_get_source_sessions_filters_by_source(self):
class TestEnforceStreamLimits (line 460) | class TestEnforceStreamLimits:
method setup_method (line 463) | def setup_method(self):
method teardown_method (line 466) | def teardown_method(self):
method test_no_limits_returns_none (line 469) | def test_no_limits_returns_none(self):
method test_user_limit_stops_oldest (line 474) | def test_user_limit_stops_oldest(self):
method test_source_limit_stops_user_session (line 501) | def test_source_limit_stops_user_session(self):
method test_source_limit_returns_error_for_other_user (line 520) | def test_source_limit_returns_error_for_other_user(self):
class TestGetSession (line 543) | class TestGetSession:
method setup_method (line 546) | def setup_method(self):
method teardown_method (line 549) | def teardown_method(self):
method test_get_existing_session (line 552) | def test_get_existing_session(self):
method test_get_nonexistent_session (line 561) | def test_get_nonexistent_session(self):
class TestTouchSession (line 566) | class TestTouchSession:
method setup_method (line 569) | def setup_method(self):
method teardown_method (line 572) | def teardown_method(self):
method test_touch_updates_last_access (line 575) | def test_touch_updates_last_access(self):
method test_touch_nonexistent_returns_false (line 586) | def test_touch_nonexistent_returns_false(self):
class TestGetSessionProgress (line 591) | class TestGetSessionProgress:
method setup_method (line 594) | def setup_method(self):
method teardown_method (line 597) | def teardown_method(self):
method test_progress_with_playlist (line 600) | def test_progress_with_playlist(self):
method test_progress_no_playlist (line 615) | def test_progress_no_playlist(self):
method test_progress_nonexistent_session (line 625) | def test_progress_nonexistent_session(self):
class TestClearUrlSession (line 630) | class TestClearUrlSession:
method setup_method (line 633) | def setup_method(self):
method teardown_method (line 636) | def teardown_method(self):
method test_clear_existing_url (line 639) | def test_clear_existing_url(self):
method test_clear_nonexistent_url (line 649) | def test_clear_nonexistent_url(self):
class TestCalcHlsDuration (line 660) | class TestCalcHlsDuration:
method test_duration_from_playlist (line 663) | def test_duration_from_playlist(self):
method test_duration_estimate_from_segments (line 673) | def test_duration_estimate_from_segments(self):
class TestBuildSubtitleTracks (line 682) | class TestBuildSubtitleTracks:
method test_builds_track_list (line 685) | def test_builds_track_list(self):
method test_empty_sub_info (line 701) | def test_empty_sub_info(self):
method test_non_dict_sub_info (line 706) | def test_non_dict_sub_info(self):
class TestRegeneratePlaylist (line 711) | class TestRegeneratePlaylist:
method test_regenerates_playlist_from_segments (line 714) | def test_regenerates_playlist_from_segments(self):
method test_regenerate_skips_small_segments (line 734) | def test_regenerate_skips_small_segments(self):
class TestCleanupAndRecoverSessions (line 754) | class TestCleanupAndRecoverSessions:
method setup_method (line 757) | def setup_method(self):
method teardown_method (line 760) | def teardown_method(self):
method test_removes_orphaned_dirs (line 763) | def test_removes_orphaned_dirs(self):
method test_recovers_valid_vod_session (line 779) | def test_recovers_valid_vod_session(self):
method test_removes_expired_vod_session (line 806) | def test_removes_expired_vod_session(self):
FILE: m3u.py
function parse_m3u (line 38) | def parse_m3u(content: str, source_id: str) -> tuple[list[dict], list[di...
function fetch_m3u (line 102) | def fetch_m3u(url: str, source_id: str, timeout: int = 30) -> tuple[list...
function _fetch_all_live_data (line 109) | def _fetch_all_live_data() -> tuple[list[dict], list[dict], list[tuple[s...
function fetch_source_live_data (line 151) | def fetch_source_live_data(source: Any) -> tuple[list[dict], list[dict],...
function fetch_source_vod_data (line 185) | def fetch_source_vod_data(source: Any) -> tuple[list[dict], list[dict]]:
function parse_epg_urls (line 200) | def parse_epg_urls(raw: list) -> list[tuple[str, int, str]]:
function load_all_live_data (line 205) | def load_all_live_data() -> tuple[list[dict], list[dict], list[tuple[str...
function _fetch_vod_data (line 252) | def _fetch_vod_data() -> tuple[list[dict], list[dict]]:
function load_vod_data (line 275) | def load_vod_data() -> tuple[list[dict], list[dict]]:
function _fetch_series_data (line 318) | def _fetch_series_data() -> tuple[list[dict], list[dict]]:
function load_series_data (line 341) | def load_series_data() -> tuple[list[dict], list[dict]]:
function get_first_xtream_client (line 384) | def get_first_xtream_client() -> XtreamClient | None:
function get_xtream_client_by_source (line 392) | def get_xtream_client_by_source(source_id: str) -> XtreamClient | None:
function get_first_xtream_source_and_client (line 400) | def get_first_xtream_source_and_client() -> tuple[str, XtreamClient] | t...
function get_fetch_lock (line 408) | def get_fetch_lock(name: str) -> threading.Lock:
function get_refresh_in_progress (line 413) | def get_refresh_in_progress() -> set[str]:
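parse_m3u takes raw playlist text plus a source id. Its return annotation is truncated above, so the sketch below makes two labeled assumptions: that the first element of the returned tuple is the stream list, and that entries carry a "name" key (suggested by test_parse_basic_m3u below).

import m3u

PLAYLIST = """#EXTM3U
#EXTINF:-1 tvg-id="bbc1.uk" group-title="UK",BBC One
http://example.com/stream/1
"""

result = m3u.parse_m3u(PLAYLIST, "source1")
streams = result[0]            # assumption: first tuple element is the streams
print(streams[0].get("name"))  # assumed key; see test_parse_basic_m3u below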
FILE: m3u_test.py
function m3u_module (line 11) | def m3u_module(tmp_path: Path):
class TestParseM3u (line 29) | class TestParseM3u:
method test_parse_basic_m3u (line 30) | def test_parse_basic_m3u(self, m3u_module):
method test_parse_m3u_with_epg_url (line 50) | def test_parse_m3u_with_epg_url(self, m3u_module):
method test_parse_m3u_x_tvg_url (line 58) | def test_parse_m3u_x_tvg_url(self, m3u_module):
method test_parse_m3u_uncategorized (line 66) | def test_parse_m3u_uncategorized(self, m3u_module):
method test_parse_m3u_category_ids_prefixed (line 76) | def test_parse_m3u_category_ids_prefixed(self, m3u_module):
method test_parse_m3u_empty (line 85) | def test_parse_m3u_empty(self, m3u_module):
class TestParseEpgUrls (line 92) | class TestParseEpgUrls:
method test_parse_tuple_list (line 93) | def test_parse_tuple_list(self, m3u_module):
method test_parse_tuple_passthrough (line 100) | def test_parse_tuple_passthrough(self, m3u_module):
method test_parse_empty (line 105) | def test_parse_empty(self, m3u_module):
method test_parse_skips_malformed (line 108) | def test_parse_skips_malformed(self, m3u_module):
class TestFetchLocks (line 115) | class TestFetchLocks:
method test_get_fetch_lock (line 116) | def test_get_fetch_lock(self, m3u_module):
method test_get_refresh_in_progress (line 120) | def test_get_refresh_in_progress(self, m3u_module):
FILE: main.py
function get_sr_models (line 130) | def get_sr_models() -> list[str]:
function is_sr_available (line 154) | def is_sr_available() -> bool:
function _logo_url_filter (line 159) | def _logo_url_filter(url: str) -> str:
function _safe_float (line 172) | def _safe_float(value: float | str | None, default: float = 0.0) -> float:
function lifespan (line 192) | async def lifespan(app: FastAPI) -> AsyncIterator[None]:
class AuthRequired (line 349) | class AuthRequired(Exception):
function auth_required_handler (line 354) | async def auth_required_handler(request: Request, _exc: AuthRequired):
function http_exception_handler (line 359) | async def http_exception_handler(request: Request, exc: HTTPException):
function get_current_user (line 374) | def get_current_user(request: Request) -> dict | None:
function require_auth (line 381) | def require_auth(request: Request) -> dict:
function require_admin (line 388) | def require_admin(request: Request) -> dict:
function setup_page (line 402) | async def setup_page(request: Request):
function setup_create_user (line 410) | async def setup_create_user(
function login_page (line 437) | async def login_page(request: Request, error: str | None = None):
function _check_rate_limit (line 447) | def _check_rate_limit(ip: str) -> None:
function login (line 467) | async def login(
function logout (line 489) | async def logout():
function favicon (line 501) | async def favicon():
function index (line 506) | async def index(request: Request, _user: Annotated[dict, Depends(require...
function _fetch_all_epg (line 510) | def _fetch_all_epg(epg_urls: list[tuple[str, int, str]]) -> int:
function load_all_epg (line 543) | def load_all_epg(epg_urls: list[tuple[str, int, str]]) -> None:
function _start_guide_background_load (line 565) | def _start_guide_background_load() -> None:
function epg_events (line 592) | async def epg_events(_user: Annotated[dict, Depends(require_auth)]):
function guide_page (line 624) | async def guide_page(
function _get_guide_streams (line 753) | def _get_guide_streams(cats: str, username: str) -> tuple[list[dict], li...
function _build_guide_rows (line 796) | def _build_guide_rows(
function guide_rows_api (line 887) | async def guide_rows_api(
function _start_vod_background_load (line 933) | def _start_vod_background_load() -> None:
function vod_page (line 954) | async def vod_page(
function _start_series_background_load (line 1042) | def _start_series_background_load() -> None:
function series_page (line 1063) | async def series_page(
function series_detail_page (line 1152) | async def series_detail_page(
function movie_detail_page (line 1251) | async def movie_detail_page(
class PlayerInfo (line 1318) | class PlayerInfo:
function _get_episode_desc (line 1331) | def _get_episode_desc(ep: dict) -> str:
function _get_live_player_info (line 1344) | def _get_live_player_info(stream_id: str) -> PlayerInfo:
function _get_movie_player_info (line 1387) | def _get_movie_player_info(stream_id: str, ext: str) -> PlayerInfo:
function _get_series_player_info (line 1423) | def _get_series_player_info(
function _ensure_live_cache (line 1494) | def _ensure_live_cache() -> None:
function player_page (line 1508) | async def player_page(
function search_page (line 1612) | async def search_page(
function stream_redirect (line 1757) | async def stream_redirect(
function playlist_xspf (line 1771) | async def playlist_xspf(
function transcode_start (line 1792) | async def transcode_start(
function transcode_seek (line 1835) | async def transcode_seek(
function transcode_progress (line 1845) | async def transcode_progress(
function transcode_file (line 1857) | async def transcode_file(
function subtitle_file (line 1898) | async def subtitle_file(session_id: str, filename: str):
function transcode_stop (line 1928) | async def transcode_stop(
function transcode_stop_post (line 1938) | async def transcode_stop_post(
function transcode_clear (line 1948) | async def transcode_clear(
function _build_all_groups (line 1960) | def _build_all_groups() -> list[dict[str, str]]:
function _get_content_access (line 2005) | def _get_content_access(username: str) -> dict[str, bool]:
function _get_content_access_from_request (line 2037) | def _get_content_access_from_request(request: Request) -> dict[str, bool]:
function settings_page (line 2055) | async def settings_page(request: Request, user: Annotated[dict, Depends(...
function settings_guide_filter (line 2150) | async def settings_guide_filter(
function settings_vod_filter (line 2166) | async def settings_vod_filter(
function settings_series_filter (line 2182) | async def settings_series_filter(
function settings_add_source (line 2198) | async def settings_add_source(
function settings_edit_source (line 2254) | async def settings_edit_source(
function settings_delete_source (line 2308) | async def settings_delete_source(
function guide_refresh (line 2322) | async def guide_refresh(_user: Annotated[dict, Depends(require_auth)]):
function guide_refresh_status (line 2370) | async def guide_refresh_status(_user: Annotated[dict, Depends(require_au...
function settings_refresh_source (line 2379) | async def settings_refresh_source(
function settings_refresh_status (line 2514) | async def settings_refresh_status(_user: Annotated[dict, Depends(require...
function settings_captions (line 2531) | async def settings_captions(
function cast_log_endpoint (line 2543) | async def cast_log_endpoint(request: Request):
function get_user_prefs (line 2555) | async def get_user_prefs(user: Annotated[dict, Depends(require_auth)]):
function save_user_prefs (line 2569) | async def save_user_prefs(
function _fetch_logo (line 2594) | def _fetch_logo(url: str, timeout: int = 10) -> tuple[bytes, str]:
function get_logo (line 2609) | async def get_logo(
function settings_transcode (line 2641) | async def settings_transcode(
function settings_refresh_encoders (line 2679) | async def settings_refresh_encoders(
function settings_user_agent (line 2688) | async def settings_user_agent(
function _enrich_probe_cache_stats (line 2703) | def _enrich_probe_cache_stats(stats: list[dict], xtream: Any) -> list[di...
function get_probe_cache (line 2737) | async def get_probe_cache(
function clear_probe_cache (line 2752) | async def clear_probe_cache(_user: Annotated[dict, Depends(require_admin...
function clear_series_probe_cache (line 2759) | async def clear_series_probe_cache(
function clear_series_mru (line 2770) | async def clear_series_mru(
function clear_data_cache (line 2780) | async def clear_data_cache(_user: Annotated[dict, Depends(require_admin)]):
function get_settings_api (line 2787) | async def get_settings_api(_user: Annotated[dict, Depends(require_auth)]):
function update_settings_api (line 2792) | async def update_settings_api(
function save_watch_position_api (line 2817) | async def save_watch_position_api(
function get_watch_position_api (line 2833) | async def get_watch_position_api(
function settings_delete_user (line 2847) | async def settings_delete_user(
function settings_add_user (line 2870) | async def settings_add_user(
function settings_change_own_password (line 2902) | async def settings_change_own_password(
function settings_change_password (line 2919) | async def settings_change_password(
function settings_set_admin (line 2936) | async def settings_set_admin(
function settings_set_user_limits (line 2948) | async def settings_set_user_limits(
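Nearly every route above threads Annotated[dict, Depends(require_auth)] through its signature. Below is a standalone illustration of that FastAPI dependency pattern, not the repository's implementation: the stub raises HTTPException(401), whereas main.py defines its own AuthRequired exception and a redirect handler, and the /example route is hypothetical.

from typing import Annotated

from fastapi import Depends, FastAPI, HTTPException, Request

app = FastAPI()

def require_auth(request: Request) -> dict:
    # Stand-in for main.require_auth; the cookie/session lookup is elided.
    user = getattr(request.state, "user", None)
    if user is None:
        raise HTTPException(status_code=401)  # main.py raises AuthRequired instead
    return user

@app.get("/example")
async def example(user: Annotated[dict, Depends(require_auth)]):
    # The resolved user dict is injected once require_auth succeeds.
    return {"user": user}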
FILE: main_test.py
function mock_deps (line 17) | def mock_deps():
function client (line 31) | def client(tmp_path: Path, mock_deps):
function auth_client (line 56) | def auth_client(tmp_path: Path, mock_deps):
class TestSetup (line 86) | class TestSetup:
method test_setup_page_shown_when_no_users (line 89) | def test_setup_page_shown_when_no_users(self, client):
method test_setup_redirects_when_users_exist (line 94) | def test_setup_redirects_when_users_exist(self, client, tmp_path):
method test_setup_creates_user (line 103) | def test_setup_creates_user(self, client):
method test_setup_validates_username_length (line 116) | def test_setup_validates_username_length(self, client):
method test_setup_validates_password_length (line 124) | def test_setup_validates_password_length(self, client):
method test_setup_validates_password_match (line 132) | def test_setup_validates_password_match(self, client):
class TestLogin (line 141) | class TestLogin:
method test_login_page_redirects_to_setup_when_no_users (line 144) | def test_login_page_redirects_to_setup_when_no_users(self, client):
method test_login_page_shown_when_users_exist (line 149) | def test_login_page_shown_when_users_exist(self, client, tmp_path):
method test_login_success_sets_cookie (line 157) | def test_login_success_sets_cookie(self, client, tmp_path):
method test_login_failure_returns_401 (line 170) | def test_login_failure_returns_401(self, client, tmp_path):
class TestLogout (line 184) | class TestLogout:
method test_logout_clears_cookie (line 187) | def test_logout_clears_cookie(self, auth_client):
class TestAuthRequired (line 193) | class TestAuthRequired:
method test_index_redirects_to_login (line 196) | def test_index_redirects_to_login(self, client, tmp_path):
method test_guide_redirects_to_login (line 205) | def test_guide_redirects_to_login(self, client, tmp_path):
method test_vod_redirects_to_login (line 214) | def test_vod_redirects_to_login(self, client, tmp_path):
method test_series_redirects_to_login (line 223) | def test_series_redirects_to_login(self, client, tmp_path):
class TestIndex (line 233) | class TestIndex:
method test_index_redirects_to_guide (line 236) | def test_index_redirects_to_guide(self, auth_client):
class TestFavicon (line 242) | class TestFavicon:
method test_favicon_returns_204 (line 245) | def test_favicon_returns_204(self, client):
class TestGuide (line 250) | class TestGuide:
method test_guide_shows_loading_when_no_cache (line 253) | def test_guide_shows_loading_when_no_cache(self, auth_client):
method test_guide_shows_channels_from_cache (line 260) | def test_guide_shows_channels_from_cache(self, auth_client):
method test_guide_uses_saved_filter (line 272) | def test_guide_uses_saved_filter(self, auth_client, tmp_path):
class TestVod (line 286) | class TestVod:
method test_vod_shows_loading_when_no_cache (line 289) | def test_vod_shows_loading_when_no_cache(self, auth_client):
method test_vod_shows_movies_from_cache (line 294) | def test_vod_shows_movies_from_cache(self, auth_client):
method test_vod_filters_by_category (line 305) | def test_vod_filters_by_category(self, auth_client):
method test_vod_sorts_by_alpha (line 318) | def test_vod_sorts_by_alpha(self, auth_client):
class TestSeries (line 329) | class TestSeries:
method test_series_shows_loading_when_no_cache (line 332) | def test_series_shows_loading_when_no_cache(self, auth_client):
method test_series_shows_list_from_cache (line 337) | def test_series_shows_list_from_cache(self, auth_client):
class TestSearch (line 349) | class TestSearch:
method test_search_page_renders (line 352) | def test_search_page_renders(self, auth_client):
method test_search_finds_live_streams (line 360) | def test_search_finds_live_streams(self, auth_client):
method test_search_regex_mode (line 373) | def test_search_regex_mode(self, auth_client):
method test_search_rejects_long_regex (line 386) | def test_search_rejects_long_regex(self, auth_client):
class TestSettings (line 393) | class TestSettings:
method test_settings_page_renders (line 396) | def test_settings_page_renders(self, auth_client):
method test_settings_guide_filter (line 403) | def test_settings_guide_filter(self, auth_client):
method test_settings_captions (line 411) | def test_settings_captions(self, auth_client):
method test_settings_transcode (line 419) | def test_settings_transcode(self, auth_client):
class TestAddSource (line 432) | class TestAddSource:
method test_add_xtream_source (line 435) | def test_add_xtream_source(self, auth_client):
method test_add_m3u_source (line 451) | def test_add_m3u_source(self, auth_client):
method test_add_source_validates_type (line 465) | def test_add_source_validates_type(self, auth_client):
method test_add_source_validates_url_scheme (line 476) | def test_add_source_validates_url_scheme(self, auth_client):
method test_add_source_validates_name_length (line 487) | def test_add_source_validates_name_length(self, auth_client):
class TestDeleteSource (line 499) | class TestDeleteSource:
method test_delete_source (line 502) | def test_delete_source(self, auth_client, tmp_path):
class TestUserPrefs (line 524) | class TestUserPrefs:
method test_get_user_prefs (line 527) | def test_get_user_prefs(self, auth_client):
method test_save_user_prefs (line 534) | def test_save_user_prefs(self, auth_client):
class TestWatchPosition (line 543) | class TestWatchPosition:
method test_save_watch_position (line 546) | def test_save_watch_position(self, auth_client):
method test_get_watch_position (line 553) | def test_get_watch_position(self, auth_client):
method test_get_watch_position_not_found (line 566) | def test_get_watch_position_not_found(self, auth_client):
class TestUserManagement (line 573) | class TestUserManagement:
method test_delete_user (line 576) | def test_delete_user(self, auth_client, tmp_path):
method test_cannot_delete_self (line 584) | def test_cannot_delete_self(self, auth_client):
method test_change_password (line 588) | def test_change_password(self, auth_client):
method test_change_password_wrong_current (line 595) | def test_change_password_wrong_current(self, auth_client):
class TestPlaylistXspf (line 603) | class TestPlaylistXspf:
method test_playlist_xspf (line 606) | def test_playlist_xspf(self, auth_client):
class TestApiSettings (line 614) | class TestApiSettings:
method test_get_settings (line 617) | def test_get_settings(self, auth_client):
method test_update_settings (line 623) | def test_update_settings(self, auth_client):
class TestTranscodeRoutes (line 631) | class TestTranscodeRoutes:
method test_transcode_file_not_found (line 634) | def test_transcode_file_not_found(self, auth_client):
method test_transcode_stop (line 639) | def test_transcode_stop(self, auth_client):
method test_transcode_stop_post (line 645) | def test_transcode_stop_post(self, auth_client):
method test_transcode_progress_not_found (line 650) | def test_transcode_progress_not_found(self, auth_client):
class TestSubtitleRoutes (line 656) | class TestSubtitleRoutes:
method test_subtitle_invalid_filename (line 659) | def test_subtitle_invalid_filename(self, auth_client):
method test_subtitle_session_not_found (line 663) | def test_subtitle_session_not_found(self, auth_client):
class TestProbeCache (line 669) | class TestProbeCache:
method test_get_probe_cache (line 672) | def test_get_probe_cache(self, auth_client):
method test_clear_probe_cache (line 678) | def test_clear_probe_cache(self, auth_client):
method test_clear_series_probe_cache (line 684) | def test_clear_series_probe_cache(self, auth_client):
class TestRefreshStatus (line 690) | class TestRefreshStatus:
method test_guide_refresh_status (line 693) | def test_guide_refresh_status(self, auth_client):
method test_settings_refresh_status (line 702) | def test_settings_refresh_status(self, auth_client):
FILE: static/js/app.js
function getFocusables (line 13) | function getFocusables(container = document) {
function getGridInfo (line 19) | function getGridInfo(element) {
function moveFocus (line 43) | function moveFocus(direction) {
function setInitialFocus (line 90) | function setInitialFocus() {
function toggleFocusedFavorite (line 112) | function toggleFocusedFavorite() {
FILE: static/js/favorites-grid.js
function escapeHtml (line 10) | function escapeHtml(s) {
function escapeAttr (line 15) | function escapeAttr(s) {
function getFavorites (line 22) | function getFavorites() {
function saveFavorites (line 26) | function saveFavorites() {
function getOrder (line 76) | async function getOrder() {
function saveOrder (line 84) | async function saveOrder(order) {
function initDragDrop (line 141) | function initDragDrop() {
function saveCurrentOrder (line 217) | async function saveCurrentOrder() {
FILE: static/js/player.js
function formatTime (line 35) | function formatTime(seconds) {
function parseTime (line 43) | function parseTime(str) {
function parseVttTime (line 56) | function parseVttTime(str) {
function parseVttCues (line 63) | function parseVttCues(vttText) {
function hideLoading (line 91) | function hideLoading() {
function showLoading (line 95) | function showLoading() {
function showError (line 99) | function showError() {
function updateTranscodeCheck (line 104) | function updateTranscodeCheck() {
function updateCcButton (line 109) | function updateCcButton() {
function updatePlayIcon (line 113) | function updatePlayIcon() {
function updateMuteIcon (line 122) | function updateMuteIcon() {
function updateFullscreenIcon (line 131) | function updateFullscreenIcon() {
function disableCcButton (line 141) | function disableCcButton() {
function enableCcButton (line 146) | function enableCcButton() {
method newCue (line 158) | newCue(track, startTime, endTime, captionScreen) {
function createHlsConfig (line 178) | function createHlsConfig(options = {}) {
function applyCaptionStyles (line 210) | function applyCaptionStyles() {
function getPreferredSubtitleTrack (line 244) | function getPreferredSubtitleTrack(tracks) {
function applyCaptionsSetting (line 267) | function applyCaptionsSetting() {
function startSubtitlePolling (line 292) | function startSubtitlePolling(subtitles, prefIdx) {
function savePosition (line 357) | function savePosition() {
function restorePosition (line 371) | function restorePosition() {
function setupPositionTracking (line 383) | function setupPositionTracking() {
function startProgressPolling (line 406) | function startProgressPolling() {
function stopProgressPolling (line 422) | function stopProgressPolling() {
function cleanupTranscode (line 433) | async function cleanupTranscode() {
function cleanupTranscodeSync (line 463) | function cleanupTranscodeSync() {
function handleSeekToPosition (line 481) | async function handleSeekToPosition(targetTime) {
function startTranscode (line 542) | async function startTranscode(onError) {
function playWithUrl (line 583) | function playWithUrl(url, onError, subtitles) {
function setupKeyboardControls (line 714) | function setupKeyboardControls() {
function loadSettings (line 806) | function loadSettings() {
function saveSettings (line 811) | function saveSettings(updates) {
function setupActivityTracking (line 816) | function setupActivityTracking() {
function setupButtonHandlers (line 845) | function setupButtonHandlers() {
function setupCast (line 1202) | function setupCast() {
function init (line 1314) | function init() {
FILE: static/js/settings.js
function escapeHtml (line 11) | function escapeHtml(s) {
function showFeedback (line 16) | function showFeedback(el, success) {
function saveWithFeedback (line 23) | async function saveWithFeedback(url, options, feedbackEl) {
function getFeedbackEl (line 35) | function getFeedbackEl(el) {
function setupDragDrop (line 45) | function setupDragDrop(containerSelector, chipSelector, onDrop) {
function setupSearch (line 100) | function setupSearch(inputId, clearBtnId, chipSelector) {
function setupSourceTypeSelect (line 176) | function setupSourceTypeSelect() {
function setupSourceEditForms (line 209) | function setupSourceEditForms() {
function setupCategoryFilter (line 251) | function setupCategoryFilter() {
function setupVodCategoryFilter (line 290) | function setupVodCategoryFilter() {
function setupSeriesCategoryFilter (line 329) | function setupSeriesCategoryFilter() {
function setupChromeCcLink (line 368) | function setupChromeCcLink() {
function setupCaptionSettings (line 389) | function setupCaptionSettings() {
function setupGuideSettings (line 453) | function setupGuideSettings() {
function setupTranscodeSettings (line 470) | function setupTranscodeSettings() {
function setupUserAgentSettings (line 554) | function setupUserAgentSettings() {
function setupDataCache (line 582) | function setupDataCache() {
function setupProbeCache (line 595) | function setupProbeCache() {
function setupRefreshButtons (line 681) | function setupRefreshButtons() {
function setupUserForms (line 739) | function setupUserForms() {
function setupUserGroupDragDrop (line 855) | function setupUserGroupDragDrop() {
function init (line 918) | function init() {
FILE: static/js/virtual-guide.js
constant VIRTUAL_GUIDE_DEFAULTS (line 7) | const VIRTUAL_GUIDE_DEFAULTS = {
class VirtualGuide (line 20) | class VirtualGuide {
method constructor (line 21) | constructor(options) {
method currentRowHeight (line 59) | get currentRowHeight() {
method visibleCount (line 63) | get visibleCount() {
method init (line 68) | init() {
method setupDOM (line 108) | setupDOM() {
method bindEvents (line 144) | bindEvents() {
method onScroll (line 176) | onScroll() {
method updateVisibleRange (line 216) | async updateVisibleRange() {
method findMissingRanges (line 271) | findMissingRanges(start, end) {
method fetchMissingRanges (line 296) | async fetchMissingRanges(ranges) {
method render (line 408) | render() {
method pruneCache (line 431) | pruneCache() {
method renderPlaceholder (line 456) | renderPlaceholder(index) {
method renderRow (line 486) | renderRow(row, index) {
method destroy (line 584) | destroy() {
FILE: testing.py
function run_tests (line 11) | def run_tests(test_file: str) -> None:
FILE: tools/alignm3u.py
function parse_callsign (line 35) | def parse_callsign(coded_callsign: str) -> tuple[str, str, int]:
function parse_m3u (line 46) | def parse_m3u(path: pathlib.Path) -> list[list]:
function parse_xmltv_channels (line 69) | def parse_xmltv_channels(path: pathlib.Path) -> dict[str, tuple[str, ...]]:
function build_lookup (line 81) | def build_lookup(xmltv_channels: dict[str, tuple[str, ...]]) -> dict[str...
function align_channels (line 90) | def align_channels(
function write_m3u (line 125) | def write_m3u(
function main (line 157) | def main() -> None:
FILE: tools/export-tensorrt.py
class SRVGGNetCompact (line 51) | class SRVGGNetCompact(nn.Module):
method __init__ (line 58) | def __init__(
method forward (line 77) | def forward(self, x: torch.Tensor) -> torch.Tensor:
class ResidualDenseBlock (line 86) | class ResidualDenseBlock(nn.Module):
method __init__ (line 96) | def __init__(self, nf: int = 64, gc: int = 32):
method forward (line 105) | def forward(self, x: torch.Tensor) -> torch.Tensor:
class RRDB (line 114) | class RRDB(nn.Module):
method __init__ (line 121) | def __init__(self, nf: int, gc: int = 32):
method forward (line 127) | def forward(self, x: torch.Tensor) -> torch.Tensor:
class RRDBNet (line 134) | class RRDBNet(nn.Module):
method __init__ (line 147) | def __init__(
method forward (line 167) | def forward(self, x: torch.Tensor) -> torch.Tensor:
class ModelInfo (line 193) | class ModelInfo(TypedDict):
function resolve_model (line 236) | def resolve_model(model_name: str) -> tuple[str, ModelInfo]:
function download_model (line 244) | def download_model(model_name: str, cache_dir: Path) -> Path:
function list_models (line 283) | def list_models() -> None:
function get_model_and_onnx (line 298) | def get_model_and_onnx(
function export_onnx (line 369) | def export_onnx(model: nn.Module, opt_shape: tuple[int, int], onnx_path:...
function _get_trt_dtype_map (line 402) | def _get_trt_dtype_map() -> dict[str, trt.DataType]:
function _trt_dtype_str (line 413) | def _trt_dtype_str(dtype: trt.DataType) -> str:
function build_engine (line 421) | def build_engine(
function height_to_shape (line 525) | def height_to_shape(h: int, aspect: float = 16 / 9) -> tuple[int, int]:
function main (line 540) | def main() -> None:
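
The listing above covers tools/export-tensorrt.py, which converts PyTorch super-resolution models (SRVGGNetCompact, RRDBNet) to ONNX and then builds TensorRT engines for FFmpeg's dnn_processing filter. As a rough illustration of the export_onnx step only, here is a minimal, hypothetical sketch of a dynamic-shape ONNX export; the stand-in model, file name, and opset choice are assumptions, not the script's actual values:

# Hedged sketch of a dynamic-shape ONNX export in the spirit of
# export_onnx() above. The tiny stand-in model, "model.onnx", and
# opset 17 are illustrative assumptions, not the script's real code.
import torch
import torch.nn as nn

model = nn.Sequential(  # stand-in for SRVGGNetCompact / RRDBNet
    nn.Conv2d(3, 16, 3, padding=1),
    nn.PReLU(),
    nn.Conv2d(16, 3 * 4, 3, padding=1),
    nn.PixelShuffle(2),  # 2x spatial upscale (12 -> 3 channels)
).eval()

example = torch.randn(1, 3, 360, 640)  # e.g. a 360p "opt" shape
torch.onnx.export(
    model,
    (example,),
    "model.onnx",
    input_names=["input"],
    output_names=["output"],
    # Dynamic H/W so one engine profile can serve several resolutions.
    dynamic_axes={
        "input": {2: "height", 3: "width"},
        "output": {2: "out_height", 3: "out_width"},
    },
    opset_version=17,
)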
FILE: tools/patches/dnn_backend_tensorrt.cpp
type CachedEngine (line 47) | struct CachedEngine {
method CachedEngine (line 52) | CachedEngine(nvinfer1::ICudaEngine *e, nvinfer1::IRuntime *r)
function load_libs (line 208) | static int load_libs(void *log_ctx) {
function log_gpu_memory (line 389) | static void log_gpu_memory(void *log_ctx, const char *label) {
class TRTLogger (line 399) | class TRTLogger : public nvinfer1::ILogger {
method TRTLogger (line 402) | TRTLogger(void *ctx = nullptr) : log_ctx(ctx) {}
method log (line 404) | void log(Severity severity, const char *msg) noexcept override {
type TRTDataType (line 426) | enum TRTDataType {
function trt_dtype_size (line 446) | static size_t trt_dtype_size(TRTDataType dt) {
function nvinfer_to_trt_dtype (line 457) | static TRTDataType nvinfer_to_trt_dtype(nvinfer1::DataType dt)
type TRTModel (line 468) | struct TRTModel {
type TRTInferRequest (line 515) | struct TRTInferRequest {
type TRTRequestItem (line 519) | struct TRTRequestItem {
function load_cuda_kernels (line 543) | static int load_cuda_kernels(TRTModel *trt_model, void *log_ctx) {
function launch_kernel (line 616) | static int launch_kernel(CUfunction func, CUstream stream,
function ensure_execution_context (line 639) | static int ensure_execution_context(TRTModel *trt_model, void *log_ctx)
function extract_lltask_from_task (line 706) | static int extract_lltask_from_task(TaskItem *task, Queue *lltask_queue)
function trt_free_request (line 726) | static void trt_free_request(TRTInferRequest *request)
function destroy_request_item (line 735) | static inline void destroy_request_item(TRTRequestItem **arg)
function dnn_free_model_trt (line 748) | static void dnn_free_model_trt(DNNModel **model)
function get_input_trt (line 879) | static int get_input_trt(DNNModel *model, DNNData *input, const char *in...
function fill_model_input_trt (line 904) | static int fill_model_input_trt(TRTModel *trt_model, TRTRequestItem *req...
function trt_capture_cuda_graph (line 1044) | static int trt_capture_cuda_graph(TRTModel *trt_model, void *log_ctx)
function trt_start_inference (line 1106) | static int trt_start_inference(void *args)
function infer_completion_callback (line 1165) | static void infer_completion_callback(void *args)
function execute_model_trt (line 1355) | static int execute_model_trt(TRTRequestItem *request, Queue *lltask_queue)
function get_output_trt (line 1419) | static int get_output_trt(DNNModel *model, const char *input_name, int i...
function trt_create_inference_request (line 1431) | static TRTInferRequest *trt_create_inference_request(void)
function dnn_load_model_trt (line 1437) | static DNNModel *dnn_load_model_trt(DnnContext *ctx, DNNFunctionType fun...
function dnn_execute_model_trt (line 1748) | static int dnn_execute_model_trt(const DNNModel *model, DNNExecBaseParam...
function dnn_get_result_trt (line 1813) | static DNNAsyncStatusType dnn_get_result_trt(const DNNModel *model, AVFr...
function dnn_flush_trt (line 1819) | static int dnn_flush_trt(const DNNModel *model)
FILE: tools/patches/dnn_backend_torch.cpp
type THModel (line 49) | struct THModel {
type THInferRequest (line 63) | struct THInferRequest {
type THRequestItem (line 68) | struct THRequestItem {
function extract_lltask_from_task (line 82) | static int extract_lltask_from_task(TaskItem *task, Queue *lltask_queue)
function th_free_request (line 102) | static void th_free_request(THInferRequest *request)
function destroy_request_item (line 117) | static inline void destroy_request_item(THRequestItem **arg)
function dnn_free_model_th (line 131) | static void dnn_free_model_th(DNNModel **model)
function get_input_th (line 205) | static int get_input_th(DNNModel *model, DNNData *input, const char *inp...
function deleter (line 217) | static void deleter(void *arg)
function fill_model_input_th (line 222) | static int fill_model_input_th(THModel *th_model, THRequestItem *request)
function th_start_inference (line 377) | static int th_start_inference(void *args)
function infer_completion_callback (line 432) | static void infer_completion_callback(void *args) {
function th_worker_thread (line 633) | static void th_worker_thread(THModel *th_model) {
function execute_model_th (line 658) | static int execute_model_th(THRequestItem *request, Queue *lltask_queue)
function get_output_th (line 714) | static int get_output_th(DNNModel *model, const char *input_name, int in...
function th_create_inference_request (line 760) | static THInferRequest *th_create_inference_request(void)
function dnn_load_model_th (line 771) | static DNNModel *dnn_load_model_th(DnnContext *ctx, DNNFunctionType func...
function dnn_execute_model_th (line 911) | static int dnn_execute_model_th(const DNNModel *model, DNNExecBaseParams...
function dnn_get_result_th (line 974) | static DNNAsyncStatusType dnn_get_result_th(const DNNModel *model, AVFra...
function dnn_flush_th (line 980) | static int dnn_flush_th(const DNNModel *model)
FILE: tools/patches/vf_dnn_processing.c
type DnnProcessingContext (line 39) | typedef struct DnnProcessingContext {
function init (line 68) | static av_cold int init(AVFilterContext *context)
type AVPixelFormat (line 74) | enum AVPixelFormat
function check_modelinput_inlink (line 91) | static int check_modelinput_inlink(const DNNData *model_input, const AVF...
function config_input (line 154) | static int config_input(AVFilterLink *inlink)
function isPlanarYUV (line 176) | static av_always_inline int isPlanarYUV(enum AVPixelFormat pix_fmt)
function prepare_uv_scale (line 184) | static int prepare_uv_scale(AVFilterLink *outlink)
function config_output (line 228) | static int config_output(AVFilterLink *outlink)
function copy_uv_planes (line 282) | static int copy_uv_planes(DnnProcessingContext *ctx, AVFrame *out, const...
function flush_frame (line 321) | static int flush_frame(AVFilterLink *outlink, int64_t pts, int64_t *out_...
function activate (line 359) | static int activate(AVFilterContext *filter_ctx)
function uninit (line 453) | static av_cold void uninit(AVFilterContext *ctx)
FILE: tools/xtream2m3u.py
class RetryableError (line 29) | class RetryableError(Exception):
function _load_config (line 39) | def _load_config() -> dict:
function _get_urls (line 55) | def _get_urls() -> tuple[str, str, str]:
function _get_filters (line 66) | def _get_filters() -> tuple[dict[int, str], str, set[str]]:
function main (line 75) | def main(cached_only: bool = False) -> None:
function fetch_all_data (line 126) | def fetch_all_data(api_url: str) -> None:
function fetch_text (line 182) | def fetch_text(url: str, timeout: int = 5) -> str:
function fetch_series_info (line 199) | def fetch_series_info(
class RateLimiter (line 274) | class RateLimiter(Protocol):
method __init__ (line 275) | def __init__(self, max_calls: int, per_seconds: float = 1): ...
method acquire (line 277) | def acquire(self) -> None: ...
function _task (line 280) | def _task(
function filter_live (line 310) | def filter_live(
function process (line 351) | def process(
function process_iptv_url (line 418) | def process_iptv_url(auth: dict[str, dict[str, Any]]) -> list[str]:
function toint (line 440) | def toint(x: str | None) -> int | None:
function tofloat (line 444) | def tofloat(x: str | None) -> float | None:
function load (line 448) | def load(filename: str) -> Any:
function load_dict (line 453) | def load_dict(filename: str) -> dict[str, Any]:
function load_list (line 460) | def load_list(filename: str) -> list[dict[str, Any]]:
class SlidingRateLimiter (line 467) | class SlidingRateLimiter:
method __init__ (line 468) | def __init__(self, max_calls: int, per_seconds: float = 1):
method acquire (line 474) | def acquire(self) -> None:
class ChunkingRateLimiter (line 487) | class ChunkingRateLimiter:
method __init__ (line 488) | def __init__(self, max_calls: int, per_seconds: float = 1):
method acquire (line 495) | def acquire(self) -> None:
function print_progress_bar (line 512) | def print_progress_bar(
function write_m3u_live (line 544) | def write_m3u_live(
function write_m3u_vod (line 568) | def write_m3u_vod(
function write_m3u_series (line 590) | def write_m3u_series(
function _descend (line 626) | def _descend(x: Any):
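
The RateLimiter protocol listed above (line 274) fixes the interface that SlidingRateLimiter and ChunkingRateLimiter implement: __init__(max_calls, per_seconds) plus a blocking acquire(). As a hedged illustration of that protocol, and not the repository's actual code, a sliding-window implementation might look like this:

# Minimal sketch of the RateLimiter protocol (max_calls per
# per_seconds window). The deque/lock internals are assumptions
# for illustration, not xtream2m3u.py's implementation.
import threading
import time
from collections import deque


class SlidingWindowLimiter:
    def __init__(self, max_calls: int, per_seconds: float = 1):
        self.max_calls = max_calls
        self.per_seconds = per_seconds
        self._calls: deque[float] = deque()  # timestamps of recent calls
        self._lock = threading.Lock()

    def acquire(self) -> None:
        # Block until a call slot is free within the sliding window.
        while True:
            with self._lock:
                now = time.monotonic()
                while self._calls and now - self._calls[0] >= self.per_seconds:
                    self._calls.popleft()  # drop calls outside the window
                if len(self._calls) < self.max_calls:
                    self._calls.append(now)
                    return
                wait = self.per_seconds - (now - self._calls[0])
            time.sleep(max(wait, 0.0))

Worker threads would call acquire() before each API request, so at most max_calls requests land in any per_seconds window regardless of thread count.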
FILE: tools/zap2xml.py
class Namespace (line 46) | class Namespace(dict): # pyright: ignore[reportMissingTypeArgument]
function main (line 55) | def main() -> None:
function get_cached (line 161) | def get_cached(
function remove_stale_cache (line 208) | def remove_stale_cache(cache_dir: pathlib.Path, zap_time: int) -> None:
function _expired (line 228) | def _expired(
function add_programme (line 240) | def add_programme(
function add_programme_tvimate (line 431) | def add_programme_tvimate(
function get_channel_key (line 523) | def get_channel_key(c: Mapping[str, Any]) -> str:
function parse_callsign (line 529) | def parse_callsign(coded_callsign: str) -> str:
function strf_time_str (line 544) | def strf_time_str(tm: str, format_str: str = "%Y%m%d%H%M%S %z") -> str:
function strf_time_int (line 549) | def strf_time_int(timestamp: int, format_str: str = "%Y-%b-%d %_I:%M%P %...
function parse_time_iso (line 553) | def parse_time_iso(tm: str) -> datetime.datetime:
function parse_time_int (line 558) | def parse_time_int(timestamp: int) -> datetime.datetime:
function add_xml_child (line 562) | def add_xml_child(
function toint (line 581) | def toint(x: str | None, fail: int = 0) -> int:
function parse_args (line 587) | def parse_args() -> argparse.Namespace:
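
strf_time_str's default format string ("%Y%m%d%H%M%S %z") is the timestamp layout XMLTV uses for programme start/stop attributes. A minimal, hypothetical example of producing that format (not zap2xml.py's own helper):

# Illustration of the XMLTV timestamp format implied by strf_time_str's
# default; this helper is hypothetical, not zap2xml.py's implementation.
import datetime

def to_xmltv(dt: datetime.datetime) -> str:
    # XMLTV <programme start=...> expects e.g. "20260115203000 +0000".
    return dt.strftime("%Y%m%d%H%M%S %z")

dt = datetime.datetime(2026, 1, 15, 20, 30, tzinfo=datetime.UTC)
assert to_xmltv(dt) == "20260115203000 +0000"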
FILE: util.py
class _SafeRedirectHandler (line 12) | class _SafeRedirectHandler(urllib.request.HTTPRedirectHandler):
method redirect_request (line 15) | def redirect_request(
function safe_urlopen (line 33) | def safe_urlopen(url: str, timeout: int = 30, user_agent: str | None = N...
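
util.py's _SafeRedirectHandler subclasses urllib.request.HTTPRedirectHandler, and the tests in util_test.py below show it allowing http/https redirects while rejecting file:, data:, and javascript: targets. A sketch of that pattern follows, with the error handling and opener wiring as assumptions rather than the module's actual code:

# Hedged sketch of a scheme-checking redirect handler in the spirit of
# util.py's _SafeRedirectHandler; the HTTPError choice and the opener
# wiring are assumptions based on the test names, not the real code.
import urllib.error
import urllib.parse
import urllib.request


class SchemeCheckingRedirectHandler(urllib.request.HTTPRedirectHandler):
    def redirect_request(self, req, fp, code, msg, headers, newurl):
        scheme = urllib.parse.urlsplit(newurl).scheme
        if scheme not in ("http", "https"):
            # Refuse redirects to file:, data:, javascript:, etc.
            raise urllib.error.HTTPError(
                newurl, code, f"unsafe redirect scheme: {scheme!r}", headers, fp
            )
        return super().redirect_request(req, fp, code, msg, headers, newurl)


def checked_urlopen(url: str, timeout: int = 30, user_agent: str | None = None):
    # Hypothetical counterpart to safe_urlopen(): install the handler
    # in a dedicated opener so redirects are vetted before being followed.
    opener = urllib.request.build_opener(SchemeCheckingRedirectHandler())
    headers = {"User-Agent": user_agent} if user_agent else {}
    return opener.open(urllib.request.Request(url, headers=headers), timeout=timeout)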
FILE: util_test.py
function _fake_request (line 14) | def _fake_request(url: str) -> Any:
class TestSafeRedirectHandler (line 29) | class TestSafeRedirectHandler:
method test_handler_allows_http (line 30) | def test_handler_allows_http(self):
method test_handler_allows_https (line 43) | def test_handler_allows_https(self):
method test_handler_rejects_file_scheme (line 56) | def test_handler_rejects_file_scheme(self):
method test_handler_rejects_data_scheme (line 69) | def test_handler_rejects_data_scheme(self):
method test_handler_rejects_javascript_scheme (line 82) | def test_handler_rejects_javascript_scheme(self):
FILE: xtream.py
class XtreamClient (line 15) | class XtreamClient:
method __post_init__ (line 25) | def __post_init__(self) -> None:
method _base_params (line 30) | def _base_params(self) -> dict[str, str]:
method api_url (line 34) | def api_url(self) -> str:
method _fetch (line 38) | def _fetch(self, url: str, timeout: int = 30) -> str:
method _api (line 42) | def _api(self, action: str | None = None, timeout: int = 30, **params:...
method get_server_info (line 50) | def get_server_info(self, timeout: int = 15) -> dict[str, Any]:
method get_live_categories (line 54) | def get_live_categories(self) -> list[dict[str, Any]]:
method get_live_streams (line 57) | def get_live_streams(self, category_id: int | None = None) -> list[dic...
method get_vod_categories (line 62) | def get_vod_categories(self) -> list[dict[str, Any]]:
method get_vod_streams (line 65) | def get_vod_streams(self, category_id: int | None = None) -> list[dict...
method get_series_categories (line 70) | def get_series_categories(self) -> list[dict[str, Any]]:
method get_series (line 73) | def get_series(self, category_id: int | None = None) -> list[dict[str,...
method get_series_info (line 78) | def get_series_info(self, series_id: int) -> dict[str, Any]:
method get_vod_info (line 81) | def get_vod_info(self, vod_id: int) -> dict[str, Any]:
method get_short_epg (line 84) | def get_short_epg(self, stream_id: int, limit: int = 10) -> dict[str, ...
method build_stream_url (line 88) | def build_stream_url(self, stream_type: str, stream_id: int, ext: str ...
method build_timeshift_url (line 95) | def build_timeshift_url(
method epg_url (line 109) | def epg_url(self) -> str:
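
The xtream.py listing above amounts to a small client for the Xtream Codes player_api. A hedged usage sketch follows; the method names and signatures come from the index, while the constructor field names (url, username, password) are assumptions inferred from the tests in xtream_test.py:

# Hedged usage sketch for the XtreamClient API listed above. Field
# names are assumptions; methods and signatures follow the index.
from xtream import XtreamClient

client = XtreamClient(  # dataclass: __post_init__ normalizes the URL
    url="http://provider.example.com/",  # trailing slash is stripped
    username="user",
    password="pass",
)

info = client.get_server_info(timeout=15)          # auth + server metadata
cats = client.get_live_categories()                # category listing
streams = client.get_live_streams(category_id=1)   # optionally filtered
stream_url = client.build_stream_url("live", 42, ext="m3u8")
epg = client.get_short_epg(stream_id=42, limit=10)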
FILE: xtream_test.py
class TestXtreamClient (line 14) | class TestXtreamClient:
method test_api_url_property (line 17) | def test_api_url_property(self):
method test_epg_url_property (line 21) | def test_epg_url_property(self):
method test_url_normalization_strips_trailing_slash (line 25) | def test_url_normalization_strips_trailing_slash(self):
method test_url_normalization_strips_multiple_trailing_slashes (line 31) | def test_url_normalization_strips_multiple_trailing_slashes(self):
method test_special_chars_in_credentials_are_encoded (line 35) | def test_special_chars_in_credentials_are_encoded(self):
method test_build_stream_url_live_no_ext (line 44) | def test_build_stream_url_live_no_ext(self):
method test_build_stream_url_live_with_ext (line 49) | def test_build_stream_url_live_with_ext(self):
method test_build_stream_url_movie (line 54) | def test_build_stream_url_movie(self):
method test_build_stream_url_series (line 59) | def test_build_stream_url_series(self):
method test_build_timeshift_url (line 64) | def test_build_timeshift_url(self):
method test_build_timeshift_url_custom_ext (line 69) | def test_build_timeshift_url_custom_ext(self):
class TestXtreamClientApi (line 75) | class TestXtreamClientApi:
method client (line 79) | def client(self):
method mock_urlopen (line 83) | def mock_urlopen(self):
method _setup_response (line 87) | def _setup_response(self, mock_urlopen, data):
method test_get_live_categories (line 95) | def test_get_live_categories(self, client, mock_urlopen):
method test_get_live_streams (line 106) | def test_get_live_streams(self, client, mock_urlopen):
method test_get_live_streams_with_category (line 116) | def test_get_live_streams_with_category(self, client, mock_urlopen):
method test_get_vod_categories (line 127) | def test_get_vod_categories(self, client, mock_urlopen):
method test_get_vod_streams (line 137) | def test_get_vod_streams(self, client, mock_urlopen):
method test_get_vod_streams_with_category (line 147) | def test_get_vod_streams_with_category(self, client, mock_urlopen):
method test_get_series_categories (line 157) | def test_get_series_categories(self, client, mock_urlopen):
method test_get_series (line 167) | def test_get_series(self, client, mock_urlopen):
method test_get_series_with_category (line 177) | def test_get_series_with_category(self, client, mock_urlopen):
method test_get_series_info (line 187) | def test_get_series_info(self, client, mock_urlopen):
method test_get_vod_info (line 198) | def test_get_vod_info(self, client, mock_urlopen):
method test_get_server_info (line 209) | def test_get_server_info(self, client, mock_urlopen):
method test_get_server_info_auth_failed (line 235) | def test_get_server_info_auth_failed(self, client, mock_urlopen):
method test_get_server_info_uses_shorter_timeout (line 243) | def test_get_server_info_uses_shorter_timeout(self, client, mock_urlop...
method test_custom_timeout_passed_through (line 252) | def test_custom_timeout_passed_through(self, client, mock_urlopen):
method test_api_encodes_special_chars_in_params (line 261) | def test_api_encodes_special_chars_in_params(self, client, mock_urlopen):
method test_get_short_epg (line 270) | def test_get_short_epg(self, client, mock_urlopen):
method test_get_short_epg_custom_limit (line 287) | def test_get_short_epg_custom_limit(self, client, mock_urlopen):
Condensed preview — 67 files, each showing path, character count, and a content snippet (1,116K chars in full).
[
{
"path": ".dockerignore",
"chars": 455,
"preview": "# Git\n.git/\n.gitignore\n\n# Python\n__pycache__/\n*.py[cod]\n.venv/\n.ruff_cache/\n.pytest_cache/\nuv.lock\n\n# App data (user-spe"
},
{
"path": ".github/workflows/ai-upscale.yml",
"chars": 5922,
"preview": "name: AI Upscale Image\n\non:\n workflow_dispatch: # Manual trigger\n push:\n branches: [main]\n paths:\n - \"Dock"
},
{
"path": ".github/workflows/ci.yml",
"chars": 3159,
"preview": "name: CI\n\non:\n workflow_dispatch: # Manual trigger\n push:\n branches: [main]\n paths:\n # Only trigger on fil"
},
{
"path": ".github/workflows/ffmpeg-base.yml",
"chars": 5608,
"preview": "name: FFmpeg Base Image\n\non:\n schedule:\n # Build daily at 3 AM UTC\n - cron: \"0 3 * * *\"\n push:\n branches: [ma"
},
{
"path": ".github/workflows/release.yml",
"chars": 2125,
"preview": "name: Release\n\non:\n push:\n tags:\n - \"v*\"\n\nenv:\n REGISTRY: ghcr.io\n IMAGE_NAME: ${{ github.repository }}\n FFM"
},
{
"path": ".gitignore",
"chars": 254,
"preview": "# Python\n__pycache__/\n*.py[cod]\n.venv/\n.ruff_cache/\n.pytest_cache/\n\n# Debugging\n**/*.out\n**/*.log\n\n# UV\nuv.lock\n\n# App d"
},
{
"path": "Dockerfile",
"chars": 2821,
"preview": "# netv application image\n#\n# Default build uses pre-built FFmpeg with full hardware support:\n# docker compose build\n#\n"
},
{
"path": "Dockerfile.ai_upscale",
"chars": 4291,
"preview": "# netv with AI Upscale (TensorRT super-resolution)\n#\n# This image includes everything needed for AI upscaling:\n# - FFmpe"
},
{
"path": "Dockerfile.ffmpeg",
"chars": 14184,
"preview": "# FFmpeg Docker image with hardware acceleration (NVENC, VAAPI, QSV, AMF)\n#\n# REQUIREMENTS:\n# - Docker BuildKit (DOCKE"
},
{
"path": "LICENSE",
"chars": 10748,
"preview": " Apache License\n Version 2.0, January 2004\n "
},
{
"path": "README.md",
"chars": 16853,
"preview": "# neTV\n\nA minimal, self-hosted web interface for IPTV streams.\n\n\n\n\n"
},
{
"path": "entrypoint-ai_upscale.sh",
"chars": 3613,
"preview": "#!/bin/sh\nset -e\n# Entrypoint for AI Upscale image\n#\n# Same as base entrypoint, plus:\n# - Auto-builds TensorRT engines o"
},
{
"path": "entrypoint.sh",
"chars": 3028,
"preview": "#!/bin/sh\nset -e\n# Entrypoint: fix permissions and drop to netv user\n#\n# Handles two common Docker issues:\n# 1. Bind-mou"
},
{
"path": "epg.py",
"chars": 14782,
"preview": "\"\"\"EPG storage and XMLTV parsing.\"\"\"\n\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom datetim"
},
{
"path": "epg_test.py",
"chars": 11428,
"preview": "\"\"\"Tests for epg.py - EPG storage and parsing.\"\"\"\n\nfrom __future__ import annotations\n\nfrom datetime import UTC, datetim"
},
{
"path": "ffmpeg_command.py",
"chars": 51999,
"preview": "\"\"\"FFmpeg command building and media probing.\"\"\"\n\nfrom __future__ import annotations\n\nfrom collections.abc import Callab"
},
{
"path": "ffmpeg_command_test.py",
"chars": 38045,
"preview": "\"\"\"Tests for ffmpeg command generation and media probing.\"\"\"\n\nfrom pathlib import Path\nfrom unittest.mock import MagicMo"
},
{
"path": "ffmpeg_session.py",
"chars": 43247,
"preview": "\"\"\"FFmpeg session lifecycle management.\"\"\"\n\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom t"
},
{
"path": "ffmpeg_session_test.py",
"chars": 29198,
"preview": "\"\"\"Tests for ffmpeg session management.\"\"\"\n\nfrom unittest.mock import patch\n\nimport json\nimport pathlib\nimport tempfile\n"
},
{
"path": "m3u.py",
"chars": 15542,
"preview": "\"\"\"M3U parsing, live/VOD/series data loading.\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import Any\n\nimport log"
},
{
"path": "m3u_test.py",
"chars": 4252,
"preview": "\"\"\"Tests for m3u.py.\"\"\"\n\nfrom __future__ import annotations\n\nfrom pathlib import Path\n\nimport pytest\n\n\n@pytest.fixture\nd"
},
{
"path": "main.py",
"chars": 115311,
"preview": "#!/usr/bin/env python3\n# /// script\n# requires-python = \">=3.11\"\n# dependencies = [\"fastapi\", \"uvicorn[standard]\", \"jinj"
},
{
"path": "main_test.py",
"chars": 24042,
"preview": "\"\"\"Tests for main.py - FastAPI routes.\"\"\"\n\nfrom __future__ import annotations\n\nfrom pathlib import Path\nfrom unittest.mo"
},
{
"path": "pyproject.toml",
"chars": 2628,
"preview": "[project]\nname = \"netv\"\nversion = \"0.1.0\"\ndescription = \"Minimal self-hosted IPTV web interface with EPG guide, VOD, and"
},
{
"path": "static/js/app.js",
"chars": 7853,
"preview": "// Keyboard navigation for 10-foot UI\n(function() {\n 'use strict';\n\n // Pages with custom arrow key handling\n const c"
},
{
"path": "static/js/favorites-grid.js",
"chars": 8084,
"preview": "// Shared Favorites Grid Module for VOD/Series pages\n// Requires: window.FAVORITES_CONFIG = { type: 'movies'|'series', f"
},
{
"path": "static/js/player.js",
"chars": 50233,
"preview": "// IPTV Player Module\n// Requires: Hls.js, window.PLAYER_CONFIG\n\n(function() {\n 'use strict';\n\n const cfg = window.PLA"
},
{
"path": "static/js/settings.js",
"chars": 38422,
"preview": "// Settings Page Module\n(function() {\n 'use strict';\n\n const cfg = window.SETTINGS_CONFIG || {};\n\n // ==============="
},
{
"path": "static/js/virtual-guide.js",
"chars": 22414,
"preview": "/**\n * Virtual scrolling for the TV guide.\n * Only renders rows that are visible (plus buffer), fetches more as needed.\n"
},
{
"path": "templates/base.html",
"chars": 5906,
"preview": "<!DOCTYPE html>\n<html lang=\"en\" class=\"h-full\">\n<head>\n <meta charset=\"UTF-8\">\n <meta name=\"viewport\" content=\"width=d"
},
{
"path": "templates/error.html",
"chars": 645,
"preview": "{% extends \"base.html\" %}\n{% block title %}Error{% endblock %}\n\n{% block content %}\n<div class=\"flex flex-col items-cent"
},
{
"path": "templates/guide.html",
"chars": 25911,
"preview": "{% extends \"base.html\" %}\n{% block title %}Live TV - neTV{% endblock %}\n\n{% block head_extra %}\n{% if loading and not re"
},
{
"path": "templates/login.html",
"chars": 1597,
"preview": "<!DOCTYPE html>\n<html lang=\"en\" class=\"h-full\">\n<head>\n <meta charset=\"UTF-8\">\n <meta name=\"viewport\" content=\"width=d"
},
{
"path": "templates/movie_detail.html",
"chars": 3885,
"preview": "{% extends \"base.html\" %}\n{% block title %}{{ movie.name if movie else 'Movie' }} - neTV{% endblock %}\n\n{% block content"
},
{
"path": "templates/player.html",
"chars": 11070,
"preview": "{% extends \"base.html\" %}\n{% block title %}{% if channel_name %}{{ channel_name }}{% if program_title %} — {{ program_ti"
},
{
"path": "templates/search.html",
"chars": 9710,
"preview": "{% extends \"base.html\" %}\n{% block title %}Search - neTV{% endblock %}\n\n{% block content %}\n<div class=\"flex flex-col h-"
},
{
"path": "templates/series.html",
"chars": 4537,
"preview": "{% extends \"base.html\" %}\n{% block title %}Series - neTV{% endblock %}\n\n{% block head_extra %}\n{% if loading %}\n<meta ht"
},
{
"path": "templates/series_detail.html",
"chars": 4682,
"preview": "{% extends \"base.html\" %}\n{% block title %}{{ series.info.name if series.info else 'Series' }} - neTV{% endblock %}\n\n{% "
},
{
"path": "templates/settings.html",
"chars": 60443,
"preview": "{% extends \"base.html\" %}\n{% block title %}Settings - neTV{% endblock %}\n\n{% block head_extra %}\n<style>\n@keyframes spin"
},
{
"path": "templates/setup.html",
"chars": 1902,
"preview": "<!DOCTYPE html>\n<html lang=\"en\" class=\"h-full\">\n<head>\n <meta charset=\"UTF-8\">\n <meta name=\"viewport\" content=\"width=d"
},
{
"path": "templates/vod.html",
"chars": 4723,
"preview": "{% extends \"base.html\" %}\n{% block title %}Movies - neTV{% endblock %}\n\n{% block head_extra %}\n{% if loading %}\n<meta ht"
},
{
"path": "testing.py",
"chars": 698,
"preview": "\"\"\"Test utilities.\"\"\"\n\nimport sys\nimport warnings\n\n\n# Suppress unawaited coroutine warnings from AsyncMock in tests.\nwar"
},
{
"path": "tools/alignm3u.py",
"chars": 7726,
"preview": "#!/usr/bin/env python3\n# pyright: reportUnknownVariableType=false, reportUnknownMemberType=false, reportUnknownArgumentT"
},
{
"path": "tools/export-tensorrt.py",
"chars": 22849,
"preview": "#!/usr/bin/env python3\n\"\"\"Export upscaling models to TensorRT engines for FFmpeg dnn_processing filter.\n\nThis script con"
},
{
"path": "tools/install-ai_upscale.sh",
"chars": 6943,
"preview": "#!/bin/bash\n# Build TensorRT engines for AI Upscale\n#\n# Prerequisites: uv sync --group ai_upscale\n# Or: pip install to"
},
{
"path": "tools/install-ffmpeg.sh",
"chars": 62460,
"preview": "#!/bin/bash\n# Build ffmpeg from source with hardware acceleration support\n# Supports: NVIDIA NVENC, AMD AMF, Intel QSV/V"
},
{
"path": "tools/install-letsencrypt.sh",
"chars": 1680,
"preview": "#!/bin/bash\n# Install and configure Let's Encrypt certificates\nset -e\n\nDOMAIN=\"${1:-}\"\n\nif [ -z \"$DOMAIN\" ]; then\n ec"
},
{
"path": "tools/install-netv.sh",
"chars": 3840,
"preview": "#!/bin/bash\n# Install netv systemd service\n# Prerequisites: uv (install time only), install-letsencrypt.sh\n#\n# Usage: su"
},
{
"path": "tools/install-prereqs.sh",
"chars": 783,
"preview": "#!/bin/bash\n# Install prerequisites for netv\nset -e\n\necho \"=== Checking prerequisites ===\"\nfor cmd in git curl; do\n i"
},
{
"path": "tools/patches/dnn_backend_tensorrt.cpp",
"chars": 74002,
"preview": "/*\n * Copyright 2026 Joshua V. Dillon\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may n"
},
{
"path": "tools/patches/dnn_backend_torch.cpp",
"chars": 37742,
"preview": "/*\n * Copyright 2026 Joshua V. Dillon\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may n"
},
{
"path": "tools/patches/dnn_cuda_kernels.cu",
"chars": 17019,
"preview": "/*\n * Copyright 2026 Joshua V. Dillon\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may n"
},
{
"path": "tools/patches/dnn_cuda_kernels.h",
"chars": 2289,
"preview": "/*\n * Copyright 2026 Joshua V. Dillon\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may n"
},
{
"path": "tools/patches/vf_dnn_processing.c",
"chars": 17723,
"preview": "/*\n * Copyright (c) 2019 Guo Yejun\n * Copyright (c) 2026 Joshua V. Dillon (TensorRT/Torch backend integration, CUDA hw f"
},
{
"path": "tools/uninstall-netv.sh",
"chars": 925,
"preview": "#!/bin/bash\n# Uninstall netv systemd service\n#\n# Usage: sudo ./uninstall-netv.sh\nset -e\n\nif [ \"$EUID\" -ne 0 ]; then\n "
},
{
"path": "tools/xtream2m3u.py",
"chars": 22586,
"preview": "#!/usr/bin/env python3\n# pyright: reportUnknownVariableType=false, reportUnknownMemberType=false, reportUnknownArgumentT"
},
{
"path": "tools/zap2xml.py",
"chars": 21762,
"preview": "#!/usr/bin/env python3\n# pyright: reportUnknownVariableType=false, reportUnknownMemberType=false, reportUnknownArgumentT"
},
{
"path": "util.py",
"chars": 1583,
"preview": "\"\"\"Shared utilities.\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import Any\n\nimport urllib.error\nimport urllib.p"
},
{
"path": "util_test.py",
"chars": 2724,
"preview": "\"\"\"Tests for util.py.\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import Any\n\nimport urllib.error\n\nimport pytest"
},
{
"path": "xtream.py",
"chars": 4114,
"preview": "\"\"\"Xtream Codes API client.\"\"\"\n\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom typing import"
},
{
"path": "xtream_test.py",
"chars": 10766,
"preview": "\"\"\"Tests for xtream.py - Xtream Codes API client.\"\"\"\n\nfrom __future__ import annotations\n\nfrom unittest.mock import Magi"
}
]