[
  {
    "path": ".editorconfig",
    "content": "# EditorConfig helps maintain consistent coding styles for multiple developers working on the same\n# project across various editors and IDEs. The EditorConfig project consists of a file format for\n# defining coding styles and a collection of text editor plugins that enable editors to read the\n# file format and adhere to defined styles. EditorConfig files are easily readable and they work\n# nicely with version control systems. https://editorconfig.org/\n\n# top-most EditorConfig file\nroot = true\n\n# Unix-style newlines with a newline ending every file\n[*]\nend_of_line = lf\ninsert_final_newline = true\ncharset = utf-8\ntrim_trailing_whitespace = true\nindent_style = space\n\n# 4 space indentation\n[*.py]\nindent_size = 4\n"
  },
  {
    "path": ".github/actionlint-matcher.json",
    "content": "{\n  \"problemMatcher\": [\n    {\n      \"owner\": \"actionlint\",\n      \"pattern\": [\n        {\n          \"regexp\": \"^(?:\\\\x1b\\\\[\\\\d+m)?(.+?)(?:\\\\x1b\\\\[\\\\d+m)*:(?:\\\\x1b\\\\[\\\\d+m)*(\\\\d+)(?:\\\\x1b\\\\[\\\\d+m)*:(?:\\\\x1b\\\\[\\\\d+m)*(\\\\d+)(?:\\\\x1b\\\\[\\\\d+m)*: (?:\\\\x1b\\\\[\\\\d+m)*(.+?)(?:\\\\x1b\\\\[\\\\d+m)* \\\\[(.+?)\\\\]$\",\n          \"file\": 1,\n          \"line\": 2,\n          \"column\": 3,\n          \"message\": 4,\n          \"code\": 5\n        }\n      ]\n    }\n  ]\n}\n"
  },
  {
    "path": ".github/workflows/ci.yaml",
    "content": "name: CI\non:\n  push:\n    branches: [ master ]\n  pull_request:\n    branches: [ master ]\n\njobs:\n  test:\n    uses: ./.github/workflows/test.yaml\n"
  },
  {
    "path": ".github/workflows/close-inactive-issues.yaml",
    "content": "name: Close inactive issues\n\non:\n  schedule:\n    - cron: \"30 1 * * *\" # Runs daily at 1:30 AM UTC\n\njobs:\n  close-issues:\n    runs-on: ubuntu-latest\n    permissions:\n      issues: write\n      pull-requests: write\n    steps:\n      - uses: actions/stale@v9\n        with:\n          days-before-issue-stale: 90 # The number of days old an issue can be before marking it stale\n          days-before-issue-close: 14 # The number of days to wait to close an issue after it being marked stale\n          stale-issue-label: \"stale\"\n          stale-issue-message: \"This issue is stale because it has been open for 90 days with no activity.\"\n          close-issue-message: \"This issue was closed because it has been inactive for 14 days since being marked as stale.\"\n          days-before-pr-stale: -1 # Disables stale behavior for PRs\n          days-before-pr-close: -1 # Disables closing behavior for PRs\n          repo-token: ${{ secrets.GITHUB_TOKEN }}\n"
  },
  {
    "path": ".github/workflows/codeql-analysis.yaml",
    "content": "name: \"CodeQL Analysis\"\n\non:\n  pull_request:\n    branches: [ master ]\n  push:\n    branches: [ master ]\n  schedule:\n    - cron: '31 0 * * 1'\npermissions:\n  contents: read\n  security-events: write\n\njobs:\n  analyze:\n    name: Analyze Code\n    runs-on: ubuntu-latest\n    permissions:\n      contents: read\n      security-events: write\n\n    strategy:\n      fail-fast: false\n      matrix:\n        language: [ 'python' ]\n\n    steps:\n      - name: Checkout repository\n        uses: actions/checkout@v4\n\n      - name: Initialize CodeQL\n        uses: github/codeql-action/init@v3\n        with:\n          languages: ${{ matrix.language }}\n          build-mode: none\n\n      - name: Run CodeQL Analysis\n        uses: github/codeql-action/analyze@v3\n        with:\n          category: \"security\"\n"
  },
  {
    "path": ".github/workflows/release.yaml",
    "content": "name: Release\non:\n  push:\n    tags: [ 'v*' ]\njobs:\n  test:\n    uses: ./.github/workflows/test.yaml\n\n  publish-to-pypi:\n    name: Build and Publish to PyPI\n    needs:\n      - test\n    if: \"startsWith(github.ref, 'refs/tags/v')\"\n    runs-on: ubuntu-latest\n    environment:\n      name: pypi\n      url: https://pypi.org/p/fast-alpr\n    permissions:\n      id-token: write\n    steps:\n      - uses: actions/checkout@v4\n\n      - name: Install uv (and Python 3.10)\n        uses: astral-sh/setup-uv@v6\n        with:\n          version: \"latest\"\n          python-version: \"3.10\"\n          enable-cache: true\n\n      - name: Build distributions (sdist + wheel)\n        run: uv build --no-sources\n\n      - name: Publish distribution 📦 to PyPI\n        uses: pypa/gh-action-pypi-publish@release/v1\n\n  github-release:\n    name: Create GitHub release\n    needs:\n      - publish-to-pypi\n    runs-on: ubuntu-latest\n\n    permissions:\n      contents: write\n\n    steps:\n      - uses: actions/checkout@v4\n\n      - name: Check package version matches tag\n        id: check-version\n        uses: samuelcolvin/check-python-version@v4.1\n        with:\n          version_file_path: 'pyproject.toml'\n\n      - name: Create GitHub Release\n        env:\n          GITHUB_TOKEN: ${{ github.token }}\n          tag: ${{ github.ref_name }}\n        run: |\n          gh release create \"$tag\" \\\n              --repo=\"$GITHUB_REPOSITORY\" \\\n              --title=\"${GITHUB_REPOSITORY#*/} ${tag#v}\" \\\n              --generate-notes\n\n  update_docs:\n    name: Update documentation\n    needs:\n      - github-release\n    runs-on: ubuntu-latest\n\n    steps:\n      - uses: actions/checkout@v4\n        with:\n          fetch-depth: 0\n\n      - name: Install uv (and Python 3.10)\n        uses: astral-sh/setup-uv@v6\n        with:\n          version: \"latest\"\n          python-version: \"3.10\"\n          enable-cache: true\n\n      - name: Configure Git user\n        run: |\n          git config --local user.email \"github-actions[bot]@users.noreply.github.com\"\n          git config --local user.name \"github-actions[bot]\"\n\n      - name: Retrieve version\n        id: check-version\n        uses: samuelcolvin/check-python-version@v4.1\n        with:\n          version_file_path: 'pyproject.toml'\n          skip_env_check: true\n\n      - name: Install docs dependencies\n        run: uv sync --locked --no-default-groups --group docs\n\n      - name: Deploy the docs\n        run: |\n          uv run mike deploy \\\n            --update-aliases \\\n            --push \\\n            --branch docs-site \\\n            ${{ steps.check-version.outputs.VERSION_MAJOR_MINOR }} latest\n"
  },
  {
    "path": ".github/workflows/secret-scanning.yaml",
    "content": "on:\n  push:\n    branches:\n      - master\n  pull_request:\n    branches:\n      - '**'\n\nname: Secret Leaks\njobs:\n  trufflehog:\n    runs-on: ubuntu-latest\n    steps:\n      - name: Checkout code\n        uses: actions/checkout@v4\n        with:\n          fetch-depth: 0\n      - name: Secret Scanning\n        uses: trufflesecurity/trufflehog@main\n        with:\n          extra_args: --results=verified,unknown\n"
  },
  {
    "path": ".github/workflows/test.yaml",
    "content": "name: Test\non:\n  workflow_call:\n\njobs:\n  test:\n    name: Test\n    strategy:\n      fail-fast: false\n      matrix:\n        python-version: [ '3.10', '3.11', '3.12', '3.13' ]\n        os: [ 'ubuntu-latest' ]\n    runs-on: ${{ matrix.os }}\n    steps:\n      - uses: actions/checkout@v4\n\n      - name: Install uv\n        uses: astral-sh/setup-uv@v6\n        with:\n          version: \"latest\"\n          python-version: ${{ matrix.python-version }}\n          enable-cache: true\n\n      - name: Install the project\n        run: make install\n\n      - name: Check format\n        run: make check_format\n\n      - name: Run linters\n        run: make lint\n\n      - name: Run tests\n        run: make test\n"
  },
  {
    "path": ".github/workflows/workflow-lint.yaml",
    "content": "name: Lint GitHub Actions workflows\non:\n  pull_request:\n    paths:\n      - '.github/workflows/**/*.yaml'\n      - '.github/workflows/**/*.yml'\n  push:\n    branches: [ master ]\n    paths:\n      - '.github/workflows/**/*.yaml'\n      - '.github/workflows/**/*.yml'\n\njobs:\n  actionlint:\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v4\n\n      - name: Enable matcher for actionlint\n        run: echo \"::add-matcher::.github/actionlint-matcher.json\"\n\n      - name: Download and run actionlint\n        run: |\n          bash <(curl https://raw.githubusercontent.com/rhysd/actionlint/main/scripts/download-actionlint.bash)\n          ./actionlint -color\n        shell: bash\n"
  },
  {
    "path": ".gitignore",
    "content": "# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n\n# Distribution / packaging\n.Python\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\nwheels/\nshare/python-wheels/\n*.egg-info/\n.installed.cfg\n*.egg\nMANIFEST\n\n# PyInstaller\n#  Usually these files are written by a python script from a template\n#  before PyInstaller builds the exe, so as to inject date/other infos into it.\n*.manifest\n*.spec\n\n# Installer logs\npip-log.txt\npip-delete-this-directory.txt\n\n# Unit test / coverage reports\nhtmlcov/\n.tox/\n.nox/\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*.cover\n*.py,cover\n.hypothesis/\n.pytest_cache/\ncover/\n\n# Translations\n*.mo\n*.pot\n\n# Django stuff:\n*.log\nlocal_settings.py\ndb.sqlite3\ndb.sqlite3-journal\n\n# Flask stuff:\ninstance/\n.webassets-cache\n\n# Scrapy stuff:\n.scrapy\n\n# Sphinx documentation\ndocs/_build/\n\n# PyBuilder\n.pybuilder/\ntarget/\n\n# Jupyter Notebook\n.ipynb_checkpoints\n\n# IPython\nprofile_default/\nipython_config.py\n\n# pyenv\n#   For a library or package, you might want to ignore these files since the code is\n#   intended to run in multiple environments; otherwise, check them in:\n# .python-version\n\n# pdm\n#   Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.\n#pdm.lock\n#   pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it\n#   in version control.\n#   https://pdm.fming.dev/#use-with-ide\n.pdm.toml\n\n# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm\n__pypackages__/\n\n# Celery stuff\ncelerybeat-schedule\ncelerybeat.pid\n\n# SageMath parsed files\n*.sage.py\n\n# Environments\n.env\n.venv\nenv/\nvenv/\nENV/\nenv.bak/\nvenv.bak/\n\n# Spyder project settings\n.spyderproject\n.spyproject\n\n# Rope project settings\n.ropeproject\n\n# mkdocs documentation\n/site\n\n# mypy\n.mypy_cache/\n.dmypy.json\ndmypy.json\n\n# Pyre type checker\n.pyre/\n\n# pytype static type analyzer\n.pytype/\n\n# Cython debug symbols\ncython_debug/\n\n# pyenv\n.python-version\n\n# CUDA DNN\ncudnn64_7.dll\n\n# Train folder\ntrain_val_set/\n\n# VS Code\n.vscode/\n\n# JetBrains IDEs\n.idea/\n*.iml\n\n# macOS\n.DS_Store\n.AppleDouble\n.LSOverride\n\n# Windows\nThumbs.db\nehthumbs.db\nDesktop.ini\n\n# Linux\n.directory\n\n# Logs/runtime files\n*.pid\n*.tmp\n*.bak\n*.swp\n*.swo\n\n# Notebooks\n**/.ipynb_checkpoints/\n\n# Trained models\n**/trained_models/\n\n# Model artifacts\n*.keras\n*.h5\n*.hdf5\n*.weights.h5\n\n# TensorFlow ckpts\ncheckpoint\n*.ckpt\n*.ckpt.*\n*.index\n*.data-*\n\n# TF SavedModel\nsaved_model/\n**/saved_model/\n**/saved_model.pb\n**/variables/\n\n# Training outputs / logs\nlogs/\n**/logs/\nruns/\ntb_logs/\ntensorboard/\n\n# ONNX related\n*.onnx\n*.ort\n\n# Other Export formats\n*.tflite\n*.mlmodel\n*.mlpackage\n\n# Accelerator caches/artifacts\n*.engine\n*.plan\ntrt_engine_cache/\ntensorrt/\n"
  },
  {
    "path": ".yamllint.yaml",
    "content": "# yamllint configuration file: https://yamllint.readthedocs.io/\nextends: relaxed\n\nrules:\n  line-length: disable\n"
  },
  {
    "path": "LICENSE",
    "content": "MIT License\n\nCopyright (c) 2024 ankandrew\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "Makefile",
    "content": "# Directories\nSRC_PATHS := fast_alpr/ test/\nYAML_PATHS := .github/ mkdocs.yml\n\n# Tasks\n.PHONY: help\nhelp:\n\t@echo \"Available targets:\"\n\t@echo \"  help             : Show this help message\"\n\t@echo \"  install          : Install project with all required dependencies\"\n\t@echo \"  format           : Format code using Ruff format\"\n\t@echo \"  check_format     : Check code formatting with Ruff format\"\n\t@echo \"  ruff             : Run Ruff linter\"\n\t@echo \"  yamllint         : Run yamllint linter\"\n\t@echo \"  pylint           : Run Pylint linter\"\n\t@echo \"  mypy             : Run MyPy static type checker\"\n\t@echo \"  lint             : Run linters (Ruff, Pylint and Mypy)\"\n\t@echo \"  test             : Run tests using pytest\"\n\t@echo \"  checks           : Check format, lint, and test\"\n\t@echo \"  clean            : Clean up caches and build artifacts\"\n\n.PHONY: install\ninstall:\n\t@echo \"==> Installing project with all required dependencies...\"\n\tuv sync --locked --all-groups --extra onnx\n\n.PHONY: format\nformat:\n\t@echo \"==> Sorting imports...\"\n\t@# Currently, the Ruff formatter does not sort imports, see https://docs.astral.sh/ruff/formatter/#sorting-imports\n\t@uv run ruff check --select I --fix $(SRC_PATHS)\n\t@echo \"=====> Formatting code...\"\n\t@uv run ruff format $(SRC_PATHS)\n\n.PHONY: check_format\ncheck_format:\n\t@echo \"=====> Checking format...\"\n\t@uv run ruff format --check --diff $(SRC_PATHS)\n\t@echo \"=====> Checking imports are sorted...\"\n\t@uv run ruff check --select I --exit-non-zero-on-fix $(SRC_PATHS)\n\n.PHONY: ruff\nruff:\n\t@echo \"=====> Running Ruff...\"\n\t@uv run ruff check $(SRC_PATHS)\n\n.PHONY: yamllint\nyamllint:\n\t@echo \"=====> Running yamllint...\"\n\t@uv run yamllint $(YAML_PATHS)\n\n.PHONY: pylint\npylint:\n\t@echo \"=====> Running Pylint...\"\n\t@uv run pylint $(SRC_PATHS)\n\n.PHONY: mypy\nmypy:\n\t@echo \"=====> Running Mypy...\"\n\t@uv run mypy $(SRC_PATHS)\n\n.PHONY: lint\nlint: ruff yamllint pylint mypy\n\n.PHONY: test\ntest:\n\t@echo \"=====> Running tests...\"\n\t@uv run pytest test/\n\n.PHONY: clean\nclean:\n\t@echo \"=====> Cleaning caches...\"\n\t@uv run ruff clean\n\t@rm -rf .cache .pytest_cache .mypy_cache build dist *.egg-info\n\nchecks: format lint test\n"
  },
  {
    "path": "README.md",
    "content": "# FastALPR\n\n[![Actions status](https://github.com/ankandrew/fast-alpr/actions/workflows/test.yaml/badge.svg)](https://github.com/ankandrew/fast-alpr/actions)\n[![Actions status](https://github.com/ankandrew/fast-alpr/actions/workflows/release.yaml/badge.svg)](https://github.com/ankandrew/fast-alpr/actions)\n[![Ruff](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json)](https://github.com/astral-sh/ruff)\n[![Pylint](https://img.shields.io/badge/linting-pylint-yellowgreen)](https://github.com/pylint-dev/pylint)\n[![Checked with mypy](http://www.mypy-lang.org/static/mypy_badge.svg)](http://mypy-lang.org/)\n[![ONNX Model](https://img.shields.io/badge/model-ONNX-blue?logo=onnx&logoColor=white)](https://onnx.ai/)\n[![Hugging Face Spaces](https://img.shields.io/badge/🤗%20Hugging%20Face-Spaces-orange)](https://huggingface.co/spaces/ankandrew/fast-alpr)\n[![Documentation Status](https://img.shields.io/badge/docs-latest-brightgreen.svg)](https://ankandrew.github.io/fast-alpr/)\n[![image](https://img.shields.io/pypi/pyversions/fast-alpr.svg)](https://pypi.python.org/pypi/fast-alpr)\n[![GitHub version](https://img.shields.io/github/v/release/ankandrew/fast-alpr)](https://github.com/ankandrew/fast-alpr/releases)\n[![License](https://img.shields.io/github/license/ankandrew/fast-alpr)](./LICENSE)\n\n[![ALPR Demo Animation](https://raw.githubusercontent.com/ankandrew/fast-alpr/f672fbbec2ddf86aabfc2afc0c45d1fa7612516c/assets/alpr.gif)](https://youtu.be/-TPJot7-HTs?t=652)\n\n**FastALPR** is a high-performance, customizable Automatic License Plate Recognition (ALPR) system. We offer fast and\nefficient ONNX models by default, but you can easily swap in your own models if needed.\n\nFor Optical Character Recognition (**OCR**), we use [fast-plate-ocr](https://github.com/ankandrew/fast-plate-ocr) by\ndefault, and for **license plate detection**, we\nuse [open-image-models](https://github.com/ankandrew/open-image-models). However, you can integrate any OCR or detection\nmodel of your choice.\n\n## 📋 Table of Contents\n\n* [✨ Features](#-features)\n* [📦 Installation](#-installation)\n* [🚀 Quick Start](#-quick-start)\n* [🛠️ Customization and Flexibility](#-customization-and-flexibility)\n* [📖 Documentation](#-documentation)\n* [🤝 Contributing](#-contributing)\n* [🙏 Acknowledgements](#-acknowledgements)\n* [📫 Contact](#-contact)\n\n## ✨ Features\n\n- **High Accuracy**: Uses advanced models for precise license plate detection and OCR.\n- **Customizable**: Easily switch out detection and OCR models.\n- **Easy to Use**: Quick setup with a simple API.\n- **Out-of-the-Box Models**: Includes ready-to-use detection and OCR models\n- **Fast Performance**: Optimized with ONNX Runtime for speed.\n\n## 📦 Installation\n\n```shell\npip install fast-alpr[onnx-gpu]\n```\n\nBy default, **no ONNX runtime is installed**. 
To run inference, you **must** install at least one ONNX backend using an appropriate extra.\n\n| Platform/Use Case  | Install Command                        | Notes                |\n|--------------------|----------------------------------------|----------------------|\n| CPU (default)      | `pip install fast-alpr[onnx]`          | Cross-platform       |\n| NVIDIA GPU (CUDA)  | `pip install fast-alpr[onnx-gpu]`      | Linux/Windows        |\n| Intel (OpenVINO)   | `pip install fast-alpr[onnx-openvino]` | Best on Intel CPUs   |\n| Windows (DirectML) | `pip install fast-alpr[onnx-directml]` | GPU via DirectML     |\n| Qualcomm (QNN)     | `pip install fast-alpr[onnx-qnn]`      | Qualcomm chipsets    |\n\n\n## 🚀 Quick Start\n\n> [!TIP]\n> Try `fast-alpr` in [Hugging Face Spaces](https://huggingface.co/spaces/ankandrew/fast-alpr).\n\nHere's how to get started with FastALPR:\n\n```python\nfrom fast_alpr import ALPR\n\n# You can also initialize the ALPR with custom plate detection and OCR models.\nalpr = ALPR(\n    detector_model=\"yolo-v9-t-384-license-plate-end2end\",\n    ocr_model=\"cct-xs-v2-global-model\",\n)\n\n# The \"assets/test_image.png\" can be found in the repo root dir\nalpr_results = alpr.predict(\"assets/test_image.png\")\nprint(alpr_results)\n```\n\nOutput:\n\n<img alt=\"ALPR Result\" src=\"https://raw.githubusercontent.com/ankandrew/fast-alpr/5063bd92fdd30f46b330d051468be267d4442c9b/assets/alpr_result.webp\"/>\n\nYou can also draw the predictions directly on the image:\n\n```python\nimport cv2\n\nfrom fast_alpr import ALPR\n\n# Initialize the ALPR\nalpr = ALPR(\n    detector_model=\"yolo-v9-t-384-license-plate-end2end\",\n    ocr_model=\"cct-xs-v2-global-model\",\n)\n\n# Load the image\nimage_path = \"assets/test_image.png\"\nframe = cv2.imread(image_path)\n\n# Draw predictions on the image and get the ALPR results\ndrawn = alpr.draw_predictions(frame)\nannotated_frame = drawn.image\nresults = drawn.results\n```\n\nAnnotated frame:\n\n<img alt=\"ALPR Draw Predictions\" src=\"https://github.com/ankandrew/fast-alpr/releases/download/assets/alpr_draw_predictions.webp\"/>\n\n## 🛠️ Customization and Flexibility\n\nFastALPR is designed to be flexible. 
You can customize the detector and OCR models according to your requirements.\nYou can very easily integrate with **Tesseract** OCR to leverage its capabilities:\n\n```python\nimport re\nfrom statistics import mean\n\nimport numpy as np\nimport pytesseract\n\nfrom fast_alpr.alpr import ALPR, BaseOCR, OcrResult\n\n\nclass PytesseractOCR(BaseOCR):\n    def __init__(self) -> None:\n        \"\"\"\n        Init PytesseractOCR.\n        \"\"\"\n\n    def predict(self, cropped_plate: np.ndarray) -> OcrResult | None:\n        if cropped_plate is None:\n            return None\n        # You can change 'eng' to the appropriate language code as needed\n        data = pytesseract.image_to_data(\n            cropped_plate,\n            lang=\"eng\",\n            config=\"--oem 3 --psm 6\",\n            output_type=pytesseract.Output.DICT,\n        )\n        plate_text = \" \".join(data[\"text\"]).strip()\n        plate_text = re.sub(r\"[^A-Za-z0-9]\", \"\", plate_text)\n        # Guard against empty output: mean() raises StatisticsError on an empty sequence\n        confidences = [conf for conf in data[\"conf\"] if conf > 0]\n        if not plate_text or not confidences:\n            return None\n        avg_confidence = mean(confidences) / 100.0\n        return OcrResult(text=plate_text, confidence=avg_confidence)\n\n\nalpr = ALPR(detector_model=\"yolo-v9-t-384-license-plate-end2end\", ocr=PytesseractOCR())\n\nalpr_results = alpr.predict(\"assets/test_image.png\")\nprint(alpr_results)\n```\n\n> [!TIP]\n> See the [docs](https://ankandrew.github.io/fast-alpr/) for more examples!\n\n## 📖 Documentation\n\nComprehensive documentation is available [here](https://ankandrew.github.io/fast-alpr/), including detailed API\nreferences and additional examples.\n\n## 🤝 Contributing\n\nContributions to the repo are greatly appreciated. Whether it's bug fixes, feature enhancements, or new models,\nyour contributions are warmly welcomed.\n\nTo start contributing or to begin development, you can follow these steps:\n\n1. Clone repo\n    ```shell\n    git clone https://github.com/ankandrew/fast-alpr.git\n    ```\n2. Install all dependencies (make sure you have [uv](https://docs.astral.sh/uv/getting-started/installation/) installed):\n    ```shell\n    make install\n    ```\n3. To ensure your changes pass linting and tests before submitting a PR:\n    ```shell\n    make checks\n    ```\n\n## 🙏 Acknowledgements\n\n- [fast-plate-ocr](https://github.com/ankandrew/fast-plate-ocr) for default **OCR** models.\n- [open-image-models](https://github.com/ankandrew/open-image-models) for default plate **detection** models.\n\n## 📫 Contact\n\nFor questions or suggestions, feel free to open an issue.\n"
  },
  {
    "path": "docs/contributing.md",
    "content": "Contributions to the repo are greatly appreciated. Whether it's bug fixes, feature enhancements, or new models,\nyour contributions are warmly welcomed.\n\nTo start contributing or to begin development, you can follow these steps:\n\n1. Clone repo\n    ```shell\n    git clone https://github.com/ankandrew/fast-alpr.git\n    ```\n2. Install all dependencies (make sure you have [uv](https://docs.astral.sh/uv/getting-started/installation/) installed):\n    ```shell\n    make install\n    ```\n3. To ensure your changes pass linting and tests before submitting a PR:\n    ```shell\n    make checks\n    ```\n"
  },
  {
    "path": "docs/custom_models.md",
    "content": "## 🛠️ Customization and Flexibility\n\nFastALPR is designed to be flexible. You can customize the detector and OCR models according to your requirements.\n\n### Using Tesseract OCR\n\nYou can very easily integrate with **Tesseract** OCR to leverage its capabilities:\n\n```python title=\"tesseract_ocr.py\"\nimport re\nfrom statistics import mean\n\nimport numpy as np\nimport pytesseract\n\nfrom fast_alpr.alpr import ALPR, BaseOCR, OcrResult\n\n\nclass PytesseractOCR(BaseOCR):\n    def __init__(self) -> None:\n        \"\"\"\n        Init PytesseractOCR.\n        \"\"\"\n\n    def predict(self, cropped_plate: np.ndarray) -> OcrResult | None:\n        if cropped_plate is None:\n            return None\n        # You can change 'eng' to the appropriate language code as needed\n        data = pytesseract.image_to_data(\n            cropped_plate,\n            lang=\"eng\",\n            config=\"--oem 3 --psm 6\",\n            output_type=pytesseract.Output.DICT,\n        )\n        plate_text = \" \".join(data[\"text\"]).strip()\n        plate_text = re.sub(r\"[^A-Za-z0-9]\", \"\", plate_text)\n        avg_confidence = mean(conf for conf in data[\"conf\"] if conf > 0) / 100.0\n        return OcrResult(text=plate_text, confidence=avg_confidence)\n\n\nalpr = ALPR(detector_model=\"yolo-v9-t-384-license-plate-end2end\", ocr=PytesseractOCR())\n\nalpr_results = alpr.predict(\"assets/test_image.png\")\nprint(alpr_results)\n```\n\n???+ tip\n\n    You can implement this with any OCR you want! For example, [EasyOCR](https://github.com/JaidedAI/EasyOCR).\n"
  },
  {
    "path": "docs/index.md",
    "content": "# FastALPR\n\n[![ALPR Demo Animation](https://raw.githubusercontent.com/ankandrew/fast-alpr/f672fbbec2ddf86aabfc2afc0c45d1fa7612516c/assets/alpr.gif)](https://youtu.be/-TPJot7-HTs?t=652)\n\n## Intro\n\n**FastALPR** is a high-performance, customizable Automatic License Plate Recognition (ALPR) system. We offer fast and\nefficient ONNX models by default, but you can easily swap in your own models if needed.\n\nFor Optical Character Recognition (**OCR**), we use [fast-plate-ocr](https://github.com/ankandrew/fast-plate-ocr) by\ndefault, and for **license plate detection**, we\nuse [open-image-models](https://github.com/ankandrew/open-image-models). However, you can integrate any OCR or detection\nmodel of your choice.\n\n## Features\n\n- **🔍 High Accuracy**: Uses advanced models for precise license plate detection and OCR.\n- **🔧 Customizable**: Easily switch out detection and OCR models.\n- **🚀 Easy to Use**: Quick setup with a simple API.\n- **📦 Out-of-the-Box Models**: Includes ready-to-use detection and OCR models\n- **⚡ Fast Performance**: Optimized with ONNX Runtime for speed.\n\n<br>\n<br>\n"
  },
  {
    "path": "docs/installation.md",
    "content": "## Installation\n\nFor **inference**, install:\n\n```shell\npip install fast-alpr[onnx-gpu]\n```\n\n???+ warning\n    By default, **no ONNX runtime is installed**.\n\n    To run inference, you **must install** one of the ONNX extras:\n\n    - `onnx` - for CPU inference (cross-platform)\n    - `onnx-gpu` - for NVIDIA GPUs (CUDA)\n    - `onnx-openvino` - for Intel CPUs / VPUs\n    - `onnx-directml` - for Windows devices via DirectML\n    - `onnx-qnn` - for Qualcomm chips on mobile\n\nDependencies for inference are kept **minimal by default**. Inference-related packages like **ONNX runtimes** are\n**optional** and not installed unless **explicitly requested via extras**.\n"
  },
  {
    "path": "docs/quick_start.md",
    "content": "## 🚀 Quick Start\n\nHere's how to get started with FastALPR:\n\n### Predictions\n\n```python\nfrom fast_alpr import ALPR\n\n# You can also initialize the ALPR with custom plate detection and OCR models.\nalpr = ALPR(\n    detector_model=\"yolo-v9-t-384-license-plate-end2end\",\n    ocr_model=\"cct-xs-v2-global-model\",\n)\n\n# The \"assets/test_image.png\" can be found in repo root dir\n# You can also pass a NumPy array containing cropped plate image\nalpr_results = alpr.predict(\"assets/test_image.png\")\nprint(alpr_results)\n```\n\n???+ note\n\n    See [reference](reference.md) for the available models.\n\nOutput:\n\n<img alt=\"ALPR Result\" height=\"350\" src=\"https://raw.githubusercontent.com/ankandrew/fast-alpr/5063bd92fdd30f46b330d051468be267d4442c9b/assets/alpr_result.webp\" width=\"700\"/>\n\n### Draw Results\n\nYou can also **draw** the predictions directly on the image:\n\n```python\nimport cv2\n\nfrom fast_alpr import ALPR\n\n# Initialize the ALPR\nalpr = ALPR(\n    detector_model=\"yolo-v9-t-384-license-plate-end2end\",\n    ocr_model=\"cct-xs-v2-global-model\",\n)\n\n# Load the image\nimage_path = \"assets/test_image.png\"\nframe = cv2.imread(image_path)\n\n# Draw predictions on the image and get the ALPR results\ndrawn = alpr.draw_predictions(frame)\nannotated_frame = drawn.image\nresults = drawn.results\n```\n\nAnnotated frame:\n\n<img alt=\"ALPR Draw Predictions\" src=\"https://github.com/ankandrew/fast-alpr/releases/download/assets/alpr_draw_predictions.webp\"/>\n"
  },
  {
    "path": "docs/reference.md",
    "content": "# Reference\n\nThis page shows the public API of FastALPR.\n\n## At a Glance\n\n- Use `ALPR.predict()` to get structured ALPR results\n- Use `ALPR.draw_predictions()` to get an annotated image and the same ALPR results\n- `BoundingBox` and `DetectionResult` come from `open-image-models`\n\n## Imports\n\n```python\nfrom fast_alpr import ALPR, ALPRResult, DrawPredictionsResult, OcrResult\n```\n\n## Common Inputs\n\n- A NumPy image in BGR format\n- A string path to an image file\n\n## Common Returns\n\n- `ALPR.predict(...)` returns `list[ALPRResult]`\n- `ALPR.draw_predictions(...)` returns `DrawPredictionsResult`\n\n`ALPRResult` contains:\n\n- `detection`: box, label, and detection confidence\n- `ocr`: recognized text and OCR confidence, or `None`\n\n`DrawPredictionsResult` contains:\n\n- `image`: the image with boxes and text drawn on it\n- `results`: the same ALPR results used for drawing\n\n## Available Models\n\nSee the available detection models in [open-image-models](https://ankandrew.github.io/open-image-models/0.4/reference/#open_image_models.detection.core.hub.PlateDetectorModel)\nand OCR models in [fast-plate-ocr](https://ankandrew.github.io/fast-plate-ocr/1.0/inference/model_zoo/).\n\n## Main Class\n\n::: fast_alpr.alpr.ALPR\n    options:\n      show_root_heading: true\n      show_root_toc_entry: false\n\n## Result Types\n\n::: fast_alpr.alpr.ALPRResult\n    options:\n      show_root_heading: true\n      show_root_toc_entry: false\n\n::: fast_alpr.alpr.DrawPredictionsResult\n    options:\n      show_root_heading: true\n      show_root_toc_entry: false\n\n::: fast_alpr.base.OcrResult\n    options:\n      show_root_heading: true\n      show_root_toc_entry: false\n\n## Interfaces\n\n::: fast_alpr.base.BaseDetector\n    options:\n      show_root_heading: true\n      show_root_toc_entry: false\n\n::: fast_alpr.base.BaseOCR\n    options:\n      show_root_heading: true\n      show_root_toc_entry: false\n\n## External Types\n\nSee [`BoundingBox`][open_image_models.detection.core.base.BoundingBox]\nand [`DetectionResult`][open_image_models.detection.core.base.DetectionResult].\n"
  },
  {
    "path": "fast_alpr/__init__.py",
    "content": "\"\"\"\nFastALPR package.\n\"\"\"\n\nfrom fast_alpr.alpr import ALPR, ALPRResult, DrawPredictionsResult\nfrom fast_alpr.base import BaseDetector, BaseOCR, DetectionResult, OcrResult\n\n__all__ = [\n    \"ALPR\",\n    \"ALPRResult\",\n    \"BaseDetector\",\n    \"BaseOCR\",\n    \"DetectionResult\",\n    \"DrawPredictionsResult\",\n    \"OcrResult\",\n]\n"
  },
  {
    "path": "fast_alpr/alpr.py",
    "content": "\"\"\"\nALPR module.\n\"\"\"\n\nimport os\nimport statistics\nfrom collections.abc import Sequence\nfrom dataclasses import dataclass\nfrom typing import Literal\n\nimport cv2\nimport numpy as np\nimport onnxruntime as ort\nfrom fast_plate_ocr.inference.hub import OcrModel\nfrom open_image_models.detection.core.hub import PlateDetectorModel\n\nfrom fast_alpr.base import BaseDetector, BaseOCR, DetectionResult, OcrResult\nfrom fast_alpr.default_detector import DefaultDetector\nfrom fast_alpr.default_ocr import DefaultOCR\n\n# pylint: disable=too-many-arguments, too-many-locals\n# ruff: noqa: PLR0913\n\n\n@dataclass(frozen=True)\nclass ALPRResult:\n    \"\"\"\n    Detection and OCR output for one license plate.\n\n    Attributes:\n        detection: Detector output for the plate.\n        ocr: OCR output for the plate, or None if OCR does not return a result.\n    \"\"\"\n\n    detection: DetectionResult\n    ocr: OcrResult | None\n\n\n@dataclass(frozen=True, slots=True)\nclass DrawPredictionsResult:\n    \"\"\"\n    Return value from draw_predictions.\n\n    Attributes:\n        image: The input image with boxes and text drawn on it.\n        results: The ALPR results used to draw the annotations.\n    \"\"\"\n\n    image: np.ndarray\n    results: list[ALPRResult]\n\n\nclass ALPR:\n    \"\"\"\n    Automatic License Plate Recognition (ALPR) system class.\n\n    This class combines a detector and an OCR model to recognize license plates in images.\n    \"\"\"\n\n    def __init__(\n        self,\n        detector: BaseDetector | None = None,\n        ocr: BaseOCR | None = None,\n        detector_model: PlateDetectorModel = \"yolo-v9-t-384-license-plate-end2end\",\n        detector_conf_thresh: float = 0.4,\n        detector_providers: Sequence[str | tuple[str, dict]] | None = None,\n        detector_sess_options: ort.SessionOptions = None,\n        ocr_model: OcrModel | None = \"cct-xs-v2-global-model\",\n        ocr_device: Literal[\"cuda\", \"cpu\", \"auto\"] = \"auto\",\n        ocr_providers: Sequence[str | tuple[str, dict]] | None = None,\n        ocr_sess_options: ort.SessionOptions | None = None,\n        ocr_model_path: str | os.PathLike | None = None,\n        ocr_config_path: str | os.PathLike | None = None,\n        ocr_force_download: bool = False,\n    ) -> None:\n        \"\"\"\n        Initialize the ALPR system.\n\n        Parameters:\n            detector: An instance of BaseDetector. If None, the DefaultDetector is used.\n            ocr: An instance of BaseOCR. If None, the DefaultOCR is used.\n            detector_model: The name of the detector model or a PlateDetectorModel enum instance.\n                Defaults to \"yolo-v9-t-384-license-plate-end2end\".\n            detector_conf_thresh: Confidence threshold for the detector.\n            detector_providers: Execution providers for the detector.\n            detector_sess_options: Session options for the detector.\n            ocr_model: The name of the OCR model from the model hub. This can be none and\n                `ocr_model_path` and `ocr_config_path` parameters are expected to pass them to\n                `fast-plate-ocr` library.\n            ocr_device: The device to run the OCR model on (\"cuda\", \"cpu\", or \"auto\").\n            ocr_providers: Execution providers for the OCR. If None, the default providers are used.\n            ocr_sess_options: Session options for the OCR. 
If None, default session options are\n                used.\n            ocr_model_path: Custom model path for the OCR. If None, the model is downloaded from the\n                hub or cache.\n            ocr_config_path: Custom config path for the OCR. If None, the default configuration is\n                used.\n            ocr_force_download: Whether to force download the OCR model.\n        \"\"\"\n        # Initialize the detector\n        self.detector = detector or DefaultDetector(\n            model_name=detector_model,\n            conf_thresh=detector_conf_thresh,\n            providers=detector_providers,\n            sess_options=detector_sess_options,\n        )\n\n        # Initialize the OCR\n        self.ocr = ocr or DefaultOCR(\n            hub_ocr_model=ocr_model,\n            device=ocr_device,\n            providers=ocr_providers,\n            sess_options=ocr_sess_options,\n            model_path=ocr_model_path,\n            config_path=ocr_config_path,\n            force_download=ocr_force_download,\n        )\n\n    def predict(self, frame: np.ndarray | str) -> list[ALPRResult]:\n        \"\"\"\n        Run plate detection and OCR on an image.\n\n        Parameters:\n            frame: Unprocessed frame (Colors in order: BGR) or image path.\n\n        Returns:\n            A list of ALPRResult objects, one for each detected plate.\n        \"\"\"\n        if isinstance(frame, str):\n            img_path = frame\n            img = cv2.imread(img_path)\n            if img is None:\n                raise ValueError(f\"Failed to load image from path: {img_path}\")\n        else:\n            img = frame\n\n        plate_detections = self.detector.predict(img)\n        alpr_results: list[ALPRResult] = []\n        for detection in plate_detections:\n            bbox = detection.bounding_box\n            x1, y1 = max(bbox.x1, 0), max(bbox.y1, 0)\n            x2, y2 = min(bbox.x2, img.shape[1]), min(bbox.y2, img.shape[0])\n            cropped_plate = img[y1:y2, x1:x2]\n            ocr_result = self.ocr.predict(cropped_plate)\n            alpr_result = ALPRResult(detection=detection, ocr=ocr_result)\n            alpr_results.append(alpr_result)\n        return alpr_results\n\n    def draw_predictions(self, frame: np.ndarray | str) -> DrawPredictionsResult:\n        \"\"\"\n        Draw detections and OCR results on an image.\n\n        Parameters:\n            frame: The original frame or image path.\n\n        Returns:\n            A DrawPredictionsResult with the annotated image and the ALPR results.\n        \"\"\"\n        # If frame is a string, assume it's an image path and load it\n        if isinstance(frame, str):\n            img_path = frame\n            img = cv2.imread(img_path)\n            if img is None:\n                raise ValueError(f\"Failed to load image from path: {img_path}\")\n        else:\n            img = frame\n\n        # Get ALPR results using the ndarray\n        alpr_results = self.predict(img)\n\n        for result in alpr_results:\n            detection = result.detection\n            ocr_result = result.ocr\n            bbox = detection.bounding_box\n            x1, y1, x2, y2 = bbox.x1, bbox.y1, bbox.x2, bbox.y2\n            # Draw the bounding box\n            cv2.rectangle(img, (x1, y1), (x2, y2), (36, 255, 12), 2)\n            if ocr_result is None or not ocr_result.text or not ocr_result.confidence:\n                continue\n            confidence: float = (\n                statistics.mean(ocr_result.confidence)\n                
if isinstance(ocr_result.confidence, list)\n                else ocr_result.confidence\n            )\n            font_scale = min(1.25, max(0.4, img.shape[1] / 1000))\n            text_thickness = 1 if font_scale < 0.75 else 2\n            outline_thickness = text_thickness + max(3, round(font_scale * 3))\n            display_lines = [f\"{ocr_result.text} {confidence * 100:.0f}%\"]\n            if ocr_result.region:\n                region_text = ocr_result.region\n                if ocr_result.region_confidence is not None:\n                    region_text = f\"{region_text} {ocr_result.region_confidence * 100:.0f}%\"\n                display_lines.insert(0, region_text)\n\n            _, text_height = cv2.getTextSize(\n                display_lines[0], cv2.FONT_HERSHEY_SIMPLEX, font_scale, text_thickness\n            )[0]\n            line_gap = max(14, round(text_height * 0.6))\n            line_height = text_height + line_gap\n            text_y = y1 - 10 - ((len(display_lines) - 1) * line_height)\n            if text_y - text_height < 0:\n                text_y = y2 + text_height + 10\n\n            for idx, line in enumerate(display_lines):\n                text_width, current_text_height = cv2.getTextSize(\n                    line, cv2.FONT_HERSHEY_SIMPLEX, font_scale, text_thickness\n                )[0]\n                text_x = min(max(x1, 5), max(5, img.shape[1] - text_width - 5))\n                current_y = min(\n                    max(text_y + (idx * line_height), current_text_height + 5),\n                    img.shape[0] - 5,\n                )\n                # Draw black background for better readability\n                cv2.putText(\n                    img=img,\n                    text=line,\n                    org=(text_x, current_y),\n                    fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n                    fontScale=font_scale,\n                    color=(0, 0, 0),\n                    thickness=outline_thickness,\n                    lineType=cv2.LINE_AA,\n                )\n                # Draw white text\n                cv2.putText(\n                    img=img,\n                    text=line,\n                    org=(text_x, current_y),\n                    fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n                    fontScale=font_scale,\n                    color=(255, 255, 255),\n                    thickness=text_thickness,\n                    lineType=cv2.LINE_AA,\n                )\n\n        return DrawPredictionsResult(image=img, results=alpr_results)\n"
  },
  {
    "path": "fast_alpr/base.py",
    "content": "\"\"\"\nBase module.\n\"\"\"\n\nfrom abc import ABC, abstractmethod\nfrom dataclasses import dataclass\n\nimport numpy as np\nfrom open_image_models.detection.core.base import DetectionResult\n\n\n@dataclass(frozen=True)\nclass OcrResult:\n    \"\"\"\n    OCR output for one cropped plate image.\n\n    Attributes:\n        text: Recognized plate text.\n        confidence: OCR confidence as one value or one value per character.\n        region: Optional region or country prediction.\n        region_confidence: Confidence for the region prediction.\n    \"\"\"\n\n    text: str\n    confidence: float | list[float]\n    region: str | None = None\n    region_confidence: float | None = None\n\n\nclass BaseDetector(ABC):\n    @abstractmethod\n    def predict(self, frame: np.ndarray) -> list[DetectionResult]:\n        \"\"\"Perform detection on the input frame and return a list of detections.\"\"\"\n\n\nclass BaseOCR(ABC):\n    @abstractmethod\n    def predict(self, cropped_plate: np.ndarray) -> OcrResult | None:\n        \"\"\"Perform OCR on the cropped plate image and return the recognized text and character\n        probabilities.\"\"\"\n"
  },
  {
    "path": "fast_alpr/default_detector.py",
    "content": "\"\"\"\nDefault Detector module.\n\"\"\"\n\nfrom collections.abc import Sequence\n\nimport numpy as np\nimport onnxruntime as ort\nfrom open_image_models import LicensePlateDetector\nfrom open_image_models.detection.core.hub import PlateDetectorModel\n\nfrom fast_alpr.base import BaseDetector, DetectionResult\n\n\nclass DefaultDetector(BaseDetector):\n    \"\"\"\n    Default detector class for license plate detection using ONNX models.\n\n    This class utilizes the `LicensePlateDetector` from the `open_image_models` package\n    to perform detection on input frames.\n    \"\"\"\n\n    def __init__(\n        self,\n        model_name: PlateDetectorModel = \"yolo-v9-t-384-license-plate-end2end\",\n        conf_thresh: float = 0.4,\n        providers: Sequence[str | tuple[str, dict]] | None = None,\n        sess_options: ort.SessionOptions = None,\n    ) -> None:\n        \"\"\"\n        Initialize the DefaultDetector with the specified parameters. Uses `open-image-models`'s\n        `LicensePlateDetector`.\n\n        Parameters:\n            model_name: The name of the detector model. See `PlateDetectorModel` for the available\n                models.\n            conf_thresh: Confidence threshold for the detector. Defaults to 0.25.\n            providers: The execution providers to use in ONNX Runtime. If None, the default\n                providers are used.\n            sess_options: Custom session options for ONNX Runtime. If None, default session options\n                are used.\n        \"\"\"\n        self.detector = LicensePlateDetector(\n            detection_model=model_name,\n            conf_thresh=conf_thresh,\n            providers=providers,\n            sess_options=sess_options,\n        )\n\n    def predict(self, frame: np.ndarray) -> list[DetectionResult]:\n        \"\"\"\n        Perform detection on the input frame and return a list of detections.\n\n        Parameters:\n            frame: The input image/frame in which to detect license plates.\n\n        Returns:\n            A list of detection results, each containing the label,\n            confidence, and bounding box of a detected license plate.\n        \"\"\"\n        return self.detector.predict(frame)\n"
  },
  {
    "path": "fast_alpr/default_ocr.py",
    "content": "\"\"\"\nDefault OCR module.\n\"\"\"\n\nimport os\nfrom collections.abc import Sequence\nfrom typing import Literal\n\nimport cv2\nimport numpy as np\nimport onnxruntime as ort\nfrom fast_plate_ocr import LicensePlateRecognizer\nfrom fast_plate_ocr.inference.hub import OcrModel\n\nfrom fast_alpr.base import BaseOCR, OcrResult\n\n\nclass DefaultOCR(BaseOCR):\n    \"\"\"\n    Default OCR class for license plate recognition using `fast-plate-ocr` models.\n    \"\"\"\n\n    def __init__(\n        self,\n        hub_ocr_model: OcrModel | None = None,\n        device: Literal[\"cuda\", \"cpu\", \"auto\"] = \"auto\",\n        providers: Sequence[str | tuple[str, dict]] | None = None,\n        sess_options: ort.SessionOptions | None = None,\n        model_path: str | os.PathLike | None = None,\n        config_path: str | os.PathLike | None = None,\n        force_download: bool = False,\n    ) -> None:\n        \"\"\"\n        Initialize the DefaultOCR with the specified parameters. Uses `fast-plate-ocr`'s\n        `LicensePlateRecognizer`\n\n        Parameters:\n            hub_ocr_model: The name of the OCR model from the model hub.\n            device: The device to run the model on. Options are \"cuda\", \"cpu\", or \"auto\". Defaults\n             to \"auto\".\n            providers: The execution providers to use in ONNX Runtime. If None, the default\n             providers are used.\n            sess_options: Custom session options for ONNX Runtime. If None, default session options\n             are used.\n            model_path: Path to a custom OCR model file. If None, the model is downloaded from the\n             hub or cache.\n            config_path: Path to a custom configuration file. If None, the default configuration is\n             used.\n            force_download: If True, forces the download of the model and overwrites any existing\n             files.\n        \"\"\"\n        self.ocr_model = LicensePlateRecognizer(\n            hub_ocr_model=hub_ocr_model,\n            device=device,\n            providers=providers,\n            sess_options=sess_options,\n            onnx_model_path=model_path,\n            plate_config_path=config_path,\n            force_download=force_download,\n        )\n\n    def predict(self, cropped_plate: np.ndarray) -> OcrResult | None:\n        \"\"\"\n        Perform OCR on a cropped license plate image.\n\n        Parameters:\n            cropped_plate: The cropped image of the license plate in BGR format.\n\n        Returns:\n            OcrResult: An object containing the recognized text and per-character confidence.\n        \"\"\"\n        if cropped_plate is None:\n            return None\n        if self.ocr_model.config.image_color_mode == \"grayscale\":\n            cropped_plate = cv2.cvtColor(cropped_plate, cv2.COLOR_BGR2GRAY)\n        elif self.ocr_model.config.image_color_mode == \"rgb\":\n            cropped_plate = cv2.cvtColor(cropped_plate, cv2.COLOR_BGR2RGB)\n        prediction = self.ocr_model.run_one(cropped_plate, return_confidence=True)\n\n        char_probs = prediction.char_probs\n        confidence: float | list[float] = (\n            0.0 if char_probs is None else [float(x) for x in char_probs.tolist()]\n        )\n        return OcrResult(\n            text=prediction.plate,\n            confidence=confidence,\n            region=prediction.region,\n            region_confidence=prediction.region_prob,\n        )\n"
  },
  {
    "path": "mkdocs.yml",
    "content": "site_name: FastALPR\nsite_author: ankandrew\nsite_description: Fast ALPR.\nrepo_url: https://github.com/ankandrew/fast-alpr\ntheme:\n  name: material\n  features:\n    - content.code.copy\n    - content.code.select\n    - content.footnote.tooltips\n    - header.autohide\n    - navigation.expand\n    - navigation.footer\n    - navigation.instant\n    - navigation.instant.progress\n    - navigation.path\n    - navigation.sections\n    - search.highlight\n    - search.suggest\n    - toc.follow\n  palette:\n    - scheme: default\n      toggle:\n        icon: material/lightbulb-outline\n        name: Switch to dark mode\n    - scheme: slate\n      toggle:\n        icon: material/lightbulb\n        name: Switch to light mode\nnav:\n  - Introduction: index.md\n  - Installation: installation.md\n  - Quick Start: quick_start.md\n  - Custom Models: custom_models.md\n  - Contributing: contributing.md\n  - Reference: reference.md\nplugins:\n  - search\n  - mike:\n      alias_type: symlink\n      canonical_version: latest\n  - mkdocstrings:\n      handlers:\n        python:\n          paths: [ fast_alpr ]\n          load_external_modules: true\n          inventories:\n            - https://ankandrew.github.io/open-image-models/0.5/objects.inv\n            - https://ankandrew.github.io/fast-plate-ocr/1.1/objects.inv\n          options:\n            members_order: source\n            separate_signature: true\n            filters: [ \"!^_\" ]\n            show_category_heading: true\n            docstring_options:\n              ignore_init_summary: true\n            show_signature: true\n            show_source: true\n            heading_level: 2\n            show_root_full_path: false\n            merge_init_into_class: true\n            show_signature_annotations: true\n            signature_crossrefs: true\nextra:\n  version:\n    provider: mike\n  generator: false\nmarkdown_extensions:\n  - admonition\n  - pymdownx.highlight:\n      anchor_linenums: true\n      line_spans: __span\n      pygments_lang_class: true\n  - pymdownx.inlinehilite\n  - pymdownx.snippets\n  - pymdownx.details\n  - pymdownx.superfences\n  - toc:\n      permalink: true\n      title: Page contents\n"
  },
  {
    "path": "pyproject.toml",
    "content": "[project]\nname = \"fast-alpr\"\nversion = \"0.4.0\"\ndescription = \"Fast Automatic License Plate Recognition.\"\nauthors = [{ name = \"ankandrew\", email = \"61120139+ankandrew@users.noreply.github.com\" }]\nrequires-python = \">=3.10,<4.0\"\nreadme = \"README.md\"\nlicense = \"MIT\"\nkeywords = [\n    \"image-processing\",\n    \"computer-vision\",\n    \"deep-learning\",\n    \"object-detection\",\n    \"plate-detection\",\n    \"license-plate-ocr\",\n    \"onnx\",\n]\nclassifiers = [\n    \"Typing :: Typed\",\n    \"Intended Audience :: Developers\",\n    \"Intended Audience :: Education\",\n    \"Intended Audience :: Science/Research\",\n    \"Operating System :: OS Independent\",\n    \"Topic :: Software Development\",\n    \"Topic :: Scientific/Engineering\",\n    \"Topic :: Software Development :: Libraries\",\n    \"Topic :: Software Development :: Build Tools\",\n    \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n    \"Topic :: Software Development :: Libraries :: Python Modules\",\n    \"Programming Language :: Python :: 3\",\n    \"Programming Language :: Python :: 3 :: Only\",\n    \"Programming Language :: Python :: 3.10\",\n    \"Programming Language :: Python :: 3.11\",\n    \"Programming Language :: Python :: 3.12\",\n    \"Programming Language :: Python :: 3.13\"\n]\ndependencies = [\n    \"fast-plate-ocr>=1.1.0\",\n    \"open-image-models>=0.5.1\",\n    \"opencv-python-headless>=4.9.0.80\",\n]\n\n[project.optional-dependencies]\nonnx = [\"onnxruntime>=1.19.2\"]\nonnx-gpu = [\"onnxruntime-gpu>=1.19.2\"]\nonnx-openvino = [\"onnxruntime-openvino>=1.19.2\"]\nonnx-directml = [\"onnxruntime-directml>=1.19.2\"]\nonnx-qnn = [\"onnxruntime-qnn>=1.19.2\"]\n\n[dependency-groups]\ntest = [\"pytest\"]\ndev = [\n    \"mypy\",\n    \"ruff\",\n    \"pylint\",\n    \"types-pyyaml\",\n    \"yamllint\",\n]\ndocs = [\n    \"mkdocs-material\",\n    \"mkdocstrings[python]\",\n    \"mike\",\n]\n\n[tool.uv]\ndefault-groups = [\n    \"test\",\n    \"dev\",\n    \"docs\",\n]\n\n[build-system]\nrequires = [\"hatchling\"]\nbuild-backend = \"hatchling.build\"\n\n[tool.ruff]\nline-length = 100\ntarget-version = \"py310\"\n\n[tool.ruff.lint]\nselect = [\n    # pycodestyle\n    \"E\",\n    \"W\",\n    # Pyflakes\n    \"F\",\n    # pep8-naming\n    \"N\",\n    # pyupgrade\n    \"UP\",\n    # flake8-bugbear\n    \"B\",\n    # flake8-simplify\n    \"SIM\",\n    # flake8-unused-arguments\n    \"ARG\",\n    # Pylint\n    \"PL\",\n    # Perflint\n    \"PERF\",\n    # Ruff-specific rules\n    \"RUF\",\n    # pandas-vet\n    \"PD\",\n]\nignore = [\"N812\", \"PLR2004\", \"PD011\"]\nfixable = [\"ALL\"]\nunfixable = []\n\n[tool.ruff.lint.pylint]\nmax-args = 8\n\n[tool.ruff.format]\nline-ending = \"lf\"\n\n[tool.mypy]\ndisable_error_code = \"import-untyped\"\n\n[tool.pylint.typecheck]\ngenerated-members = [\"cv2.*\"]\nsignature-mutators = [\n    \"click.decorators.option\",\n    \"click.decorators.argument\",\n    \"click.decorators.version_option\",\n    \"click.decorators.help_option\",\n    \"click.decorators.pass_context\",\n    \"click.decorators.confirmation_option\"\n]\n\n[tool.pylint.format]\nmax-line-length = 100\n\n[tool.pylint.\"messages control\"]\ndisable = [\"missing-class-docstring\", \"missing-function-docstring\", \"too-many-positional-arguments\"]\n\n[tool.pylint.design]\nmax-args = 8\nmin-public-methods = 1\n\n[tool.pylint.basic]\nno-docstring-rgx = \"^__|^test_\"\n"
  },
  {
    "path": "test/__init__.py",
    "content": ""
  },
  {
    "path": "test/test_alpr.py",
    "content": "\"\"\"\nTest ALPR end-to-end.\n\"\"\"\n\nfrom pathlib import Path\n\nimport cv2\nimport numpy as np\nimport pytest\nfrom fast_plate_ocr.inference.hub import OcrModel\nfrom open_image_models.detection.core.hub import PlateDetectorModel\n\nfrom fast_alpr.alpr import ALPR\n\nASSETS_DIR = Path(__file__).resolve().parent.parent / \"assets\"\n\n\n@pytest.fixture(scope=\"module\", name=\"alpr\")\ndef alpr_fixture() -> ALPR:\n    return ALPR(\n        detector_model=\"yolo-v9-t-384-license-plate-end2end\",\n        ocr_model=\"cct-xs-v2-global-model\",\n    )\n\n\n@pytest.mark.parametrize(\n    \"img_path, expected_plates\", [(ASSETS_DIR / \"test_image.png\", {\"5AU5341\"})]\n)\n@pytest.mark.parametrize(\"detector_model\", [\"yolo-v9-t-384-license-plate-end2end\"])\n@pytest.mark.parametrize(\n    \"ocr_model\",\n    [\n        \"cct-s-v2-global-model\",\n        \"cct-xs-v2-global-model\",\n        \"cct-s-v1-global-model\",\n        \"cct-xs-v1-global-model\",\n        \"global-plates-mobile-vit-v2-model\",\n        \"european-plates-mobile-vit-v2-model\",\n    ],\n)\ndef test_default_alpr(\n    img_path: Path,\n    expected_plates: set[str],\n    detector_model: PlateDetectorModel,\n    ocr_model: OcrModel,\n) -> None:\n    # pylint: disable=too-many-locals\n    im = cv2.imread(str(img_path))\n    assert im is not None, \"Failed to load test image\"\n    alpr = ALPR(\n        detector_model=detector_model,\n        ocr_model=ocr_model,\n    )\n    actual_result = alpr.predict(im)\n    actual_plates = {x.ocr.text for x in actual_result if x.ocr is not None}\n    assert actual_plates == expected_plates\n\n    for res in actual_result:\n        bbox = res.detection.bounding_box\n        height, width = im.shape[:2]\n        x1, y1 = max(bbox.x1, 0), max(bbox.y1, 0)\n        x2, y2 = min(bbox.x2, width), min(bbox.y2, height)\n\n        assert 0 <= x1 < width, f\"x1 coordinate {x1} out of bounds (0, {width})\"\n        assert 0 <= x2 <= width, f\"x2 coordinate {x2} out of bounds (0, {width})\"\n        assert 0 <= y1 < height, f\"y1 coordinate {y1} out of bounds (0, {height})\"\n        assert 0 <= y2 <= height, f\"y2 coordinate {y2} out of bounds (0, {height})\"\n        assert x1 < x2, f\"x1 ({x1}) should be less than x2 ({x2})\"\n        assert y1 < y2, f\"y1 ({y1}) should be less than y2 ({y2})\"\n\n        if res.ocr is not None:\n            conf = res.ocr.confidence\n            if isinstance(conf, list):\n                assert all(0.0 <= x <= 1.0 for x in conf)\n            elif isinstance(conf, float):\n                assert 0.0 <= conf <= 1.0\n            else:\n                raise TypeError(f\"Unexpected type for confidence: {type(conf).__name__}\")\n\n\n@pytest.mark.parametrize(\"img_path\", [ASSETS_DIR / \"test_image.png\"])\ndef test_draw_predictions(img_path: Path, alpr: ALPR) -> None:\n    im = cv2.imread(str(img_path))\n    assert im is not None, \"Failed to load test image\"\n    h, w, c = im.shape\n\n    # ndarray input\n    drawn_nd = alpr.draw_predictions(im.copy())\n    assert isinstance(drawn_nd.image, np.ndarray)\n    assert drawn_nd.image.shape == (h, w, c)\n    assert drawn_nd.results\n\n    diff_nd = cv2.absdiff(drawn_nd.image, im)\n    assert int(diff_nd.sum()) > 0\n\n    # string path input\n    drawn_path = alpr.draw_predictions(str(img_path))\n    assert isinstance(drawn_path.image, np.ndarray)\n    assert drawn_path.image.shape == (h, w, c)\n    assert drawn_path.results\n\n    diff_path = cv2.absdiff(drawn_path.image, im)\n    assert 
int(diff_path.sum()) > 0\n"
  }
]