[
  {
    "path": ".dockerignore",
    "content": "*\n!wyoming_faster_whisper/*.py\n!pyproject.toml\n!docker_run.sh\n"
  },
  {
    "path": ".github/workflows/publish.yml",
    "content": "---\nname: Release (DockerHub + GitHub)\n\non:\n  workflow_dispatch:\n  push:\n    tags:\n      - \"v*.*.*\" # e.g. v1.2.3\n\njobs:\n  build:\n    runs-on: ubuntu-latest\n    outputs:\n      tag: ${{ steps.read_ver.outputs.tag }}\n      file_ver: ${{ steps.read_ver.outputs.file_ver }}\n    steps:\n      - uses: actions/checkout@v4\n\n      - uses: actions/setup-python@v5\n        with:\n          python-version: \"3.13\"\n\n      - name: Install build tools\n        run: pip install tomli setuptools wheel build\n\n      - name: Read version from pyproject.toml & verify tag\n        id: read_ver\n        shell: python\n        run: |\n          import os, sys\n          try:\n              import tomllib as toml   # Python 3.11+\n          except Exception:\n              import tomli as toml     # fallback\n\n          # Read pyproject version\n          with open(\"pyproject.toml\",\"rb\") as f:\n              ver = toml.load(f)[\"project\"][\"version\"]\n\n          # Extract tag from GITHUB_REF (e.g. 
\"refs/tags/v1.2.3\" -> \"1.2.3\")\n          ref = os.environ[\"GITHUB_REF\"]\n          # Safety checks & strip prefix\n          prefix = \"refs/tags/v\"\n          if not ref.startswith(prefix):\n              print(f\"Unexpected GITHUB_REF: {ref}\", file=sys.stderr)\n              sys.exit(1)\n          tag = ref[len(prefix):]\n\n          if tag != ver:\n              print(f\"Tag ({tag}) != pyproject.toml ({ver})\", file=sys.stderr)\n              sys.exit(1)\n\n          # Expose outputs\n          with open(os.environ[\"GITHUB_OUTPUT\"], \"a\") as fh:\n              fh.write(f\"tag={tag}\\n\")\n              fh.write(f\"file_ver={ver}\\n\")\n\n          print(f\"Version OK: tag={tag} matches pyproject.toml={ver}\")\n\n      - name: Build wheel\n        run: |\n          python -m build --wheel --sdist\n\n      - name: Upload wheels and sdist\n        uses: actions/upload-artifact@v4\n        with:\n          name: dist-wyoming-faster-whisper\n          path: |\n            dist/*.whl\n            dist/*.tar.gz\n          if-no-files-found: error\n\n      # Enables building foreign architectures (e.g., arm64 on amd64 runner)\n      - name: Set up QEMU\n        uses: docker/setup-qemu-action@v3\n\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@v3\n\n      - name: Log in to DockerHub\n        uses: docker/login-action@v3\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n\n      - name: Build\n        uses: docker/build-push-action@v5\n        with:\n          context: .\n          file: ./Dockerfile\n          platforms: linux/amd64,linux/arm64\n          push: true\n          tags: |\n            rhasspy/wyoming-whisper:latest\n            rhasspy/wyoming-whisper:${{ steps.read_ver.outputs.tag }}\n          cache-from: type=gha\n          cache-to: type=gha,mode=max\n\n  changelog:\n    needs: build\n    runs-on: ubuntu-latest\n    steps:\n      - uses: 
actions/checkout@v4\n\n      - name: Extract release notes (fail if missing)\n        id: notes\n        shell: python\n        env:\n          TAG: ${{ needs.build.outputs.tag }}\n        run: |\n          import os, sys, re, pathlib\n          ver = os.environ[\"TAG\"]                          # e.g., 1.0.2\n          chlog_path = pathlib.Path(\"CHANGELOG.md\")\n          if not chlog_path.exists():\n              print(\"CHANGELOG.md not found\", file=sys.stderr)\n              sys.exit(2)\n\n          lines = chlog_path.read_text(encoding=\"utf-8\").splitlines()\n\n          def is_heading_for_version(s: str) -> bool:\n              s = s.strip()\n              if not s.startswith(\"##\"):\n                  return False\n              s = s[2:].strip()                  # drop \"##\"\n              s = s.strip(\"[]\")                  # allow [1.0.2]\n              s = re.sub(r\"\\s*-\\s*.*$\", \"\", s)   # drop \" - date\"\n              s = s.lstrip(\"v\")                  # allow v1.0.2\n              return s == ver\n\n          start_idx = None\n          for i, line in enumerate(lines):\n              if is_heading_for_version(line):\n                  start_idx = i + 1              # start AFTER heading\n                  break\n\n          if start_idx is None:\n              print(f\"No changelog section found for {ver}\", file=sys.stderr)\n              sys.exit(3)\n\n          end_idx = len(lines)\n          for j in range(start_idx, len(lines)):\n              if lines[j].lstrip().startswith(\"## \"):\n                  end_idx = j\n                  break\n\n          section_lines = lines[start_idx:end_idx]\n          while section_lines and not section_lines[0].strip():\n              section_lines.pop(0)\n          while section_lines and not section_lines[-1].strip():\n              section_lines.pop()\n\n          section = \"\\n\".join(section_lines)\n          only_links = re.fullmatch(r\"(?:\\[[^\\]]+\\]:\\s*\\S+\\s*(?:\\n|$))*\", section 
or \"\", flags=re.MULTILINE)\n          if not section or only_links:\n              print(f\"Changelog section for {ver} is empty\", file=sys.stderr)\n              sys.exit(4)\n\n          pathlib.Path(\"RELEASE_NOTES.md\").write_text(section, encoding=\"utf-8\")\n\n      - name: Upload release notes\n        uses: actions/upload-artifact@v4\n        with:\n          name: release-notes\n          path: RELEASE_NOTES.md\n\n  publish:\n    needs: [build, changelog]\n    runs-on: ubuntu-latest\n    environment: pypi\n    permissions:\n      id-token: write # required for PyPI Trusted Publishing (OIDC)\n      contents: read\n    steps:\n      - name: Download all dists\n        uses: actions/download-artifact@v4\n        with:\n          pattern: dist-*\n          merge-multiple: true\n          path: dist\n\n      - name: Publish to PyPI via OIDC\n        uses: pypa/gh-action-pypi-publish@release/v1\n        with:\n          verbose: true\n\n  github_release:\n    needs: [build, changelog]\n    runs-on: ubuntu-latest\n    permissions:\n      contents: write\n    steps:\n      - name: Download all dists\n        uses: actions/download-artifact@v4\n        with:\n          pattern: dist-*\n          merge-multiple: true\n          path: dist\n      - uses: actions/download-artifact@v4\n        with:\n          name: release-notes\n          path: .\n      - name: Create GitHub Release\n        uses: softprops/action-gh-release@v2\n        with:\n          tag_name: ${{ github.ref_name }}\n          name: ${{ github.ref_name }}\n          body_path: RELEASE_NOTES.md\n          files: |\n            dist/*\n"
  },
  {
    "path": ".github/workflows/test.yml",
    "content": "---\nname: Test\n\non:\n  workflow_dispatch:\n  pull_request:\n    branches:\n      - main\n\njobs:\n  build_wheels:\n    name: Test on ${{ matrix.os }}\n    runs-on: ${{ matrix.os }}\n    strategy:\n      matrix:\n        os: [ubuntu-latest]\n\n    steps:\n      - uses: actions/checkout@v4\n\n      - uses: actions/setup-python@v5\n        with:\n          python-version: \"3.13\"\n\n      - name: Install\n        run: |\n          script/setup --dev --transformers --sherpa\n\n      - name: Run tests\n        run: script/test\n"
  },
  {
    "path": ".gitignore",
    "content": ".DS_Store\n.idea\n*.log\ntmp/\n\n*.py[cod]\n*.egg\n*.egg-info/\nbuild\nhtmlcov\n\n/.venv/\n.mypy_cache/\n__pycache__/\n\n/dist/\n/local/\n"
  },
  {
    "path": ".isort.cfg",
    "content": "[settings]\nmulti_line_output=3\ninclude_trailing_comma=True\nforce_grid_wrap=0\nuse_parentheses=True\nline_length=88\n"
  },
  {
    "path": ".projectile",
    "content": "- /.venv/\n"
  },
  {
    "path": "CHANGELOG.md",
    "content": "# Changelog\n\n## 3.1.1 (unrelased)\n\n- Fix transformers language\n- Add initial prompt to transformers\n\n## 3.1.0\n\n- Refactor to dynamically load models\n- Only prefer Parakeet for English (other languages don't detect reliably)\n- Add `--vad-filter`, `--vad-threshold`, `--vad-min-speech-ms`, `--vad-min-silence-ms` (thanks @lmoe)\n- Add `zeroconf` to Docker image\n\n## 3.0.2\n\n- Set `--data-dir /data` in Docker run script\n\n## 3.0.1\n\n- Fix model auto selection logic\n\n## 3.0.0\n\n- Add support for `sherpa-onnx` and Nvidia's parakeet model\n- Add support for [GigaAM](https://github.com/salute-developers/GigaAM) for Russian via [`onnx-asr`](https://github.com/istupakov/onnx-asr)\n- Add `--stt-library` to select speech-to-text library (deprecate `--use-transformers`)\n- Default `--model` to \"auto\" (prefer parakeet)\n- Add Docker build here\n- Default `--language` to \"auto\"\n- Add `--cpu-threads` for faster-whisper (@Zerwin)\n\n## 2.5.0\n\n- Add support for HuggingFace transformers Whisper models (--use-transformers)\n\n## 2.4.0\n\n- Add \"auto\" for model and beam size (0) to select values based on CPU\n\n## 2.3.0\n\n- Bump faster-whisper package to 1.1.0\n- Supports model `turbo` for faster processing\n\n## 2.2.0\n\n- Bump faster-whisper package to 1.0.3\n\n## 2.1.0\n\n- Added `--initial-prompt` (see https://github.com/openai/whisper/discussions/963)\n\n## 2.0.0\n\n- Use faster-whisper PyPI package\n- `--model` can now be a HuggingFace model like `Systran/faster-distil-whisper-small.en`\n\n## 1.1.0\n\n- Fix enum use for Python 3.11+\n- Add tests and Github actions\n- Bump tokenizers to 0.15\n- Bump wyoming to 1.5.2\n\n## 1.0.0\n\n- Initial release\n\n"
  },
  {
    "path": "Dockerfile",
    "content": "FROM debian:bookworm-slim\nARG TARGETARCH\nARG TARGETVARIANT\n\n# Install faster-whisper\nWORKDIR /usr/src\n\nCOPY ./pyproject.toml ./\nRUN \\\n    apt-get update \\\n    && apt-get install -y --no-install-recommends \\\n        python3 \\\n        python3-pip \\\n        python3-venv \\\n    \\\n    && python3 -m venv .venv \\\n    && .venv/bin/pip3 install --no-cache-dir -U \\\n        setuptools \\\n        wheel \\\n    && .venv/bin/pip3 install --no-cache-dir \\\n        --extra-index-url 'https://download.pytorch.org/whl/cpu' \\\n        'torch==2.6.0' \\\n    \\\n    && .venv/bin/pip3 install --no-cache-dir \\\n        --extra-index-url https://www.piwheels.org/simple \\\n        -e '.[zeroconf,transformers,sherpa,onnx-asr]' \\\n    \\\n    && rm -rf /var/lib/apt/lists/*\n\nCOPY ./ ./\n\nEXPOSE 10400\n\nENTRYPOINT [\"bash\", \"docker_run.sh\"]\n"
  },
  {
    "path": "LICENSE.md",
    "content": "MIT License\n\nCopyright (c) 2025 Michael Hansen\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "README.md",
    "content": "# Wyoming Faster Whisper\n\n[Wyoming protocol](https://github.com/rhasspy/wyoming) server for the [faster-whisper](https://github.com/guillaumekln/faster-whisper/) speech to text system.\n\n## Home Assistant Add-on\n\n[![Show add-on](https://my.home-assistant.io/badges/supervisor_addon.svg)](https://my.home-assistant.io/redirect/supervisor_addon/?addon=core_whisper)\n\n[Source](https://github.com/home-assistant/addons/tree/master/whisper)\n\n## Local Install\n\nClone the repository and set up Python virtual environment:\n\n``` sh\ngit clone https://github.com/rhasspy/wyoming-faster-whisper.git\ncd wyoming-faster-whisper\nscript/setup\n```\n\nRun a server anyone can connect to:\n\n```sh\nscript/run --model tiny-int8 --language en --uri 'tcp://0.0.0.0:10300' --data-dir /data --download-dir /data\n```\n\nThe `--model` can also be a HuggingFace model like `Systran/faster-distil-whisper-small.en`\n\n**NOTE**: Models are downloaded to the first `--data-dir` directory.\n\n## Docker Image\n\n``` sh\ndocker run -it -p 10300:10300 -v /path/to/local/data:/data rhasspy/wyoming-whisper \\\n    --model tiny-int8 --language en\n```\n\n**NOTE**: Models are downloaded to `/data`, so make sure this points to a Docker volume.\n\n[Source](https://github.com/rhasspy/wyoming-addons/tree/master/whisper)\n"
  },
  {
    "path": "docker_run.sh",
    "content": "#!/usr/bin/env bash\ncd /usr/src\n.venv/bin/python3 -m wyoming_faster_whisper \\\n    --uri 'tcp://0.0.0.0:10300' --data-dir '/data' \"$@\"\n"
  },
  {
    "path": "mypy.ini",
    "content": "[mypy]\nignore_missing_imports = true\n\n[mypy-setuptools.*]\nignore_missing_imports = True\n"
  },
  {
    "path": "pylintrc",
    "content": "[MASTER]\nignore=faster_whisper\n\n[MESSAGES CONTROL]\ndisable=\n  format,\n  abstract-method,\n  cyclic-import,\n  duplicate-code,\n  global-statement,\n  import-outside-toplevel,\n  inconsistent-return-statements,\n  locally-disabled,\n  not-context-manager,\n  too-few-public-methods,\n  too-many-arguments,\n  too-many-branches,\n  too-many-instance-attributes,\n  too-many-lines,\n  too-many-locals,\n  too-many-public-methods,\n  too-many-return-statements,\n  too-many-statements,\n  too-many-boolean-expressions,\n  unnecessary-pass,\n  unused-argument,\n  broad-except,\n  too-many-nested-blocks,\n  invalid-name,\n  unused-import,\n  fixme,\n  useless-super-delegation,\n  missing-module-docstring,\n  missing-class-docstring,\n  missing-function-docstring,\n  import-error,\n  consider-using-with,\n  too-many-positional-arguments\n\n[FORMAT]\nexpected-line-ending-format=LF\n"
  },
  {
    "path": "pyproject.toml",
    "content": "[project]\nname = \"wyoming-faster-whisper\"\nversion = \"3.1.1\"\ndescription = \"Wyoming Server for Faster Whisper\"\nreadme = \"README.md\"\nrequires-python = \">=3.8\"\nlicense = {text = \"MIT\"}\nauthors = [\n    {name = \"Michael Hansen\", email = \"mike@rhasspy.org\"}\n]\nkeywords = [\"rhasspy\", \"wyoming\", \"whisper\", \"stt\"]\nclassifiers = [\n    \"Development Status :: 3 - Alpha\",\n    \"Intended Audience :: Developers\",\n    \"Topic :: Multimedia :: Sound/Audio :: Speech\",\n    \"License :: OSI Approved :: MIT License\",\n    \"Programming Language :: Python :: 3.8\",\n    \"Programming Language :: Python :: 3.9\",\n    \"Programming Language :: Python :: 3.10\",\n    \"Programming Language :: Python :: 3.11\",\n    \"Programming Language :: Python :: 3.12\",\n    \"Programming Language :: Python :: 3.13\",\n]\ndependencies = [\n    \"wyoming>=1.8,<2\",\n    \"faster-whisper>=1.2.1,<2\",\n]\n\n[project.urls]\nHomepage = \"http://github.com/rhasspy/wyoming-faster-whisper\"\n\n[tool.setuptools]\nplatforms = [\"any\"]\nzip-safe  = true\ninclude-package-data = true\n\n[tool.setuptools.packages.find]\ninclude = [\"wyoming_faster_whisper\"]\nexclude = [\"tests\"]\n\n[project.scripts]\nwyoming-faster-whisper = \"wyoming_faster_whisper.__main__:run\"\n\n[build-system]\nrequires = [\"setuptools>=42\", \"wheel\"]\nbuild-backend = \"setuptools.build_meta\"\n\n[tool.black]\nline-length = 88\n\n[tool.isort]\nprofile = \"black\"\n\n[tool.pytest.ini_options]\nasyncio_mode = \"auto\"\n\n[tool.mypy]\ncheck_untyped_defs = true\ndisallow_untyped_defs = true\n\n[project.optional-dependencies]\ndev = [\n    \"black\",\n    \"flake8\",\n    \"isort\",\n    \"mypy\",\n    \"pylint\",\n    \"pytest\",\n    \"pytest-asyncio\",\n]\ntransformers = [\n    \"transformers[torch]==4.52.4\",\n]\nsherpa = [\n    \"sherpa-onnx>=1.12.19,<2\",\n]\nonnx_asr = [\n    \"onnx-asr[cpu,hub]==0.7.0\",\n]\nzeroconf = [\n    \"wyoming[zeroconf]\",\n]\n"
  },
  {
    "path": "script/format",
    "content": "#!/usr/bin/env python3\nimport subprocess\nimport venv\nfrom pathlib import Path\n\n_DIR = Path(__file__).parent\n_PROGRAM_DIR = _DIR.parent\n_VENV_DIR = _PROGRAM_DIR / \".venv\"\n_MODULE = _PROGRAM_DIR.name.replace(\"-\", \"_\")\n_MODULE_DIR = _PROGRAM_DIR / _MODULE\n\nif _VENV_DIR.exists():\n    context = venv.EnvBuilder().ensure_directories(_VENV_DIR)\n    python_exe = context.env_exe\nelse:\n    python_exe = \"python3\"\n\nsubprocess.check_call([python_exe, \"-m\", \"black\", str(_MODULE_DIR)])\nsubprocess.check_call([python_exe, \"-m\", \"isort\", str(_MODULE_DIR)])\n"
  },
  {
    "path": "script/lint",
    "content": "#!/usr/bin/env python3\nimport subprocess\nimport venv\nfrom pathlib import Path\n\n_DIR = Path(__file__).parent\n_PROGRAM_DIR = _DIR.parent\n_VENV_DIR = _PROGRAM_DIR / \".venv\"\n_MODULE = _PROGRAM_DIR.name.replace(\"-\", \"_\")\n_MODULE_DIR = _PROGRAM_DIR / _MODULE\n\nif _VENV_DIR.exists():\n    context = venv.EnvBuilder().ensure_directories(_VENV_DIR)\n    python_exe = context.env_exe\nelse:\n    python_exe = \"python3\"\n\nsubprocess.check_call([python_exe, \"-m\", \"black\", str(_MODULE_DIR), \"--check\"])\nsubprocess.check_call([python_exe, \"-m\", \"isort\", str(_MODULE_DIR), \"--check\"])\nsubprocess.check_call([python_exe, \"-m\", \"flake8\", str(_MODULE_DIR)])\nsubprocess.check_call([python_exe, \"-m\", \"pylint\", str(_MODULE_DIR)])\nsubprocess.check_call([python_exe, \"-m\", \"mypy\", str(_MODULE_DIR)])\n"
  },
  {
    "path": "script/package",
    "content": "#!/usr/bin/env python3\nimport shutil\nimport subprocess\nimport sys\nimport platform\nimport venv\nfrom pathlib import Path\n\n_DIR = Path(__file__).parent\n_PROGRAM_DIR = _DIR.parent\n_VENV_DIR = _PROGRAM_DIR / \".venv\"\n_MODULE = _PROGRAM_DIR.name.replace(\"-\", \"_\")\n_MODULE_DIR = _PROGRAM_DIR / _MODULE\n_SRC_LIB_DIR = _PROGRAM_DIR / \"lib\"\n_DEST_LIB_DIR = _MODULE_DIR / \"lib\"\n\nif _VENV_DIR.exists():\n    context = venv.EnvBuilder().ensure_directories(_VENV_DIR)\n    python_exe = context.env_exe\nelse:\n    python_exe = \"python3\"\n\nmachine = platform.machine().lower()\nis_arm = (\"arm\" in machine) or (\"aarch\" in machine)\nis_amd64 = machine in (\"x86_64\", \"amd64\")\nsystem = sys.platform\nplatform_name = \"\"\n\nif system.startswith(\"linux\"):\n    if is_arm:\n        platform_name = \"linux_arm64\"\n\n    if is_amd64:\n        platform_name = \"linux_amd64\"\n\nif system == \"win32\":\n    if is_amd64:\n        platform_name = \"windows_amd64\"\n\nif system == \"darwin\":\n    if is_arm:\n        platform_name = \"darwin_arm64\"\n\nif not platform_name:\n    raise SystemExit(\"Unsupported platform\")\n\nplatform_src_dir = _SRC_LIB_DIR / platform_name\nplatform_dest_dir = _DEST_LIB_DIR\n\nif _DEST_LIB_DIR.exists():\n    shutil.rmtree(_DEST_LIB_DIR)\n\nshutil.copytree(platform_src_dir, platform_dest_dir)\n\nsubprocess.check_call([python_exe, \"-m\", \"build\", \"--wheel\", \"--sdist\"])\n"
  },
  {
    "path": "script/run",
    "content": "#!/usr/bin/env python3\nimport sys\nimport subprocess\nimport venv\nfrom pathlib import Path\n\n_DIR = Path(__file__).parent\n_PROGRAM_DIR = _DIR.parent\n_MODULE = _PROGRAM_DIR.name.replace(\"-\", \"_\")\n_VENV_DIR = _PROGRAM_DIR / \".venv\"\n\nif _VENV_DIR.exists():\n    context = venv.EnvBuilder().ensure_directories(_VENV_DIR)\n    python_exe = context.env_exe\nelse:\n    python_exe = \"python3\"\n\nsubprocess.check_call([python_exe, \"-m\", _MODULE] + sys.argv[1:])\n"
  },
  {
    "path": "script/setup",
    "content": "#!/usr/bin/env python3\nimport argparse\nimport subprocess\nimport venv\nfrom pathlib import Path\n\n_DIR = Path(__file__).parent\n_PROGRAM_DIR = _DIR.parent\n_VENV_DIR = _PROGRAM_DIR / \".venv\"\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--dev\", action=\"store_true\", help=\"Install dev requirements\")\nparser.add_argument(\n    \"--transformers\", action=\"store_true\", help=\"Install transformers requirements\"\n)\nparser.add_argument(\"--sherpa\", action=\"store_true\", help=\"Install sherpa requirements\")\nargs = parser.parse_args()\n\n# Create virtual environment\nbuilder = venv.EnvBuilder(with_pip=True)\ncontext = builder.ensure_directories(_VENV_DIR)\nbuilder.create(_VENV_DIR)\n\n# Upgrade dependencies\npip = [context.env_exe, \"-m\", \"pip\"]\nsubprocess.check_call(pip + [\"install\", \"--upgrade\", \"pip\"])\nsubprocess.check_call(pip + [\"install\", \"--upgrade\", \"setuptools\", \"wheel\"])\n\nextras = []\nif args.dev:\n    extras.append(\"dev\")\n\nif args.transformers:\n    extras.append(\"transformers\")\n\nif args.sherpa:\n    extras.append(\"sherpa\")\n\nextras_str = \"\"\nif extras:\n    extras_str = \"[\" + \",\".join(extras) + \"]\"\n\n# Install requirements\nsubprocess.check_call(pip + [\"install\", \"-e\", f\"{_PROGRAM_DIR}{extras_str}\"])\n"
  },
  {
    "path": "script/test",
    "content": "#!/usr/bin/env python3\nimport subprocess\nimport venv\nfrom pathlib import Path\n\n_DIR = Path(__file__).parent\n_PROGRAM_DIR = _DIR.parent\n_VENV_DIR = _PROGRAM_DIR / \".venv\"\n_TESTS_DIR = _PROGRAM_DIR / \"tests\"\n\nif _VENV_DIR.exists():\n    context = venv.EnvBuilder().ensure_directories(_VENV_DIR)\n    python_exe = context.env_exe\nelse:\n    python_exe = \"python3\"\n\nsubprocess.check_call([python_exe, \"-m\", \"pytest\", str(_TESTS_DIR)])\n"
  },
  {
    "path": "setup.cfg",
    "content": "[flake8]\n# To work with Black\nmax-line-length = 88\n# E501: line too long\n# W503: Line break occurred before a binary operator\n# E203: Whitespace before ':'\n# D202 No blank lines allowed after function docstring\n# W504 line break after binary operator\nignore =\n    E501,\n    W503,\n    E203,\n    D202,\n    W504\n\nexclude = wyoming_faster_whisper/faster_whisper\n\n[isort]\nmulti_line_output = 3\ninclude_trailing_comma=True\nforce_grid_wrap=0\nuse_parentheses=True\nline_length=88\nindent = \"    \"\n"
  },
  {
    "path": "tests/__init__.py",
    "content": "\"\"\"Tests for wyoming-faster-whisper.\"\"\"\n\nfrom pathlib import Path\n\n_DIR = Path(__file__).parent\n_PROGRAM_DIR = _DIR.parent\n_LOCAL_DIR = _PROGRAM_DIR / \"local\"\n_SAMPLES_PER_CHUNK = 1024\n\n# Need to give time for the model to download\n_START_TIMEOUT = 60\n_TRANSCRIBE_TIMEOUT = 60\n"
  },
  {
    "path": "tests/test_faster_whisper.py",
    "content": "\"\"\"Tests for faster-whisper.\"\"\"\n\nimport asyncio\nimport re\nimport sys\nimport wave\nfrom asyncio.subprocess import PIPE\n\nimport pytest\nfrom wyoming.asr import Transcribe, Transcript\nfrom wyoming.audio import AudioStart, AudioStop, wav_to_chunks\nfrom wyoming.event import async_read_event, async_write_event\nfrom wyoming.info import Describe, Info\n\n\nfrom . import _LOCAL_DIR, _SAMPLES_PER_CHUNK, _START_TIMEOUT, _TRANSCRIBE_TIMEOUT, _DIR\n\n\n@pytest.mark.parametrize(\n    (\"stt_library\", \"model\"),\n    [\n        (\"faster-whisper\", \"base-int8\"),\n        (\"transformers\", \"openai/whisper-base.en\"),\n        (\"sherpa\", \"auto\"),\n    ],\n)\n@pytest.mark.asyncio\nasync def test_faster_whisper(stt_library: str, model: str) -> None:\n    proc = await asyncio.create_subprocess_exec(\n        sys.executable,\n        \"-m\",\n        \"wyoming_faster_whisper\",\n        \"--uri\",\n        \"stdio://\",\n        \"--stt-library\",\n        stt_library,\n        \"--model\",\n        model,\n        \"--data-dir\",\n        str(_LOCAL_DIR),\n        \"--language\",\n        \"en\",\n        \"--vad-filter\",\n        stdin=PIPE,\n        stdout=PIPE,\n    )\n    assert proc.stdin is not None\n    assert proc.stdout is not None\n\n    # Check info\n    await async_write_event(Describe().event(), proc.stdin)\n    while True:\n        event = await asyncio.wait_for(\n            async_read_event(proc.stdout), timeout=_START_TIMEOUT\n        )\n        assert event is not None\n\n        if not Info.is_type(event.type):\n            continue\n\n        info = Info.from_event(event)\n        assert len(info.asr) == 1, \"Expected one asr service\"\n        break\n\n    await async_write_event(Transcribe().event(), proc.stdin)\n\n    # Test known WAV\n    with wave.open(str(_DIR / \"turn_on_the_living_room_lamp.wav\"), \"rb\") as example_wav:\n        await async_write_event(\n            AudioStart(\n                
rate=example_wav.getframerate(),\n                width=example_wav.getsampwidth(),\n                channels=example_wav.getnchannels(),\n            ).event(),\n            proc.stdin,\n        )\n        for chunk in wav_to_chunks(example_wav, _SAMPLES_PER_CHUNK):\n            await async_write_event(chunk.event(), proc.stdin)\n\n        await async_write_event(AudioStop().event(), proc.stdin)\n\n    while True:\n        event = await asyncio.wait_for(\n            async_read_event(proc.stdout), timeout=_TRANSCRIBE_TIMEOUT\n        )\n        assert event is not None\n\n        if not Transcript.is_type(event.type):\n            continue\n\n        transcript = Transcript.from_event(event)\n        text = transcript.text.lower().strip()\n        text = re.sub(r\"[^a-z ]\", \"\", text)\n        assert text == \"turn on the living room lamp\"\n        break\n\n    # Need to close stdin for graceful termination\n    proc.stdin.close()\n    _, stderr = await proc.communicate()\n\n    assert proc.returncode == 0, stderr.decode()\n"
  },
  {
    "path": "wyoming_faster_whisper/__init__.py",
    "content": "\"\"\"Wyoming server for faster-whisper.\"\"\"\n\nfrom importlib.metadata import version\n\n__version__ = version(\"wyoming_faster_whisper\")\n\n__all__ = [\"__version__\"]\n"
  },
  {
    "path": "wyoming_faster_whisper/__main__.py",
    "content": "#!/usr/bin/env python3\nimport argparse\nimport asyncio\nimport logging\nimport platform\nimport re\nfrom functools import partial\nfrom typing import Any, Dict, Optional\n\nimport faster_whisper\nfrom wyoming.info import AsrModel, AsrProgram, Attribution, Info\nfrom wyoming.server import AsyncServer, AsyncTcpServer\n\nfrom . import __version__\nfrom .const import AUTO_LANGUAGE, AUTO_MODEL, PARAKEET_LANGUAGES, SttLibrary\nfrom .dispatch_handler import DispatchEventHandler\nfrom .models import ModelLoader\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def main() -> None:\n    \"\"\"Main entry point.\"\"\"\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--uri\", required=True, help=\"unix:// or tcp://\")\n    #\n    parser.add_argument(\n        \"--zeroconf\",\n        nargs=\"?\",\n        const=\"faster-whisper\",\n        help=\"Enable discovery over zeroconf with optional name (default: faster-whisper)\",\n    )\n    #\n    parser.add_argument(\n        \"--model\", default=AUTO_MODEL, help=f\"Name of model to use (or {AUTO_MODEL})\"\n    )\n    parser.add_argument(\n        \"--data-dir\",\n        required=True,\n        action=\"append\",\n        help=\"Data directory to check for downloaded models\",\n    )\n    parser.add_argument(\n        \"--download-dir\",\n        help=\"Directory to download models into (default: first data dir)\",\n    )\n    parser.add_argument(\n        \"--device\",\n        default=\"cpu\",\n        help=\"Device to use for inference (default: cpu)\",\n    )\n    parser.add_argument(\n        \"--language\",\n        default=AUTO_LANGUAGE,\n        help=f\"Default language to set for transcription (default: {AUTO_LANGUAGE})\",\n    )\n    parser.add_argument(\n        \"--compute-type\",\n        default=\"default\",\n        help=\"Compute type (float16, int8, etc.)\",\n    )\n    parser.add_argument(\n        \"--beam-size\",\n        type=int,\n        default=0,\n        help=\"Size of 
beam during decoding (0 for auto)\",\n    )\n    parser.add_argument(\n        \"--cpu-threads\",\n        default=4,\n        type=int,\n        help=\"Number of CPU threads to use for inference (default: 4, faster-whisper and sherpa-onnx)\",\n    )\n    parser.add_argument(\n        \"--initial-prompt\",\n        help=\"Optional text to provide as a prompt for the first window (faster-whisper only)\",\n    )\n    parser.add_argument(\n        \"--vad-filter\",\n        action=\"store_true\",\n        help=\"Enable Silero VAD to filter out non-speech which can reduce hallucinations (default: false, faster-whisper only)\",\n    )\n    parser.add_argument(\n        \"--vad-threshold\",\n        type=float,\n        default=0.5,\n        help=\"VAD speech probability threshold (default: 0.5, faster-whisper only)\",\n    )\n    parser.add_argument(\n        \"--vad-min-speech-ms\",\n        type=int,\n        default=250,\n        help=\"VAD minimum speech duration in ms (default: 250, faster-whisper only)\",\n    )\n    parser.add_argument(\n        \"--vad-min-silence-ms\",\n        type=int,\n        default=2000,\n        help=\"VAD minimum silence duration in ms to split (default: 2000, faster-whisper only)\",\n    )\n    parser.add_argument(\n        \"--stt-library\",\n        choices=[lib.value for lib in SttLibrary],\n        default=SttLibrary.AUTO,\n        help=\"Set library to use for speech-to-text (may require extra dependencies)\",\n    )\n    parser.add_argument(\n        \"--local-files-only\",\n        action=\"store_true\",\n        help=\"Don't check HuggingFace hub for updates every time\",\n    )\n    #\n    parser.add_argument(\"--debug\", action=\"store_true\", help=\"Log DEBUG messages\")\n    parser.add_argument(\n        \"--log-format\", default=logging.BASIC_FORMAT, help=\"Format for log messages\"\n    )\n    parser.add_argument(\n        \"--version\",\n        action=\"version\",\n        version=__version__,\n        help=\"Print 
version and exit\",\n    )\n    args = parser.parse_args()\n\n    if not args.download_dir:\n        # Download to first data dir by default\n        args.download_dir = args.data_dir[0]\n\n    logging.basicConfig(\n        level=logging.DEBUG if args.debug else logging.INFO, format=args.log_format\n    )\n    _LOGGER.debug(args)\n\n    args.stt_library = SttLibrary(args.stt_library)\n\n    machine = platform.machine().lower()\n    is_arm = (\"arm\" in machine) or (\"aarch\" in machine)\n\n    if args.beam_size <= 0:\n        args.beam_size = 1 if is_arm else 5\n        _LOGGER.debug(\"Beam size automatically selected: %s\", args.beam_size)\n\n    # Resolve model name\n    model_name = args.model\n    model_match = re.match(r\"^(tiny|base|small|medium)[.-]int8$\", args.model)\n    if model_match:\n        # Original models re-uploaded to huggingface\n        model_size = model_match.group(1)\n        model_name = f\"{model_size}-int8\"\n        args.model = f\"rhasspy/faster-whisper-{model_name}\"\n\n    if args.language == AUTO_LANGUAGE:\n        # Whisper does not understand auto\n        args.language = None\n\n    if args.model == AUTO_MODEL:\n        args.model = None\n\n    wyoming_info = Info(\n        asr=[\n            AsrProgram(\n                name=\"faster-whisper\",\n                description=\"Faster Whisper transcription with CTranslate2\",\n                attribution=Attribution(\n                    name=\"Guillaume Klein\",\n                    url=\"https://github.com/guillaumekln/faster-whisper/\",\n                ),\n                installed=True,\n                version=__version__,\n                models=[\n                    AsrModel(\n                        name=model_name,\n                        description=model_name,\n                        attribution=Attribution(\n                            name=\"Systran\",\n                            url=\"https://huggingface.co/Systran\",\n                        ),\n                 
       installed=True,\n                        languages=sorted(\n                            list(\n                                # pylint: disable=protected-access\n                                set(faster_whisper.tokenizer._LANGUAGE_CODES).union(\n                                    PARAKEET_LANGUAGES\n                                )\n                            )\n                        ),\n                        version=faster_whisper.__version__,\n                    )\n                ],\n            )\n        ],\n    )\n\n    vad_parameters: Optional[Dict[str, Any]] = None\n    if args.vad_filter:\n        vad_parameters = {\n            \"threshold\": args.vad_threshold,\n            \"min_speech_duration_ms\": args.vad_min_speech_ms,\n            \"min_silence_duration_ms\": args.vad_min_silence_ms,\n        }\n\n    loader = ModelLoader(\n        preferred_stt_library=args.stt_library,\n        preferred_language=args.language,\n        download_dir=args.download_dir,\n        local_files_only=args.local_files_only,\n        model=args.model,\n        compute_type=args.compute_type,\n        device=args.device,\n        beam_size=args.beam_size,\n        cpu_threads=args.cpu_threads,\n        initial_prompt=args.initial_prompt,\n        vad_parameters=vad_parameters,\n    )\n\n    # Load model\n    _LOGGER.debug(\"Pre-loading transcriber\")\n    await loader.load_transcriber()\n\n    server = AsyncServer.from_uri(args.uri)\n\n    if args.zeroconf:\n        if not isinstance(server, AsyncTcpServer):\n            raise ValueError(\"Zeroconf requires tcp:// uri\")\n\n        from wyoming.zeroconf import HomeAssistantZeroconf\n\n        tcp_server: AsyncTcpServer = server\n        hass_zeroconf = HomeAssistantZeroconf(\n            name=args.zeroconf, port=tcp_server.port, host=tcp_server.host\n        )\n        await hass_zeroconf.register_server()\n        _LOGGER.debug(\"Zeroconf discovery enabled\")\n\n    _LOGGER.info(\"Ready\")\n    await 
server.run(\n        partial(\n            DispatchEventHandler,\n            wyoming_info,\n            loader,\n        )\n    )\n\n\n# -----------------------------------------------------------------------------\n\n\ndef run() -> None:\n    asyncio.run(main())\n\n\nif __name__ == \"__main__\":\n    try:\n        run()\n    except KeyboardInterrupt:\n        pass\n"
  },
  {
    "path": "wyoming_faster_whisper/const.py",
    "content": "\"\"\"Constants.\"\"\"\n\nfrom abc import ABC, abstractmethod\nfrom enum import Enum\nfrom pathlib import Path\nfrom typing import Optional, Union\n\n\nclass SttLibrary(str, Enum):\n    \"\"\"Speech-to-text library.\"\"\"\n\n    AUTO = \"auto\"\n    FASTER_WHISPER = \"faster-whisper\"\n    TRANSFORMERS = \"transformers\"\n    SHERPA = \"sherpa\"\n    ONNX_ASR = \"onnx-asr\"\n\n\nAUTO_LANGUAGE = \"auto\"\nAUTO_MODEL = \"auto\"\n\nPARAKEET_LANGUAGES = {\n    \"bg\",\n    \"hr\",\n    \"cs\",\n    \"da\",\n    \"nl\",\n    \"en\",\n    \"et\",\n    \"fi\",\n    \"fr\",\n    \"de\",\n    \"el\",\n    \"hu\",\n    \"it\",\n    \"lv\",\n    \"lt\",\n    \"mt\",\n    \"pl\",\n    \"pt\",\n    \"ro\",\n    \"sk\",\n    \"sl\",\n    \"es\",\n    \"sv\",\n    \"ru\",\n    \"uk\",\n}\n\n\nclass Transcriber(ABC):\n    \"\"\"Base class for transcribers.\"\"\"\n\n    @abstractmethod\n    def transcribe(\n        self,\n        wav_path: Union[str, Path],\n        language: Optional[str],\n        beam_size: int = 5,\n        initial_prompt: Optional[str] = None,\n    ) -> str:\n        pass\n"
  },
  {
    "path": "wyoming_faster_whisper/dispatch_handler.py",
    "content": "\"\"\"Event handler for clients of the server.\"\"\"\n\nimport asyncio\nimport logging\nimport os\nimport tempfile\nimport wave\nfrom typing import Optional\n\nfrom wyoming.asr import Transcribe, Transcript\nfrom wyoming.audio import AudioChunk, AudioChunkConverter, AudioStop\nfrom wyoming.event import Event\nfrom wyoming.info import Describe, Info\nfrom wyoming.server import AsyncEventHandler\n\nfrom .const import Transcriber\nfrom .models import ModelLoader\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass DispatchEventHandler(AsyncEventHandler):\n    \"\"\"Dispatches to appropriate transcriber.\"\"\"\n\n    def __init__(\n        self,\n        wyoming_info: Info,\n        loader: ModelLoader,\n        *args,\n        **kwargs,\n    ) -> None:\n        super().__init__(*args, **kwargs)\n\n        self.wyoming_info_event = wyoming_info.event()\n\n        self._loader = loader\n        self._transcriber: Optional[Transcriber] = None\n        self._transcriber_future: Optional[asyncio.Future] = None\n        self._language: Optional[str] = None\n\n        self._wav_dir = tempfile.TemporaryDirectory()\n        self._wav_path = os.path.join(self._wav_dir.name, \"speech.wav\")\n        self._wav_file: Optional[wave.Wave_write] = None\n\n        self._audio_converter = AudioChunkConverter(rate=16000, width=2, channels=1)\n\n    async def handle_event(self, event: Event) -> bool:\n        if AudioChunk.is_type(event.type):\n            # Audio is saved to a WAV file for transcription later.\n            # None of the underlying models support streaming.\n            chunk = self._audio_converter.convert(AudioChunk.from_event(event))\n\n            if self._wav_file is None:\n                self._wav_file = wave.open(self._wav_path, \"wb\")\n                self._wav_file.setframerate(chunk.rate)\n                self._wav_file.setsampwidth(chunk.width)\n                self._wav_file.setnchannels(chunk.channels)\n\n            
self._wav_file.writeframes(chunk.audio)\n\n            if (self._transcriber is None) and (self._transcriber_future is None):\n                # Load the transcriber in the background.\n                # Hopefully it's ready by the time the audio stops.\n                self._transcriber_future = asyncio.create_task(\n                    self._loader.load_transcriber(self._language)\n                )\n\n            return True\n\n        if AudioStop.is_type(event.type):\n            _LOGGER.debug(\"Audio stopped\")\n\n            if self._transcriber is None:\n                # Get transcriber that was loading in the background\n                assert self._transcriber_future is not None\n                self._transcriber = await self._transcriber_future\n\n            assert self._transcriber is not None\n            assert self._wav_file is not None\n\n            self._wav_file.close()\n            self._wav_file = None\n\n            # Do transcription in a separate thread\n            text = await asyncio.to_thread(\n                self._transcriber.transcribe,\n                self._wav_path,\n                self._language,\n                beam_size=self._loader.beam_size,\n                initial_prompt=self._loader.initial_prompt,\n            )\n\n            _LOGGER.info(text)\n\n            await self.write_event(Transcript(text=text).event())\n            _LOGGER.debug(\"Completed request\")\n\n            # Reset\n            self._language = None\n            self._transcriber = None\n\n            return False\n\n        if Transcribe.is_type(event.type):\n            transcribe = Transcribe.from_event(event)\n            self._language = transcribe.language or self._loader.preferred_language\n            _LOGGER.debug(\"Language set to %s\", self._language)\n\n            return True\n\n        if Describe.is_type(event.type):\n            await self.write_event(self.wyoming_info_event)\n            _LOGGER.debug(\"Sent info\")\n            
return True\n\n        return True\n"
  },
  {
    "path": "wyoming_faster_whisper/faster_whisper_handler.py",
    "content": "\"\"\"Event handler for clients of the server.\"\"\"\n\nfrom pathlib import Path\nfrom typing import Any, Dict, Optional, Union\n\nimport faster_whisper\n\nfrom .const import Transcriber\n\n\nclass FasterWhisperTranscriber(Transcriber):\n    \"\"\"Event handler for clients.\"\"\"\n\n    def __init__(\n        self,\n        model_id: str,\n        cache_dir: Union[str, Path],\n        device: str = \"cpu\",\n        compute_type: str = \"default\",\n        cpu_threads: int = 4,\n        vad_parameters: Optional[Dict[str, Any]] = None,\n    ) -> None:\n        self.vad_filter = vad_parameters is not None\n        self.vad_parameters = vad_parameters\n\n        self.model = faster_whisper.WhisperModel(\n            model_id,\n            download_root=str(cache_dir),\n            device=device,\n            compute_type=compute_type,\n            cpu_threads=cpu_threads,\n        )\n\n    def transcribe(\n        self,\n        wav_path: Union[str, Path],\n        language: Optional[str],\n        beam_size: int = 5,\n        initial_prompt: Optional[str] = None,\n    ) -> str:\n        segments, _info = self.model.transcribe(\n            str(wav_path),\n            beam_size=beam_size,\n            language=language,\n            initial_prompt=initial_prompt,\n            vad_filter=self.vad_filter,\n            vad_parameters=self.vad_parameters,\n        )\n\n        text = \" \".join(segment.text for segment in segments)\n        return text\n"
  },
  {
    "path": "wyoming_faster_whisper/models.py",
    "content": "\"\"\"Logic for model selection, loading, and transcription.\"\"\"\n\nimport asyncio\nimport logging\nimport platform\nfrom collections import defaultdict\nfrom pathlib import Path\nfrom typing import Any, Dict, Optional, Tuple, Union\n\nfrom .const import SttLibrary, Transcriber\nfrom .faster_whisper_handler import FasterWhisperTranscriber\n\n_LOGGER = logging.getLogger(__name__)\n\nTRANSCRIBER_KEY = Tuple[SttLibrary, str]  # model id\n\n\nclass ModelLoader:\n    \"\"\"Load transcribers for models.\"\"\"\n\n    def __init__(\n        self,\n        preferred_stt_library: SttLibrary,\n        preferred_language: Optional[str],\n        download_dir: Union[str, Path],\n        local_files_only: bool,\n        model: Optional[str],\n        compute_type: str,\n        device: str,\n        beam_size: int,\n        cpu_threads: int,\n        initial_prompt: Optional[str],\n        vad_parameters: Optional[Dict[str, Any]],\n    ) -> None:\n        self.preferred_stt_library = preferred_stt_library\n        self.preferred_language = preferred_language\n\n        self.download_dir = Path(download_dir)\n        self.local_files_only = local_files_only\n\n        # faster-whisper only\n        self.model = model\n        self.compute_type = compute_type\n        self.device = device\n        self.beam_size = beam_size\n        self.cpu_threads = cpu_threads\n        self.initial_prompt = initial_prompt\n        self.vad_parameters = vad_parameters\n\n        self._transcriber: Dict[TRANSCRIBER_KEY, Transcriber] = {}\n        self._transcriber_lock: Dict[TRANSCRIBER_KEY, asyncio.Lock] = defaultdict(\n            asyncio.Lock\n        )\n\n    async def load_transcriber(self, language: Optional[str] = None) -> Transcriber:\n        \"\"\"Load or get transcriber from cache for a language.\"\"\"\n        language = language or self.preferred_language\n        stt_library = self.preferred_stt_library\n\n        # Check dependencies\n        try:\n            
from .sherpa_handler import SherpaTranscriber\n\n            has_sherpa = True\n            _LOGGER.debug(\"Sherpa is available\")\n        except ImportError:\n            has_sherpa = False\n            _LOGGER.debug(\"Sherpa is NOT available\")\n\n        try:\n            from .transformers_whisper import TransformersTranscriber\n\n            has_transformers = True\n            _LOGGER.debug(\"Transformers library is available\")\n        except ImportError:\n            has_transformers = False\n            _LOGGER.debug(\"Transformers library is NOT available\")\n\n        try:\n            from .onnx_asr_handler import OnnxAsrTranscriber\n\n            has_onnx_asr = True\n            _LOGGER.debug(\"Onnx-ASR is available\")\n        except ImportError:\n            has_onnx_asr = False\n            _LOGGER.debug(\"Onnx-ASR is NOT available\")\n\n        # Select speech-to-text library\n        if stt_library == SttLibrary.AUTO:\n            # Default to faster-whisper\n            stt_library = SttLibrary.FASTER_WHISPER\n\n            if self.model is None:  # auto\n                if (language == \"ru\") and has_onnx_asr:\n                    # Prefer GigaAM via onnx-asr\n                    stt_library = SttLibrary.ONNX_ASR\n                elif (language == \"en\") and has_sherpa:\n                    # Prefer Parakeet via sherpa for English.\n                    # The v3 Parakeet model claims to auto detect other\n                    # languages, but it doesn't work.\n                    stt_library = SttLibrary.SHERPA\n        elif (\n            ((stt_library == SttLibrary.TRANSFORMERS) and (not has_transformers))\n            or ((stt_library == SttLibrary.SHERPA) and (not has_sherpa))\n            or ((stt_library == SttLibrary.ONNX_ASR) and (not has_onnx_asr))\n        ):\n            # Fall back to faster-whisper\n            stt_library = SttLibrary.FASTER_WHISPER\n            _LOGGER.debug(\"Falling back to faster-whisper (missing 
dependencies)\")\n\n        # Select model\n        model = self.model\n        if model is None:  # auto\n            machine = platform.machine().lower()\n            is_arm = (\"arm\" in machine) or (\"aarch\" in machine)\n            model = guess_model(stt_library, language, is_arm)\n\n        _LOGGER.debug(\n            \"Selected stt-library '%s' with model '%s'\", stt_library.value, model\n        )\n\n        # Load transcriber\n        assert stt_library != SttLibrary.AUTO\n        assert model\n\n        key = (stt_library, model)\n\n        async with self._transcriber_lock[key]:\n            transcriber = self._transcriber.get(key)\n            if transcriber is not None:\n                return transcriber\n\n            if stt_library == SttLibrary.SHERPA:\n                from .sherpa_handler import SherpaTranscriber  # noqa: F811\n\n                transcriber = SherpaTranscriber(\n                    model, self.download_dir, cpu_threads=self.cpu_threads\n                )\n            elif stt_library == SttLibrary.ONNX_ASR:\n                from .onnx_asr_handler import OnnxAsrTranscriber  # noqa: F811\n\n                transcriber = OnnxAsrTranscriber(\n                    model,\n                    cache_dir=self.download_dir,\n                    local_files_only=self.local_files_only,\n                )\n            elif stt_library == SttLibrary.TRANSFORMERS:\n                from .transformers_whisper import TransformersTranscriber  # noqa: F811\n\n                transcriber = TransformersTranscriber(\n                    model,\n                    cache_dir=self.download_dir,\n                    local_files_only=self.local_files_only,\n                )\n            else:\n                transcriber = FasterWhisperTranscriber(\n                    model,\n                    cache_dir=self.download_dir,\n                    device=self.device,\n                    compute_type=self.compute_type,\n                    
cpu_threads=self.cpu_threads,\n                    vad_parameters=self.vad_parameters,\n                )\n\n            self._transcriber[key] = transcriber\n\n        return transcriber\n\n    async def transcribe(\n        self, wav_path: Union[str, Path], language: Optional[str]\n    ) -> str:\n        \"\"\"Transcribe WAV file using appropriate transcriber.\n\n        Assume WAV file is 16Khz 16-bit mono PCM.\n        \"\"\"\n        transcriber = await self.load_transcriber(language)\n        text = await asyncio.to_thread(\n            transcriber.transcribe,\n            wav_path,\n            language=language,\n            beam_size=self.beam_size,\n            initial_prompt=self.initial_prompt,\n        )\n        _LOGGER.debug(\"Transcribed audio: %s\", text)\n\n        return text\n\n\ndef guess_model(stt_library: SttLibrary, language: Optional[str], is_arm: bool) -> str:\n    \"\"\"Automatically guess STT model id.\"\"\"\n    if stt_library == SttLibrary.SHERPA:\n        if language == \"en\":\n            return \"sherpa-onnx-nemo-parakeet-tdt-0.6b-v2-int8\"\n\n        # Non-English\n        return \"sherpa-onnx-nemo-parakeet-tdt-0.6b-v3-int8\"\n\n    if stt_library == SttLibrary.TRANSFORMERS:\n        if language == \"en\":\n            if is_arm:\n                return \"openai/whisper-tiny.en\"\n\n            return \"openai/whisper-base.en\"\n\n        # Non-English\n        if is_arm:\n            return \"openai/whisper-tiny\"\n\n        return \"openai/whisper-base\"\n\n    if stt_library == SttLibrary.ONNX_ASR:\n        return \"gigaam-v2-rnnt\"\n\n    # faster-whisper\n    if is_arm:\n        return \"rhasspy/faster-whisper-tiny-int8\"\n\n    return \"rhasspy/faster-whisper-base-int8\"\n"
  },
  {
    "path": "wyoming_faster_whisper/onnx_asr_handler.py",
    "content": "\"\"\"Code for transcription using the onnx-asr library.\"\"\"\n\nimport wave\nfrom pathlib import Path\nfrom typing import Optional, Union\nfrom unittest.mock import patch\n\nimport numpy as np\nimport onnx_asr\nfrom huggingface_hub import snapshot_download\n\nfrom .const import Transcriber\n\n_RATE = 16000\n\n\nclass OnnxAsrTranscriber(Transcriber):\n    \"\"\"Wrapper for onnx-asr model.\"\"\"\n\n    def __init__(\n        self, model_id: str, cache_dir: Union[str, Path], local_files_only: bool\n    ) -> None:\n        \"\"\"Initialize model.\"\"\"\n\n        # Force download to our cache dir\n        def snapshot_download_with_cache(*args, **kwargs) -> str:\n            kwargs[\"cache_dir\"] = str(Path(cache_dir).resolve())\n            kwargs[\"local_files_only\"] = local_files_only\n\n            return snapshot_download(*args, **kwargs)\n\n        with patch(\"huggingface_hub.snapshot_download\", snapshot_download_with_cache):\n            self.onnx_model = onnx_asr.load_model(model_id)\n\n    def transcribe(\n        self,\n        wav_path: Union[str, Path],\n        language: Optional[str],\n        beam_size: int = 5,\n        initial_prompt: Optional[str] = None,\n    ) -> str:\n        \"\"\"Returns transcription for WAV file.\n\n        WAV file must be 16Khz 16-bit mono audio.\n        \"\"\"\n        wav_file: wave.Wave_read = wave.open(str(wav_path), \"rb\")\n        with wav_file:\n            assert wav_file.getframerate() == _RATE, \"Sample rate must be 16Khz\"\n            assert wav_file.getsampwidth() == 2, \"Width must be 16-bit (2 bytes)\"\n            assert wav_file.getnchannels() == 1, \"Audio must be mono\"\n            audio_bytes = wav_file.readframes(wav_file.getnframes())\n\n        audio_array = (\n            np.frombuffer(audio_bytes, dtype=np.int16).astype(np.float32) / 32767.0\n        )\n\n        recognize_kwargs = {}\n        if language:\n            recognize_kwargs[\"language\"] = language\n\n        text = 
self.onnx_model.recognize(  # type: ignore[call-overload]\n            audio_array, sample_rate=_RATE, **recognize_kwargs\n        )\n        return text\n"
  },
  {
    "path": "wyoming_faster_whisper/sherpa_handler.py",
    "content": "\"\"\"Code for transcription using the sherpa-onnx library.\"\"\"\n\nimport logging\nimport shutil\nimport tarfile\nimport urllib.request\nimport wave\nfrom pathlib import Path\nfrom typing import Optional, Union\n\nimport numpy as np\nimport sherpa_onnx as so\n\nfrom .const import Transcriber\n\n_LOGGER = logging.getLogger(__name__)\n\n_RATE = 16000\n_URL_FORMAT = \"https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/{model_id}.tar.bz2\"\n\n\nclass SherpaTranscriber(Transcriber):\n    \"\"\"Wrapper for sherpa-onnx model.\"\"\"\n\n    def __init__(\n        self,\n        model_id: str,\n        cache_dir: Union[str, Path],\n        cpu_threads: int = 4,\n    ) -> None:\n        \"\"\"Initialize model.\"\"\"\n        cache_dir = Path(cache_dir)\n        model_dir = cache_dir / model_id\n        _LOGGER.debug(\"Looking for sherpa model: %s\", model_dir)\n\n        if not model_dir.exists():\n            url = _URL_FORMAT.format(model_id=model_id)\n            _LOGGER.info(\"Downloading %s\", url)\n            cache_dir.mkdir(parents=True, exist_ok=True)\n\n            try:\n                # Download/extract to cache dir.\n                # We assume that the .tar.bz2 contains a directory named after\n                # the model id.\n                with urllib.request.urlopen(url) as response:\n                    with tarfile.open(fileobj=response, mode=\"r|bz2\") as tar:\n                        for member in tar:\n                            tar.extract(member, path=cache_dir)\n            except Exception:\n                # Delete directory so we'll download again next time\n                shutil.rmtree(model_dir, ignore_errors=True)\n                raise\n\n        # Load model\n        self.recognizer = so.OfflineRecognizer.from_transducer(\n            num_threads=cpu_threads,\n            encoder=f\"{model_dir}/encoder.int8.onnx\",\n            decoder=f\"{model_dir}/decoder.int8.onnx\",\n            
joiner=f\"{model_dir}/joiner.int8.onnx\",\n            tokens=f\"{model_dir}/tokens.txt\",\n            provider=\"cpu\",\n            model_type=\"nemo_transducer\",\n        )\n\n        # Prime model so that the first transcription will be fast\n        stream = self.recognizer.create_stream()\n        stream.accept_waveform(_RATE, np.zeros(shape=(128), dtype=np.float32))\n        self.recognizer.decode_stream(stream)\n\n    def transcribe(\n        self,\n        wav_path: Union[str, Path],\n        language: Optional[str],\n        beam_size: int = 5,\n        initial_prompt: Optional[str] = None,\n    ) -> str:\n        \"\"\"Returns transcription for WAV file.\n\n        WAV file must be 16Khz 16-bit mono audio.\n        \"\"\"\n        wav_file: wave.Wave_read = wave.open(str(wav_path), \"rb\")\n        with wav_file:\n            assert wav_file.getframerate() == _RATE, \"Sample rate must be 16Khz\"\n            assert wav_file.getsampwidth() == 2, \"Width must be 16-bit (2 bytes)\"\n            assert wav_file.getnchannels() == 1, \"Audio must be mono\"\n            audio_bytes = wav_file.readframes(wav_file.getnframes())\n\n        audio_array = (\n            np.frombuffer(audio_bytes, dtype=np.int16).astype(np.float32) / 32767.0\n        )\n        stream = self.recognizer.create_stream()\n        stream.accept_waveform(_RATE, audio_array)\n        self.recognizer.decode_stream(stream)\n        return stream.result.text\n"
  },
  {
    "path": "wyoming_faster_whisper/transformers_whisper.py",
    "content": "\"\"\"Code for Whisper transcription using HuggingFace's transformers library.\"\"\"\n\nimport wave\nfrom pathlib import Path\nfrom typing import Optional, Union\n\nimport torch\nfrom transformers import AutoModelForSpeechSeq2Seq, AutoProcessor\n\nfrom .const import Transcriber\n\n_RATE = 16000\n\n\nclass TransformersTranscriber(Transcriber):\n    \"\"\"Wrapper for HuggingFace transformers Whisper model.\"\"\"\n\n    def __init__(\n        self,\n        model_id: str,\n        cache_dir: Optional[Union[str, Path]] = None,\n        local_files_only: bool = False,\n    ) -> None:\n        \"\"\"Initialize Whisper model.\"\"\"\n        self.processor = AutoProcessor.from_pretrained(\n            model_id, cache_dir=cache_dir, local_files_only=local_files_only\n        )\n        self.model = AutoModelForSpeechSeq2Seq.from_pretrained(\n            model_id, cache_dir=cache_dir, local_files_only=local_files_only\n        )\n        self.model.eval()\n\n    def transcribe(\n        self,\n        wav_path: Union[str, Path],\n        language: Optional[str],\n        beam_size: int = 5,\n        initial_prompt: Optional[str] = None,\n    ) -> str:\n        \"\"\"Returns transcription for WAV file.\n\n        WAV file must be 16Khz 16-bit mono audio.\n        \"\"\"\n        wav_file: wave.Wave_read = wave.open(str(wav_path), \"rb\")\n        with wav_file:\n            assert wav_file.getframerate() == _RATE, \"Sample rate must be 16Khz\"\n            assert wav_file.getsampwidth() == 2, \"Width must be 16-bit (2 bytes)\"\n            assert wav_file.getnchannels() == 1, \"Audio must be mono\"\n            audio_bytes = wav_file.readframes(wav_file.getnframes())\n\n        audio_tensor = (\n            torch.frombuffer(audio_bytes, dtype=torch.int16).float() / 32768.0\n        )\n\n        inputs = self.processor(audio_tensor, sampling_rate=_RATE, return_tensors=\"pt\")\n        generate_args = {**inputs, \"num_beams\": beam_size}\n\n        if 
initial_prompt:\n            prompt_ids = (\n                self.processor.tokenizer(\n                    initial_prompt, return_tensors=\"pt\", add_special_tokens=False\n                )\n                .input_ids[0]\n                .to(self.model.device)\n            )\n            generate_args[\"prompt_ids\"] = prompt_ids\n\n        if language:\n            self.processor.tokenizer.set_prefix_tokens(\n                language=language, task=\"transcribe\"\n            )\n\n        with torch.no_grad():\n            # Ignore warning about attention_mask because we're only doing a single utterance.\n            generated_ids = self.model.generate(**generate_args)\n            transcription = self.processor.batch_decode(\n                generated_ids, skip_special_tokens=True\n            )[0]\n\n        return transcription\n"
  }
]