Full Code of rhasspy/rhasspy3 for AI

master 11e8d3016d32 cached
258 files
454.7 KB
113.5k tokens
315 symbols
1 requests
Download .txt
Showing preview only (514K chars total). Download the full file or copy to clipboard to get everything.
Repository: rhasspy/rhasspy3
Branch: master
Commit: 11e8d3016d32
Files: 258
Total size: 454.7 KB

Directory structure:
gitextract_7danshym/

├── .gitignore
├── .gitmodules
├── .isort.cfg
├── LICENSE.md
├── README.md
├── bin/
│   ├── asr_adapter_raw2text.py
│   ├── asr_adapter_wav2text.py
│   ├── asr_transcribe.py
│   ├── asr_transcribe_stream.py
│   ├── asr_transcribe_wav.py
│   ├── client_unix_socket.py
│   ├── config_print.py
│   ├── handle_adapter_json.py
│   ├── handle_adapter_text.py
│   ├── handle_intent.py
│   ├── handle_text.py
│   ├── intent_recognize.py
│   ├── mic_adapter_raw.py
│   ├── mic_record_sample.py
│   ├── mic_test_energy.py
│   ├── pipeline_run.py
│   ├── program_download.py
│   ├── program_install.py
│   ├── satellite_run.py
│   ├── server_run.py
│   ├── snd_adapter_raw.py
│   ├── snd_play.py
│   ├── tts_adapter_http.py
│   ├── tts_adapter_text2wav.py
│   ├── tts_speak.py
│   ├── tts_synthesize.py
│   ├── vad_adapter_raw.py
│   ├── vad_segment_wav.py
│   ├── wake_adapter_raw.py
│   └── wake_detect.py
├── docs/
│   ├── README.md
│   ├── adapters.md
│   ├── domains.md
│   ├── home_assistant.md
│   ├── satellite.md
│   ├── tutorial.md
│   └── wyoming.md
├── examples/
│   └── satellite/
│       └── configuration.yaml
├── mypy.ini
├── programs/
│   ├── asr/
│   │   ├── coqui-stt/
│   │   │   ├── README.md
│   │   │   ├── bin/
│   │   │   │   ├── coqui_stt_raw2text.py
│   │   │   │   ├── coqui_stt_server.py
│   │   │   │   └── coqui_stt_wav2text.py
│   │   │   ├── requirements.txt
│   │   │   └── script/
│   │   │       ├── download.py
│   │   │       ├── raw2text
│   │   │       ├── server
│   │   │       ├── setup
│   │   │       └── wav2text
│   │   ├── faster-whisper/
│   │   │   ├── README.md
│   │   │   ├── bin/
│   │   │   │   ├── faster_whisper_server.py
│   │   │   │   └── faster_whisper_wav2text.py
│   │   │   ├── script/
│   │   │   │   ├── download.py
│   │   │   │   ├── server
│   │   │   │   ├── setup
│   │   │   │   └── wav2text
│   │   │   └── src/
│   │   │       ├── LICENSE
│   │   │       ├── README.md
│   │   │       ├── faster_whisper/
│   │   │       │   ├── __init__.py
│   │   │       │   ├── audio.py
│   │   │       │   ├── feature_extractor.py
│   │   │       │   └── transcribe.py
│   │   │       ├── requirements.conversion.txt
│   │   │       ├── requirements.txt
│   │   │       └── setup.py
│   │   ├── pocketsphinx/
│   │   │   ├── README.md
│   │   │   ├── bin/
│   │   │   │   ├── pocketsphinx_raw2text.py
│   │   │   │   ├── pocketsphinx_server.py
│   │   │   │   └── pocketsphinx_wav2text.py
│   │   │   ├── requirements.txt
│   │   │   └── script/
│   │   │       ├── download.py
│   │   │       ├── raw2text
│   │   │       ├── server
│   │   │       ├── setup
│   │   │       └── wav2text
│   │   ├── vosk/
│   │   │   ├── README.md
│   │   │   ├── bin/
│   │   │   │   ├── vosk_raw2text.py
│   │   │   │   ├── vosk_server.py
│   │   │   │   └── vosk_wav2text.py
│   │   │   ├── requirements.txt
│   │   │   └── script/
│   │   │       ├── download.py
│   │   │       ├── raw2text
│   │   │       ├── server
│   │   │       ├── setup
│   │   │       └── wav2text
│   │   ├── whisper/
│   │   │   ├── README.md
│   │   │   ├── bin/
│   │   │   │   ├── whisper_server.py
│   │   │   │   └── whisper_wav2text.py
│   │   │   ├── requirements.txt
│   │   │   └── script/
│   │   │       ├── server
│   │   │       ├── setup
│   │   │       └── wav2text
│   │   └── whisper-cpp/
│   │       ├── .gitignore
│   │       ├── Dockerfile.libwhisper
│   │       ├── Dockerfile.libwhisper.dockerignore
│   │       ├── README.md
│   │       ├── bin/
│   │       │   ├── whisper_cpp_server.py
│   │       │   └── whisper_cpp_wav2text.py
│   │       ├── lib/
│   │       │   ├── Makefile
│   │       │   └── whisper_cpp.py
│   │       ├── requirements.txt
│   │       └── script/
│   │           ├── build_libwhisper
│   │           ├── download.py
│   │           ├── server
│   │           ├── setup
│   │           ├── setup.py
│   │           └── wav2text
│   ├── handle/
│   │   ├── date_time/
│   │   │   └── bin/
│   │   │       └── date_time.py
│   │   └── home_assistant/
│   │       └── bin/
│   │           └── converse.py
│   ├── intent/
│   │   └── regex/
│   │       └── bin/
│   │           └── regex.py
│   ├── mic/
│   │   ├── pyaudio/
│   │   │   ├── README.md
│   │   │   ├── bin/
│   │   │   │   ├── pyaudio_events.py
│   │   │   │   ├── pyaudio_list_mics.py
│   │   │   │   ├── pyaudio_raw.py
│   │   │   │   └── pyaudio_shared.py
│   │   │   ├── requirements.txt
│   │   │   └── script/
│   │   │       ├── events
│   │   │       ├── list_mics
│   │   │       ├── raw
│   │   │       └── setup
│   │   ├── sounddevice/
│   │   │   ├── README.md
│   │   │   ├── bin/
│   │   │   │   ├── sounddevice_events.py
│   │   │   │   ├── sounddevice_list_mics.py
│   │   │   │   ├── sounddevice_raw.py
│   │   │   │   └── sounddevice_shared.py
│   │   │   ├── requirements.txt
│   │   │   └── script/
│   │   │       ├── events
│   │   │       ├── list_mics
│   │   │       ├── raw
│   │   │       └── setup
│   │   └── udp_raw/
│   │       └── bin/
│   │           └── udp_raw.py
│   ├── remote/
│   │   └── websocket/
│   │       ├── bin/
│   │       │   └── stream2stream.py
│   │       ├── requirements.txt
│   │       └── script/
│   │           ├── run
│   │           └── setup
│   ├── snd/
│   │   └── udp_raw/
│   │       └── bin/
│   │           └── udp_raw.py
│   ├── tts/
│   │   ├── coqui-tts/
│   │   │   ├── README.md
│   │   │   ├── requirements.txt
│   │   │   └── script/
│   │   │       ├── list_models
│   │   │       ├── server
│   │   │       └── setup
│   │   ├── flite/
│   │   │   └── script/
│   │   │       ├── download.py
│   │   │       └── setup
│   │   ├── larynx/
│   │   │   ├── README.md
│   │   │   ├── bin/
│   │   │   │   └── larynx_client.py
│   │   │   ├── requirements.txt
│   │   │   └── script/
│   │   │       ├── list_models
│   │   │       ├── server
│   │   │       └── setup
│   │   ├── marytts/
│   │   │   └── bin/
│   │   │       └── marytts.py
│   │   ├── mimic3/
│   │   │   ├── README.md
│   │   │   ├── bin/
│   │   │   │   └── mimic3_server.py
│   │   │   ├── requirements.txt
│   │   │   └── script/
│   │   │       ├── server
│   │   │       └── setup
│   │   └── piper/
│   │       ├── README.md
│   │       ├── bin/
│   │       │   └── piper_server.py
│   │       └── script/
│   │           ├── download.py
│   │           ├── server
│   │           └── setup.py
│   ├── vad/
│   │   ├── energy/
│   │   │   └── bin/
│   │   │       └── energy_speech_prob.py
│   │   ├── silero/
│   │   │   ├── README.md
│   │   │   ├── bin/
│   │   │   │   └── silero_speech_prob.py
│   │   │   ├── requirements.txt
│   │   │   ├── script/
│   │   │   │   ├── setup
│   │   │   │   └── speech_prob
│   │   │   └── share/
│   │   │       └── silero_vad.onnx
│   │   └── webrtcvad/
│   │       ├── README.md
│   │       ├── bin/
│   │       │   └── webrtcvad_speech_prob.py
│   │       ├── requirements.txt
│   │       └── script/
│   │           ├── setup
│   │           └── speech_prob
│   └── wake/
│       ├── porcupine1/
│       │   ├── bin/
│       │   │   ├── list_models.py
│       │   │   ├── porcupine_raw_text.py
│       │   │   ├── porcupine_shared.py
│       │   │   └── porcupine_stream.py
│       │   ├── requirements.txt
│       │   └── script/
│       │       ├── download.py
│       │       ├── list_models
│       │       ├── raw2text
│       │       └── setup
│       ├── precise-lite/
│       │   ├── bin/
│       │   │   └── precise.py
│       │   ├── requirements.txt
│       │   ├── script/
│       │   │   └── setup
│       │   └── share/
│       │       └── hey_mycroft.tflite
│       └── snowboy/
│           ├── bin/
│           │   └── snowboy_raw_text.py
│           ├── requirements.txt
│           ├── script/
│           │   └── setup
│           └── share/
│               ├── hey_extreme.umdl
│               ├── jarvis.umdl
│               ├── neoya.umdl
│               ├── smart_mirror.umdl
│               ├── snowboy.umdl
│               ├── subex.umdl
│               └── view_glass.umdl
├── pylintrc
├── requirements_dev.txt
├── requirements_http_api.txt
├── rhasspy3/
│   ├── VERSION
│   ├── __init__.py
│   ├── asr.py
│   ├── audio.py
│   ├── config.py
│   ├── configuration.yaml
│   ├── core.py
│   ├── event.py
│   ├── handle.py
│   ├── intent.py
│   ├── mic.py
│   ├── pipeline.py
│   ├── program.py
│   ├── py.typed
│   ├── remote.py
│   ├── snd.py
│   ├── tts.py
│   ├── util/
│   │   ├── __init__.py
│   │   ├── dataclasses_json.py
│   │   └── jaml.py
│   ├── vad.py
│   └── wake.py
├── rhasspy3_http_api/
│   ├── __init__.py
│   ├── __main__.py
│   ├── asr.py
│   ├── css/
│   │   └── main.css
│   ├── handle.py
│   ├── intent.py
│   ├── js/
│   │   ├── main.js
│   │   └── recorder.worklet.js
│   ├── pipeline.py
│   ├── snd.py
│   ├── templates/
│   │   ├── asr.html
│   │   ├── index.html
│   │   ├── layout.html
│   │   ├── pipeline.html
│   │   ├── satellite.html
│   │   └── tts.html
│   ├── tts.py
│   └── wake.py
├── script/
│   ├── format
│   ├── http_server
│   ├── lint
│   ├── run
│   ├── setup
│   ├── setup_http_server
│   └── test
├── setup.cfg
├── setup.py
├── tests/
│   ├── test_dataclasses_json.py
│   └── test_jaml.py
└── tools/
    └── websocket-client/
        ├── bin/
        │   └── websocket_client.py
        ├── requirements.txt
        └── script/
            ├── run
            └── setup

================================================
FILE CONTENTS
================================================

================================================
FILE: .gitignore
================================================
.DS_Store
.idea
*.log
tmp/

*.py[cod]
*.egg
/build
htmlcov

.projectile
.venv/
venv/
.mypy_cache/
*.egg-info/

/local/


================================================
FILE: .gitmodules
================================================
[submodule "programs/asr/whisper.cpp/build/whisper.cpp"]
	path = programs/asr/whisper.cpp/build/whisper.cpp
	url = https://github.com/ggerganov/whisper.cpp


================================================
FILE: .isort.cfg
================================================
[settings]
multi_line_output=3
include_trailing_comma=True
force_grid_wrap=0
use_parentheses=True
line_length=88


================================================
FILE: LICENSE.md
================================================
MIT License

Copyright (c) 2022 Michael Hansen

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


================================================
FILE: README.md
================================================
![Rhasspy 3](img/banner.png)

**NOTE: This is a very early developer preview!**

An open source toolkit for building voice assistants.

![Voice assistant pipeline](img/pipeline.png)

Rhasspy focuses on:

* Privacy - no data leaves your computer unless you want it to
* Broad language support - more than just English
* Customization - everything can be changed

## Getting Started

* Check out the [tutorial](docs/tutorial.md)
* Connect Rhasspy to [Home Assistant](docs/home_assistant.md)
   * Install the [Rhasspy 3 add-on](https://github.com/rhasspy/hassio-addons)
* Run one or more [satellites](docs/satellite.md)
* Join [the community](https://community.rhasspy.org/)


## Missing Pieces

This is a developer preview, so there are lots of things missing:

* A user friendly web UI
* An automated method for installing programs/services and downloading models
* Support for custom speech to text grammars
* Intent systems besides Home Assistant
* The ability to accumulate context within a pipeline


## Core Concepts

### Domains

Rhasspy is organized by [domain](docs/domains.md):

* mic - audio input
* wake - wake word detection
* asr - speech to text
* vad - voice activity detection
* intent - intent recognition from text
* handle - intent or text input handling
* tts - text to speech
* snd - audio output


### Programs

Rhasspy talks to external programs using the [Wyoming protocol](docs/wyoming.md). You can add your own programs by implementing the protocol or using an [adapter](#adapters).


### Adapters

[Small scripts](docs/adapters.md) that live in `bin/` and bridge existing programs into the [Wyoming protocol](docs/wyoming.md).

For example, a speech to text program (`asr`) that accepts a WAV file and outputs text can use `asr_adapter_wav2text.py`.


### Pipelines

Complete voice loop from microphone input (mic) to speaker output (snd). Stages are:

1. detect (optional)
    * Wait until wake word is detected in mic
2. transcribe
    * Listen until vad detects silence, then convert audio to text
3. recognize (optional)
    * Recognize an intent from text
4. handle
    * Handle an intent or text, producing a text response
5. speak
    * Convert handle output text to speech, and speak through snd

### Servers

Some programs take a while to load, so it's best to leave them running as a server. Use `bin/server_run.py` or add `--server <domain> <name>` when running the HTTP server.

See `servers` section of `configuration.yaml` file.

---


## Supported Programs

* mic
    * [arecord](https://alsa-project.org/wiki/Main_Page)
    * [gstreamer_udp](https://gstreamer.freedesktop.org/)
    * [sounddevice](https://python-sounddevice.readthedocs.io)
    * [pyaudio](https://people.csail.mit.edu/hubert/pyaudio/docs/)
* wake 
    * [porcupine1](https://github.com/Picovoice/porcupine)
    * [precise-lite](https://github.com/mycroftAI/mycroft-precise)
    * [snowboy](https://github.com/Kitt-AI/snowboy)
* vad
    * [silero](https://github.com/snakers4/silero-vad)
    * [webrtcvad](https://pypi.org/project/webrtcvad/)
* asr 
    * [whisper](https://github.com/openai/whisper)
    * [whisper-cpp](https://github.com/ggerganov/whisper.cpp/)
    * [faster-whisper](https://github.com/guillaumekln/faster-whisper/)
    * [vosk](https://alphacephei.com/vosk/)
    * [coqui-stt](https://stt.readthedocs.io)
    * [pocketsphinx](https://github.com/cmusphinx/pocketsphinx)
* handle
    * [home_assistant_conversation](https://www.home-assistant.io/docs/assist)
* tts 
    * [piper](https://github.com/rhasspy/piper/)
    * [mimic3](https://github.com/mycroftAI/mimic3)
    * [larynx](https://github.com/rhasspy/larynx/)
    * [coqui-tts](https://tts.readthedocs.io)
    * [marytts](http://mary.dfki.de/)
    * [flite](http://www.festvox.org/flite/)
    * [festival](http://www.cstr.ed.ac.uk/projects/festival/)
    * [espeak-ng](https://github.com/espeak-ng/espeak-ng/)
* snd
    * [aplay](https://alsa-project.org/wiki/Main_Page)
    * [gstreamer_udp](https://gstreamer.freedesktop.org/)
    
    
---


## HTTP API

`http://localhost:13331/<endpoint>`

Unless overridden, the pipeline named "default" is used.

* `/pipeline/run`
    * Runs a full pipeline from mic to snd
    * Produces JSON
    * Override `pipeline` or:
        * `wake_program`
        * `asr_program`
        * `intent_program`
        * `handle_program`
        * `tts_program`
        * `snd_program`
    * Skip stages with `start_after`
        * `wake` - skip detection, body is detection name (text)
        * `asr` - skip recording, body is transcript (text) or WAV audio
        * `intent` - skip recognition, body is intent/not-recognized event (JSON)
        * `handle` - skip handling, body is handle/not-handled event (JSON)
        * `tts` - skip synthesis, body is WAV audio
    * Stop early with `stop_after`
        * `wake` - only detection
        * `asr` - detection and transcription
        * `intent` - detection, transcription, recognition
        * `handle` - detection, transcription, recognition, handling
        * `tts` - detection, transcription, recognition, handling, synthesis
* `/wake/detect`
    * Detect wake word in WAV input
    * Produces JSON
    * Override `wake_program` or `pipeline`
* `/asr/transcribe`
    * Transcribe audio from WAV input
    * Produces JSON
    * Override `asr_program` or `pipeline`
* `/intent/recognize`
    * Recognizes intent from text body (POST) or `text` (GET)
    * Produces JSON
    * Override `intent_program` or `pipeline`
* `/handle/handle`
    * Handles intent/text from body (POST) or `input` (GET)
    * `Content-Type` must be `application/json` for intent input
    * Override `handle_program` or `pipeline`
* `/tts/synthesize`
    * Synthesizes audio from text body (POST) or `text` (GET)
    * Produces WAV audio
    * Override `tts_program` or `pipeline`
* `/tts/speak`
    * Plays audio from text body (POST) or `text` (GET)
    * Produces JSON
    * Override `tts_program`, `snd_program`, or `pipeline`
* `/snd/play`
    * Plays WAV audio via snd
    * Override `snd_program` or `pipeline`
* `/config`
    * Returns JSON config
* `/version`
    * Returns version info


## WebSocket API

`ws://localhost:13331/<endpoint>`

Audio streams are raw PCM in binary messages.

Use the `rate`, `width`, and `channels` parameters for sample rate (hertz), width (bytes), and channel count. By default, input audio is 16Khz 16-bit mono, and output audio is 22Khz 16-bit mono.

The client can "end" the audio stream by sending an empty binary message.

* `/pipeline/asr-tts`
    * Run pipeline from asr (stream in) to tts (stream out)
    * Produces JSON messages as events happen
    * Override `pipeline` or:
        * `asr_program`
        * `vad_program`
        * `handle_program`
        * `tts_program`
    * Use `in_rate`, `in_width`, `in_channels` for audio input format
    * Use `out_rate`, `out_width`, `out_channels` for audio output format
* `/wake/detect`
    * Detect wake word from websocket audio stream
    * Produces a JSON message when audio stream ends
    * Override `wake_program` or `pipeline`
* `/asr/transcribe`
    * Transcribe a websocket audio stream
    * Produces a JSON message when audio stream ends
    * Override `asr_program` or `pipeline`
* `/snd/play`
    * Play a websocket audio stream
    * Produces a JSON message when audio stream ends
    * Override `snd_program` or `pipeline`


================================================
FILE: bin/asr_adapter_raw2text.py
================================================
#!/usr/bin/env python3
import argparse
import logging
import shlex
import subprocess
from pathlib import Path

from rhasspy3.asr import Transcript
from rhasspy3.audio import AudioChunk, AudioChunkConverter, AudioStop
from rhasspy3.event import read_event, write_event

_FILE = Path(__file__)
_DIR = _FILE.parent
_LOGGER = logging.getLogger(_FILE.stem)


def main() -> None:
    """Adapt a raw-audio transcription command to the Wyoming protocol.

    Audio-chunk events read from our stdin are converted to the requested
    format and piped as raw PCM into the command's stdin; once the stream
    stops, the command's stdout is emitted as a transcript event.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("command", help="Command to run")
    parser.add_argument("--shell", action="store_true")
    parser.add_argument("--rate", type=int, help="Sample rate (hz)")
    parser.add_argument("--width", type=int, help="Sample width bytes")
    parser.add_argument("--channels", type=int, help="Sample channel count")
    parser.add_argument(
        "--debug", action="store_true", help="Print DEBUG messages to console"
    )
    args = parser.parse_args()
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    # With --shell the command string is passed through unsplit.
    command = args.command if args.shell else shlex.split(args.command)

    process = subprocess.Popen(
        command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=args.shell
    )
    chunk_converter = AudioChunkConverter(args.rate, args.width, args.channels)
    transcript_text = ""

    with process:
        assert process.stdin is not None
        assert process.stdout is not None

        # Pump audio chunks until the event stream ends or AudioStop arrives.
        for event in iter(read_event, None):
            if AudioChunk.is_type(event.type):
                # Normalize rate/width/channels before piping raw PCM.
                chunk = chunk_converter.convert(AudioChunk.from_event(event))
                process.stdin.write(chunk.audio)
                process.stdin.flush()
            elif AudioStop.is_type(event.type):
                break

        # Closing stdin signals end-of-audio; collect the command's output.
        stdout_bytes, _stderr_bytes = process.communicate()
        transcript_text = stdout_bytes.decode()

    write_event(Transcript(text=transcript_text.strip()).event())


if __name__ == "__main__":
    main()


================================================
FILE: bin/asr_adapter_wav2text.py
================================================
#!/usr/bin/env python3
import argparse
import logging
import shlex
import subprocess
import tempfile
import wave
from pathlib import Path

from rhasspy3.asr import Transcript
from rhasspy3.audio import AudioChunk, AudioStop
from rhasspy3.event import read_event, write_event

_FILE = Path(__file__)
_DIR = _FILE.parent
_LOGGER = logging.getLogger(_FILE.stem)


def main() -> None:
    """Adapt a WAV-file transcription command to the Wyoming protocol.

    Audio-chunk events read from stdin are written into a temporary WAV
    file.  When the stream stops, the command (with ``{wav_file}``
    substituted by the temp file's path) is run and its stdout is emitted
    as a transcript event.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "command",
        help="Command to run",
    )
    parser.add_argument("--shell", action="store_true")
    parser.add_argument(
        "--debug", action="store_true", help="Print DEBUG messages to console"
    )
    args = parser.parse_args()
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    with tempfile.NamedTemporaryFile(mode="wb+", suffix=".wav") as wav_io:
        # Substitute the temp file's path into the command template.
        args.command = args.command.format(wav_file=wav_io.name)
        if args.shell:
            command = args.command
        else:
            command = shlex.split(args.command)

        wav_params_set = False
        wav_file: wave.Wave_write = wave.open(wav_io, "wb")
        try:
            with wav_file:
                while True:
                    event = read_event()
                    if event is None:
                        break

                    if AudioChunk.is_type(event.type):
                        chunk = AudioChunk.from_event(event)
                        if not wav_params_set:
                            # WAV format is taken from the first chunk received.
                            wav_file.setframerate(chunk.rate)
                            wav_file.setsampwidth(chunk.width)
                            wav_file.setnchannels(chunk.channels)
                            wav_params_set = True

                        wav_file.writeframes(chunk.audio)
                    elif AudioStop.is_type(event.type):
                        break

            wav_io.seek(0)
            text = subprocess.check_output(command, shell=args.shell).decode()
            write_event(Transcript(text=text.strip()).event())
        except wave.Error:
            # Previously this error was swallowed silently, leaving no trace
            # and no transcript event.  Stay best-effort (no crash), but log
            # the failure so it can be diagnosed.
            _LOGGER.exception("Error while writing WAV file")


if __name__ == "__main__":
    main()


================================================
FILE: bin/asr_transcribe.py
================================================
#!/usr/bin/env python3
"""Transcribes mic audio into text."""
import argparse
import asyncio
import json
import logging
import sys
from pathlib import Path

from rhasspy3.asr import DOMAIN, Transcript
from rhasspy3.core import Rhasspy
from rhasspy3.event import async_read_event
from rhasspy3.mic import DOMAIN as MIC_DOMAIN
from rhasspy3.program import create_process
from rhasspy3.vad import segment

_FILE = Path(__file__)
_DIR = _FILE.parent
_LOGGER = logging.getLogger(_FILE.stem)


async def main() -> None:
    """Record mic audio, segment it with VAD, and print a transcript event.

    The mic, asr, and vad programs come from command-line overrides or fall
    back to the named pipeline's configuration.  The resulting transcript
    event is written to stdout as a single line of JSON.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-c",
        "--config",
        default=_DIR.parent / "config",
        help="Configuration directory",
    )
    parser.add_argument(
        "-p", "--pipeline", default="default", help="Name of pipeline to use"
    )
    parser.add_argument(
        "--mic-program", help="Name of mic program to use (overrides pipeline)"
    )
    parser.add_argument(
        "--asr-program", help="Name of asr program to use (overrides pipeline)"
    )
    parser.add_argument(
        "--vad-program", help="Name of vad program to use (overrides pipeline)"
    )
    parser.add_argument(
        "--debug", action="store_true", help="Print DEBUG messages to console"
    )
    args = parser.parse_args()
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    rhasspy = Rhasspy.load(args.config)
    pipeline = rhasspy.config.pipelines.get(args.pipeline)

    def resolve_program(override, attr_name):
        # Command-line override wins; otherwise use the pipeline's setting.
        if override:
            return override
        assert pipeline is not None, f"No pipeline named {args.pipeline}"
        return getattr(pipeline, attr_name)

    mic_program = resolve_program(args.mic_program, "mic")
    assert mic_program, "No mic program"

    asr_program = resolve_program(args.asr_program, "asr")
    assert asr_program, "No asr program"

    vad_program = resolve_program(args.vad_program, "vad")
    assert vad_program, "No vad program"

    # Transcribe voice command: mic audio -> vad segmentation -> asr
    async with (await create_process(rhasspy, MIC_DOMAIN, mic_program)) as mic_proc, (
        await create_process(rhasspy, DOMAIN, asr_program)
    ) as asr_proc:
        assert mic_proc.stdout is not None
        assert asr_proc.stdin is not None
        assert asr_proc.stdout is not None

        _LOGGER.info("Ready")
        await segment(rhasspy, vad_program, mic_proc.stdout, asr_proc.stdin)

        # Wait for the asr program to produce a transcript event.
        _LOGGER.debug("Waiting for transcript")
        transcript = Transcript(text="")
        while True:
            event = await async_read_event(asr_proc.stdout)
            if event is None:
                break
            if Transcript.is_type(event.type):
                transcript = Transcript.from_event(event)
                break

        json.dump(transcript.event().to_dict(), sys.stdout, ensure_ascii=False)
        print("", flush=True)


if __name__ == "__main__":
    asyncio.run(main())


================================================
FILE: bin/asr_transcribe_stream.py
================================================
#!/usr/bin/env python3
"""Transcribes raw audio from stdin into text."""
import argparse
import asyncio
import json
import logging
import sys
from pathlib import Path

from rhasspy3.asr import DOMAIN, Transcript
from rhasspy3.audio import (
    DEFAULT_IN_CHANNELS,
    DEFAULT_IN_RATE,
    DEFAULT_IN_WIDTH,
    DEFAULT_SAMPLES_PER_CHUNK,
    AudioChunk,
    AudioChunkConverter,
    AudioStart,
    AudioStop,
)
from rhasspy3.core import Rhasspy
from rhasspy3.event import async_read_event, async_write_event
from rhasspy3.program import create_process

_FILE = Path(__file__)
_DIR = _FILE.parent
_LOGGER = logging.getLogger(_FILE.stem)


async def main() -> None:
    """Transcribe raw PCM audio from stdin via an asr program.

    Reads raw audio in the "mic" format from stdin, converts it to the
    "asr" format, streams it to the asr program as Wyoming audio events
    (AudioStart, AudioChunk..., AudioStop), then prints the resulting
    transcript event to stdout as a single line of JSON.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-c",
        "--config",
        default=_DIR.parent / "config",
        help="Configuration directory",
    )
    parser.add_argument(
        "-p", "--pipeline", default="default", help="Name of pipeline to use"
    )
    parser.add_argument(
        "--asr-program", help="Name of asr program to use (overrides pipeline)"
    )
    # Format of the raw audio arriving on stdin
    parser.add_argument(
        "--mic-rate",
        type=int,
        default=DEFAULT_IN_RATE,
        help="Input sample rate (hertz)",
    )
    parser.add_argument(
        "--mic-width",
        type=int,
        default=DEFAULT_IN_WIDTH,
        help="Input sample width (bytes)",
    )
    parser.add_argument(
        "--mic-channels",
        type=int,
        default=DEFAULT_IN_CHANNELS,
        help="Input sample channel count",
    )
    # Format the asr program expects; input is converted to this
    parser.add_argument(
        "--asr-rate", type=int, default=DEFAULT_IN_RATE, help="asr sample rate (hertz)"
    )
    parser.add_argument(
        "--asr-width",
        type=int,
        default=DEFAULT_IN_WIDTH,
        help="asr sample width (bytes)",
    )
    parser.add_argument(
        "--asr-channels",
        type=int,
        default=DEFAULT_IN_CHANNELS,
        help="asr sample channel count",
    )
    parser.add_argument(
        "--samples-per-chunk",
        type=int,
        default=DEFAULT_SAMPLES_PER_CHUNK,
        help="Samples to process per chunk",
    )
    #
    parser.add_argument(
        "--debug", action="store_true", help="Print DEBUG messages to console"
    )
    args = parser.parse_args()
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    rhasspy = Rhasspy.load(args.config)
    asr_program = args.asr_program
    pipeline = rhasspy.config.pipelines.get(args.pipeline)

    # Fall back to the pipeline's configured asr program when no override given
    if not asr_program:
        assert pipeline is not None, f"No pipeline named {args.pipeline}"
        asr_program = pipeline.asr

    assert asr_program, "No asr program"
    _LOGGER.debug("asr program: %s", asr_program)

    # Transcribe raw audio from stdin
    converter = AudioChunkConverter(args.asr_rate, args.asr_width, args.asr_channels)
    # Chunk size is in the *input* (mic) format, since that's what stdin carries
    bytes_per_chunk = args.samples_per_chunk * args.mic_width * args.mic_channels
    timestamp = 0

    async with (await create_process(rhasspy, DOMAIN, asr_program)) as asr_proc:
        assert asr_proc.stdin is not None
        assert asr_proc.stdout is not None
        _LOGGER.debug("Started %s", asr_program)

        # Announce the stream format before sending any chunks
        await async_write_event(
            AudioStart(
                args.asr_rate, args.asr_width, args.asr_channels, timestamp=timestamp
            ).event(),
            asr_proc.stdin,
        )

        audio_bytes = sys.stdin.buffer.read(bytes_per_chunk)
        while audio_bytes:
            # Chunk is created in mic format, timestamped, then converted
            # to the asr format before being sent.
            chunk = AudioChunk(
                args.mic_rate,
                args.mic_width,
                args.mic_channels,
                audio_bytes,
                timestamp=timestamp,
            )
            timestamp += chunk.milliseconds
            chunk = converter.convert(chunk)

            # Write audio
            await async_write_event(
                chunk.event(),
                asr_proc.stdin,
            )
            audio_bytes = sys.stdin.buffer.read(bytes_per_chunk)

        # Signal end of audio so the asr program finalizes its transcript
        await async_write_event(AudioStop(timestamp=timestamp).event(), asr_proc.stdin)

        # Read transcript
        transcript = Transcript(text="")
        while True:
            event = await async_read_event(asr_proc.stdout)
            if event is None:
                break

            if Transcript.is_type(event.type):
                transcript = Transcript.from_event(event)
                break

        json.dump(transcript.event().to_dict(), sys.stdout, ensure_ascii=False)
        print("", flush=True)


if __name__ == "__main__":
    asyncio.run(main())


================================================
FILE: bin/asr_transcribe_wav.py
================================================
#!/usr/bin/env python3
"""Transcribes WAV audio into text."""
import argparse
import asyncio
import io
import json
import logging
import os
import sys
import time
import wave
from pathlib import Path
from typing import Iterable, Optional

from rhasspy3.asr import DOMAIN, Transcript
from rhasspy3.audio import (
    DEFAULT_IN_CHANNELS,
    DEFAULT_IN_RATE,
    DEFAULT_IN_WIDTH,
    DEFAULT_SAMPLES_PER_CHUNK,
    AudioChunkConverter,
    AudioStart,
    AudioStop,
    wav_to_chunks,
)
from rhasspy3.core import Rhasspy
from rhasspy3.event import async_read_event, async_write_event
from rhasspy3.program import create_process

_FILE = Path(__file__)
_DIR = _FILE.parent
_LOGGER = logging.getLogger(_FILE.stem)


async def main() -> None:
    """Transcribe one or more WAV files with the configured asr program.

    Each WAV (from path arguments or stdin) is split into chunks, converted to
    the requested rate/width/channels, streamed to a freshly-started asr
    process as audio events, and the resulting Transcript event is written to
    stdout as a single JSON line.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-c",
        "--config",
        default=_DIR.parent / "config",
        help="Configuration directory",
    )
    parser.add_argument(
        "-p", "--pipeline", default="default", help="Name of pipeline to use"
    )
    parser.add_argument(
        "--asr-program", help="Name of asr program to use (overrides pipeline)"
    )
    #
    parser.add_argument(
        "--rate", type=int, default=DEFAULT_IN_RATE, help="Sample rate (hertz)"
    )
    parser.add_argument(
        "--width", type=int, default=DEFAULT_IN_WIDTH, help="Sample width (bytes)"
    )
    parser.add_argument(
        "--channels", type=int, default=DEFAULT_IN_CHANNELS, help="Sample channel count"
    )
    parser.add_argument(
        "--samples-per-chunk", type=int, default=DEFAULT_SAMPLES_PER_CHUNK
    )
    #
    parser.add_argument("wav", nargs="*", help="Path to WAV file(s)")
    #
    parser.add_argument(
        "--debug", action="store_true", help="Print DEBUG messages to console"
    )
    args = parser.parse_args()
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    rhasspy = Rhasspy.load(args.config)
    asr_program = args.asr_program
    pipeline = rhasspy.config.pipelines.get(args.pipeline)

    # Fall back to the pipeline's asr program when --asr-program is not given.
    if not asr_program:
        assert pipeline is not None, f"No pipeline named {args.pipeline}"
        asr_program = pipeline.asr

    assert asr_program, "No asr program"
    _LOGGER.debug("asr program: %s", asr_program)

    # Transcribe WAV file(s)
    for wav_bytes in get_wav_bytes(args):
        # Converter normalizes chunk rate/width/channels to the requested format.
        converter = AudioChunkConverter(args.rate, args.width, args.channels)

        # Split the WAV into fixed-size chunks up front (the audio is fully
        # buffered in memory before the asr process is started).
        with io.BytesIO(wav_bytes) as wav_io:
            with wave.open(wav_io, "rb") as wav_file:
                chunks = list(wav_to_chunks(wav_file, args.samples_per_chunk))

        # One asr process per WAV input.
        async with (await create_process(rhasspy, DOMAIN, asr_program)) as asr_proc:
            assert asr_proc.stdin is not None
            assert asr_proc.stdout is not None

            # Write audio
            start_time = time.monotonic_ns()
            await async_write_event(
                AudioStart(args.rate, args.width, args.channels, timestamp=0).event(),
                asr_proc.stdin,
            )

            # Timestamp of the last chunk sent; forwarded with AudioStop.
            last_timestamp: Optional[int] = None
            for chunk in chunks:
                chunk = converter.convert(chunk)
                await async_write_event(chunk.event(), asr_proc.stdin)
                last_timestamp = chunk.timestamp

            await async_write_event(
                AudioStop(timestamp=last_timestamp).event(),
                asr_proc.stdin,
            )

            # Read transcript
            _LOGGER.debug("Waiting for transcription")
            transcript = Transcript(text="")
            while True:
                event = await async_read_event(asr_proc.stdout)
                if event is None:
                    # asr process closed its stdout; fall back to empty text.
                    break

                if Transcript.is_type(event.type):
                    transcript = Transcript.from_event(event)
                    end_time = time.monotonic_ns()
                    _LOGGER.debug(
                        "Transcribed in %s second(s)", (end_time - start_time) / 1e9
                    )
                    break

            _LOGGER.debug(transcript)

            # One JSON event per line on stdout.
            json.dump(transcript.event().to_dict(), sys.stdout, ensure_ascii=False)
            print("", flush=True)


def get_wav_bytes(args: argparse.Namespace) -> Iterable[bytes]:
    """Yield the raw bytes of each WAV input (path arguments, else stdin)."""
    if not args.wav:
        # No paths given: read a single WAV from standard input.
        if os.isatty(sys.stdin.fileno()):
            print("Reading WAV audio from stdin", file=sys.stderr)

        yield sys.stdin.buffer.read()
        return

    # One WAV per path argument.
    for wav_path in args.wav:
        _LOGGER.debug("Processing %s", wav_path)
        with open(wav_path, "rb") as reader:
            yield reader.read()


# Script entry point: run the async main() on a new event loop.
if __name__ == "__main__":
    asyncio.run(main())


================================================
FILE: bin/client_unix_socket.py
================================================
#!/usr/bin/env python3
import argparse
import logging
import socket
import threading

from rhasspy3.event import read_event, write_event

_LOGGER = logging.getLogger("wrapper_unix_socket")


def main():
    """Bridge rhasspy3 events between this process's stdio and a Unix socket.

    Connects to the given Unix domain socket and starts two daemon threads:
    one copying events socket -> stdout, one copying events stdin -> socket.
    Exits when the stdin side reaches EOF (or on Ctrl-C).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("socketfile", help="Path to Unix domain socket file")
    parser.add_argument("--debug", action="store_true", help="Log DEBUG messages")
    args = parser.parse_args()

    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    _LOGGER.debug("Connecting to %s", args.socketfile)
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    try:
        sock.connect(args.socketfile)
        _LOGGER.debug("Connected")

        try:
            with sock.makefile(mode="rwb") as conn_file:
                read_thread = threading.Thread(
                    target=read_proc, args=(conn_file,), daemon=True
                )
                read_thread.start()

                write_thread = threading.Thread(
                    target=write_proc, args=(conn_file,), daemon=True
                )
                write_thread.start()
                write_thread.join()
        except KeyboardInterrupt:
            pass
    finally:
        # Fix: close the socket on all exit paths (it was previously leaked).
        sock.close()


def read_proc(conn_file):
    """Copy events from the socket connection to stdout until EOF."""
    try:
        # read_event() returns None at end-of-stream, which stops iteration.
        for event in iter(lambda: read_event(conn_file), None):
            write_event(event)
    except Exception:
        _LOGGER.exception("Unexpected error in read thread")


def write_proc(conn_file):
    """Copy events from stdin to the socket connection until EOF."""
    try:
        # read_event() with no argument reads stdin; None signals end-of-stream.
        for event in iter(read_event, None):
            write_event(event, conn_file)
    except Exception:
        _LOGGER.exception("Unexpected error in write thread")


# Script entry point.
if __name__ == "__main__":
    main()


================================================
FILE: bin/config_print.py
================================================
#!/usr/bin/env python3
"""Prints configuration as JSON."""
import argparse
import json
import logging
import sys
from pathlib import Path

from rhasspy3.core import Rhasspy

_FILE = Path(__file__)
_DIR = _FILE.parent
_LOGGER = logging.getLogger(_FILE.stem)


def main() -> None:
    """Load the Rhasspy configuration and print it to stdout as JSON."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-c",
        "--config",
        default=_DIR.parent / "config",
        help="Configuration directory",
    )
    parser.add_argument("--indent", type=int, default=4)
    parser.add_argument(
        "--debug", action="store_true", help="Print DEBUG messages to console"
    )
    args = parser.parse_args()
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    rhasspy = Rhasspy.load(args.config)
    json.dump(rhasspy.config_dict, sys.stdout, indent=args.indent, ensure_ascii=False)

    # Fix: newline-terminate and flush the JSON document, consistent with the
    # other CLI tools in bin/ (the output previously had no final newline).
    print("", flush=True)


# Script entry point.
if __name__ == "__main__":
    main()


================================================
FILE: bin/handle_adapter_json.py
================================================
#!/usr/bin/env python3
import argparse
import json
import logging
import shlex
import subprocess
from pathlib import Path

from rhasspy3.event import read_event, write_event
from rhasspy3.handle import Handled, NotHandled
from rhasspy3.intent import Intent, NotRecognized

_FILE = Path(__file__)
_DIR = _FILE.parent
_LOGGER = logging.getLogger(_FILE.stem)


def main() -> None:
    """Adapter: run a command once with intent JSON on stdin, report the result.

    Waits for an Intent (or NotRecognized) event on stdin.  For an Intent, the
    event is flattened to a simple JSON object (intent name, entity list, and a
    name->value slot mapping) and piped to the wrapped command; the command's
    first non-empty output line becomes a Handled event (NotHandled if the
    command printed nothing).  A NotRecognized event yields NotHandled.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "command",
        help="Command to run",
    )
    parser.add_argument("--shell", action="store_true")
    parser.add_argument(
        "--debug", action="store_true", help="Print DEBUG messages to console"
    )
    args = parser.parse_args()
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    # With --shell the command string is passed to the shell verbatim;
    # otherwise it is tokenized like a POSIX command line.
    if args.shell:
        command = args.command
    else:
        command = shlex.split(args.command)

    proc = subprocess.Popen(
        command,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        shell=args.shell,
        universal_newlines=True,  # text-mode pipes
    )
    with proc:
        assert proc.stdin is not None
        assert proc.stdout is not None

        while True:
            event = read_event()
            if event is None:
                # End of input stream.
                break

            if Intent.is_type(event.type):
                intent = Intent.from_event(event)
                # Send the flattened intent JSON and wait for the command to exit.
                stdout, _stderr = proc.communicate(
                    input=json.dumps(
                        {
                            "intent": {
                                "name": intent.name,
                            },
                            "entities": [
                                {"entity": entity.name, "value": entity.value}
                                for entity in intent.entities or []
                            ],
                            "slots": {
                                entity.name: entity.value
                                for entity in intent.entities or []
                            },
                        },
                        ensure_ascii=False,
                    )
                )
                # First non-empty line of command output = handled response text.
                handled = False
                for line in stdout.splitlines():
                    line = line.strip()
                    if line:
                        write_event(Handled(text=line).event())
                        handled = True
                        break

                if not handled:
                    write_event(NotHandled().event())

                break

            if NotRecognized.is_type(event.type):
                # Upstream could not recognize an intent: nothing to handle.
                write_event(NotHandled().event())
                break


# Script entry point.
if __name__ == "__main__":
    main()


================================================
FILE: bin/handle_adapter_text.py
================================================
#!/usr/bin/env python3
import argparse
import logging
import shlex
import subprocess
from pathlib import Path

from rhasspy3.asr import Transcript
from rhasspy3.event import read_event, write_event
from rhasspy3.handle import Handled, NotHandled

_FILE = Path(__file__)
_DIR = _FILE.parent
_LOGGER = logging.getLogger(_FILE.stem)


def main() -> None:
    """Adapter: run a command once with transcript text on stdin.

    Waits for a Transcript event, pipes its text to the wrapped command, and
    reports the command's first non-empty output line as a Handled event
    (NotHandled when the command produced no output).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "command",
        help="Command to run",
    )
    parser.add_argument("--shell", action="store_true")
    parser.add_argument(
        "--debug", action="store_true", help="Print DEBUG messages to console"
    )
    args = parser.parse_args()
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    # Shell mode keeps the raw string; otherwise tokenize the command line.
    command = args.command if args.shell else shlex.split(args.command)

    proc = subprocess.Popen(
        command,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        shell=args.shell,
        universal_newlines=True,
    )
    with proc:
        assert proc.stdin is not None
        assert proc.stdout is not None

        while True:
            event = read_event()
            if event is None:
                break

            if not Transcript.is_type(event.type):
                # Ignore anything that is not a transcript.
                continue

            # Feed the transcript text to the command and wait for it to exit.
            transcript = Transcript.from_event(event)
            stdout, _stderr = proc.communicate(input=transcript.text)

            # The first non-empty output line becomes the handled response.
            response = next(
                (line.strip() for line in stdout.splitlines() if line.strip()),
                None,
            )
            if response is None:
                write_event(NotHandled().event())
            else:
                write_event(Handled(text=response).event())

            break


# Script entry point.
if __name__ == "__main__":
    main()


================================================
FILE: bin/handle_intent.py
================================================
#!/usr/bin/env python3
"""Handle text or intent."""
import argparse
import asyncio
import json
import logging
import os
import sys
from pathlib import Path
from typing import Iterable

from rhasspy3.core import Rhasspy
from rhasspy3.handle import handle
from rhasspy3.intent import Intent

_FILE = Path(__file__)
_DIR = _FILE.parent
_LOGGER = logging.getLogger(_FILE.stem)


async def main() -> None:
    """Handle Intent JSON events with the configured handle program.

    Each input (argument or stdin line) is parsed as an Intent event, passed
    to the handle program, and the resulting event is printed to stdout as a
    single JSON line.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-c",
        "--config",
        default=_DIR.parent / "config",
        help="Configuration directory",
    )
    parser.add_argument(
        "-p", "--pipeline", default="default", help="Name of pipeline to use"
    )
    parser.add_argument(
        "--handle-program", help="Name of handle program to use (overrides pipeline)"
    )
    parser.add_argument("intent", nargs="*", help="Intent JSON event(s) to handle")
    #
    parser.add_argument(
        "--debug", action="store_true", help="Print DEBUG messages to console"
    )
    args = parser.parse_args()
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    rhasspy = Rhasspy.load(args.config)
    handle_program = args.handle_program
    pipeline = rhasspy.config.pipelines.get(args.pipeline)

    # Fall back to the pipeline's handle program when not given explicitly.
    if not handle_program:
        assert pipeline is not None, f"No pipeline named {args.pipeline}"
        handle_program = pipeline.handle

    assert handle_program, "No handle program"

    for line in get_input(args):
        # Intent JSON
        handle_input: Intent = Intent.from_dict(json.loads(line))
        handle_result = await handle(rhasspy, handle_program, handle_input)
        if handle_result is None:
            _LOGGER.warning("No result")
            continue

        _LOGGER.debug(handle_result)
        json.dump(handle_result.event().to_dict(), sys.stdout, ensure_ascii=False)
        # Fix: newline-terminate and flush each JSON result so consecutive
        # results are not concatenated on one line (matches intent_recognize.py
        # and the asr tools).
        print("", flush=True)


def get_input(args: argparse.Namespace) -> Iterable[str]:
    """Yield intent JSON strings from the command line, or lines from stdin."""
    if args.intent:
        # Command-line arguments take precedence over stdin.
        yield from args.intent
        return

    if os.isatty(sys.stdin.fileno()):
        print("Reading input from stdin", file=sys.stderr)

    # Strip surrounding whitespace and skip blank lines.
    for raw_line in sys.stdin:
        stripped = raw_line.strip()
        if stripped:
            yield stripped


# Script entry point: run the async main() on a new event loop.
if __name__ == "__main__":
    asyncio.run(main())


================================================
FILE: bin/handle_text.py
================================================
#!/usr/bin/env python3
"""Handle text or intent."""
import argparse
import asyncio
import json
import logging
import os
import sys
from pathlib import Path
from typing import Iterable

from rhasspy3.asr import Transcript
from rhasspy3.core import Rhasspy
from rhasspy3.handle import handle

_FILE = Path(__file__)
_DIR = _FILE.parent
_LOGGER = logging.getLogger(_FILE.stem)


async def main() -> None:
    """Handle text inputs with the configured handle program.

    Each input (argument or stdin line) is wrapped in a Transcript event,
    passed to the handle program, and the resulting event is printed to
    stdout as a single JSON line.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-c",
        "--config",
        default=_DIR.parent / "config",
        help="Configuration directory",
    )
    parser.add_argument(
        "-p", "--pipeline", default="default", help="Name of pipeline to use"
    )
    parser.add_argument(
        "--handle-program", help="Name of handle program to use (overrides pipeline)"
    )
    parser.add_argument("text", nargs="*", help="Text input to handle")
    #
    parser.add_argument(
        "--debug", action="store_true", help="Print DEBUG messages to console"
    )
    args = parser.parse_args()
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    rhasspy = Rhasspy.load(args.config)
    handle_program = args.handle_program
    pipeline = rhasspy.config.pipelines.get(args.pipeline)

    # Fall back to the pipeline's handle program when not given explicitly.
    if not handle_program:
        assert pipeline is not None, f"No pipeline named {args.pipeline}"
        handle_program = pipeline.handle

    assert handle_program, "No handle program"

    for line in get_input(args):
        # Text
        handle_input = Transcript(text=line)
        handle_result = await handle(rhasspy, handle_program, handle_input)
        if handle_result is None:
            _LOGGER.warning("No result")
            continue

        _LOGGER.debug(handle_result)
        json.dump(handle_result.event().to_dict(), sys.stdout, ensure_ascii=False)
        # Fix: newline-terminate and flush each JSON result so consecutive
        # results are not concatenated on one line (matches intent_recognize.py
        # and the asr tools).
        print("", flush=True)


def get_input(args: argparse.Namespace) -> Iterable[str]:
    """Yield text inputs from the command line, or non-blank lines from stdin."""
    if args.text:
        # Positional arguments take precedence over stdin.
        yield from args.text
        return

    if os.isatty(sys.stdin.fileno()):
        print("Reading input from stdin", file=sys.stderr)

    # Strip surrounding whitespace and skip blank lines.
    for raw_line in sys.stdin:
        stripped = raw_line.strip()
        if stripped:
            yield stripped


# Script entry point: run the async main() on a new event loop.
if __name__ == "__main__":
    asyncio.run(main())


================================================
FILE: bin/intent_recognize.py
================================================
#!/usr/bin/env python3
import argparse
import asyncio
import json
import logging
import os
import sys
from pathlib import Path
from typing import Iterable

from rhasspy3.core import Rhasspy
from rhasspy3.intent import recognize

_FILE = Path(__file__)
_DIR = _FILE.parent
_LOGGER = logging.getLogger(_FILE.stem)


async def main() -> None:
    """Recognize each input text with the configured intent program.

    Every recognized result is printed to stdout as one JSON line; inputs
    that produce no result are skipped silently.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-c",
        "--config",
        default=_DIR.parent / "config",
        help="Configuration directory",
    )
    parser.add_argument(
        "-p", "--pipeline", default="default", help="Name of pipeline to use"
    )
    parser.add_argument(
        "--intent-program", help="Name of intent program to use (overrides pipeline)"
    )
    parser.add_argument("text", nargs="*", help="Text to recognize")
    parser.add_argument(
        "--debug", action="store_true", help="Print DEBUG messages to console"
    )
    args = parser.parse_args()
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    rhasspy = Rhasspy.load(args.config)
    intent_program = args.intent_program
    pipeline = rhasspy.config.pipelines.get(args.pipeline)

    # Fall back to the pipeline's intent program when not given explicitly.
    if not intent_program:
        assert pipeline is not None, f"No pipeline named {args.pipeline}"
        intent_program = pipeline.intent

    assert intent_program, "No intent program"

    for input_text in get_texts(args):
        result = await recognize(rhasspy, intent_program, input_text)
        if result is None:
            # No recognition result; move on to the next input.
            continue

        # One JSON event payload per line.
        json.dump(result.event().data, sys.stdout, ensure_ascii=False)
        print("", flush=True)


def get_texts(args: argparse.Namespace) -> Iterable[str]:
    """Yield texts to recognize: positional arguments first, else stdin lines."""
    if args.text:
        yield from args.text
        return

    if os.isatty(sys.stdin.fileno()):
        print("Reading text from stdin", file=sys.stderr)

    # Strip surrounding whitespace and skip blank lines.
    for raw in sys.stdin:
        text = raw.strip()
        if text:
            yield text


# Script entry point: run the async main() on a new event loop.
if __name__ == "__main__":
    asyncio.run(main())


================================================
FILE: bin/mic_adapter_raw.py
================================================
#!/usr/bin/env python3
"""Reads raw audio chunks from stdin."""
import argparse
import logging
import shlex
import subprocess
import time
from pathlib import Path

from rhasspy3.audio import DEFAULT_SAMPLES_PER_CHUNK, AudioChunk, AudioStart
from rhasspy3.event import write_event

_FILE = Path(__file__)
_DIR = _FILE.parent
_LOGGER = logging.getLogger(_FILE.stem)


def main() -> None:
    """Adapter: run a recording command and emit its raw audio as events.

    Reads fixed-size chunks of raw PCM from the wrapped command's stdout and
    writes an AudioStart event followed by AudioChunk events to this
    process's stdout.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "command",
        help="Command to run",
    )
    parser.add_argument("--shell", action="store_true", help="Run command with shell")
    #
    parser.add_argument(
        "--samples-per-chunk",
        type=int,
        default=DEFAULT_SAMPLES_PER_CHUNK,
        help="Number of samples to read at a time from command",
    )
    parser.add_argument(
        "--rate",
        type=int,
        required=True,
        help="Sample rate (hz)",
    )
    parser.add_argument(
        "--width",
        type=int,
        required=True,
        help="Sample width bytes",
    )
    parser.add_argument(
        "--channels",
        type=int,
        required=True,
        help="Sample channel count",
    )
    #
    parser.add_argument(
        "--debug", action="store_true", help="Print DEBUG messages to console"
    )
    args = parser.parse_args()
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    bytes_per_chunk = args.samples_per_chunk * args.width * args.channels

    if args.shell:
        command = args.command
    else:
        command = shlex.split(args.command)

    # Fix: forward shell=args.shell (as the other adapters in bin/ do).
    # Previously a --shell command string was exec'd directly, which fails
    # because the whole string is treated as a single executable path.
    proc = subprocess.Popen(command, stdout=subprocess.PIPE, shell=args.shell)
    with proc:
        assert proc.stdout is not None

        write_event(
            AudioStart(
                args.rate, args.width, args.channels, timestamp=time.monotonic_ns()
            ).event()
        )
        while True:
            audio_bytes = proc.stdout.read(bytes_per_chunk)
            if not audio_bytes:
                # Command exited or closed its stdout.
                break

            write_event(
                AudioChunk(
                    args.rate,
                    args.width,
                    args.channels,
                    audio_bytes,
                    timestamp=time.monotonic_ns(),
                ).event()
            )


# Script entry point.
if __name__ == "__main__":
    main()


================================================
FILE: bin/mic_record_sample.py
================================================
#!/usr/bin/env python3
"""Record a spoken audio sample to a WAV file."""
import argparse
import asyncio
import logging
import wave
from collections import deque
from pathlib import Path
from typing import Deque

from rhasspy3.audio import AudioChunk
from rhasspy3.core import Rhasspy
from rhasspy3.event import async_read_event, async_write_event
from rhasspy3.mic import DOMAIN as MIC_DOMAIN
from rhasspy3.program import create_process
from rhasspy3.vad import DOMAIN as VAD_DOMAIN
from rhasspy3.vad import VoiceStarted, VoiceStopped

_FILE = Path(__file__)
_DIR = _FILE.parent
_LOGGER = logging.getLogger(_FILE.stem)


async def main() -> None:
    """Record one voice command per output WAV path, using mic + vad programs.

    For each WAV file argument: starts the configured mic and vad programs,
    forwards mic audio to the vad, buffers audio until the vad reports that
    speech started, then writes audio frames to the WAV until speech stops
    (plus an optional number of trailing chunks).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("wav_file", nargs="+", help="Path to WAV file(s) to write")
    #
    parser.add_argument(
        "-c",
        "--config",
        default=_DIR.parent / "config",
        help="Configuration directory",
    )
    parser.add_argument(
        "-p", "--pipeline", default="default", help="Name of pipeline to use"
    )
    parser.add_argument(
        "--mic-program", help="Name of mic program to use (overrides pipeline)"
    )
    parser.add_argument(
        "--vad-program", help="Name of vad program to use (overrides pipeline)"
    )
    #
    parser.add_argument(
        "--chunk-buffer-size",
        type=int,
        default=25,
        help="Audio chunks to buffer before start is known",
    )
    parser.add_argument(
        "-b",
        "--keep-chunks-before",
        type=int,
        default=5,
        help="Audio chunks to keep before voice starts",
    )
    parser.add_argument(
        "-a",
        "--keep-chunks-after",
        type=int,
        default=0,
        help="Audio chunks to keep after voice ends",
    )
    #
    parser.add_argument(
        "--debug", action="store_true", help="Print DEBUG messages to console"
    )
    args = parser.parse_args()
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    rhasspy = Rhasspy.load(args.config)
    mic_program = args.mic_program
    vad_program = args.vad_program
    pipeline = rhasspy.config.pipelines.get(args.pipeline)

    # Resolve mic/vad programs from the pipeline when not given explicitly.
    if not mic_program:
        assert pipeline is not None, f"No pipeline named {args.pipeline}"
        mic_program = pipeline.mic

    assert mic_program, "No mic program"

    if not vad_program:
        assert pipeline is not None, f"No pipeline named {args.pipeline}"
        vad_program = pipeline.vad

    assert vad_program, "No vad program"

    for wav_path in args.wav_file:
        wav_file: wave.Wave_write = wave.open(wav_path, "wb")
        with wav_file:
            is_first_chunk = True

            # Audio kept before we get the event that the voice command started
            # at a timestep in the past.
            chunk_buffer: Deque[AudioChunk] = deque(
                maxlen=max(args.chunk_buffer_size, args.keep_chunks_before)
            )

            # Run mic and vad programs for the duration of this recording.
            async with (
                await create_process(rhasspy, MIC_DOMAIN, mic_program)
            ) as mic_proc, (
                await create_process(rhasspy, VAD_DOMAIN, vad_program)
            ) as vad_proc:
                assert mic_proc.stdout is not None
                assert vad_proc.stdin is not None
                assert vad_proc.stdout is not None

                _LOGGER.info("Recording %s", wav_path)
                # Read from mic and vad concurrently; react to whichever
                # produces an event first.
                mic_task = asyncio.create_task(async_read_event(mic_proc.stdout))
                vad_task = asyncio.create_task(async_read_event(vad_proc.stdout))
                pending = {mic_task, vad_task}

                before_command = True
                while True:
                    done, pending = await asyncio.wait(
                        pending, return_when=asyncio.FIRST_COMPLETED
                    )
                    if mic_task in done:
                        mic_event = mic_task.result()
                        if mic_event is None:
                            # Mic program closed its stdout.
                            break

                        # Process chunk
                        if AudioChunk.is_type(mic_event.type):
                            chunk = AudioChunk.from_event(mic_event)
                            if is_first_chunk:
                                # WAV format parameters come from the first chunk.
                                _LOGGER.debug("Receiving audio")
                                is_first_chunk = False
                                wav_file.setframerate(chunk.rate)
                                wav_file.setsampwidth(chunk.width)
                                wav_file.setnchannels(chunk.channels)

                            # Every chunk is forwarded to the vad for detection.
                            await async_write_event(mic_event, vad_proc.stdin)
                            if before_command:
                                # Buffer until speech is detected.
                                chunk_buffer.append(chunk)
                            else:
                                wav_file.writeframes(chunk.audio)

                        # Next chunk
                        mic_task = asyncio.create_task(
                            async_read_event(mic_proc.stdout)
                        )
                        pending.add(mic_task)

                    if vad_task in done:
                        vad_event = vad_task.result()
                        if vad_event is None:
                            # Vad program closed its stdout.
                            break

                        if VoiceStarted.is_type(vad_event.type):
                            if before_command:
                                # Start of voice command
                                voice_started = VoiceStarted.from_event(vad_event)
                                if voice_started.timestamp is None:
                                    # Keep chunks before
                                    # NOTE(review): chunks_left is never
                                    # decremented, so this loop writes *all*
                                    # buffered chunks, not only the last
                                    # keep_chunks_before -- verify intent.
                                    chunks_left = args.keep_chunks_before
                                    while chunk_buffer and (chunks_left > 0):
                                        chunk = chunk_buffer.popleft()
                                        wav_file.writeframes(chunk.audio)
                                else:
                                    # Locate start chunk
                                    start_idx = 0
                                    for i, chunk in enumerate(chunk_buffer):
                                        if (chunk.timestamp is not None) and (
                                            chunk.timestamp >= voice_started.timestamp
                                        ):
                                            start_idx = i
                                            break

                                    # Back up by "keep chunks" and then write audio forward
                                    start_idx = max(
                                        0, start_idx - args.keep_chunks_before
                                    )
                                    for i, chunk in enumerate(chunk_buffer):
                                        if i >= start_idx:
                                            wav_file.writeframes(chunk.audio)

                                    chunk_buffer.clear()

                                before_command = False
                                _LOGGER.info("Speaking started")
                        elif VoiceStopped.is_type(vad_event.type):
                            # End of voice command
                            _LOGGER.info("Speaking ended")
                            break

                        # Next VAD event
                        vad_task = asyncio.create_task(
                            async_read_event(vad_proc.stdout)
                        )
                        pending.add(vad_task)

                # After chunks
                # Keep up to keep_chunks_after extra chunks after speech ends.
                num_chunks_left = args.keep_chunks_after
                while num_chunks_left > 0:
                    mic_event = await mic_task
                    if mic_event is None:
                        break

                    if AudioChunk.is_type(mic_event.type):
                        chunk = AudioChunk.from_event(mic_event)
                        wav_file.writeframes(chunk.audio)
                        num_chunks_left -= 1

                    if num_chunks_left > 0:
                        mic_task = asyncio.create_task(
                            async_read_event(mic_proc.stdout)
                        )


# Script entry point: run the async main(); exit quietly on Ctrl-C.
if __name__ == "__main__":
    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        pass


================================================
FILE: bin/mic_test_energy.py
================================================
#!/usr/bin/env python3
"""Prints microphone energy level to console for testing."""
import argparse
import asyncio
import audioop
import logging
from pathlib import Path

from rhasspy3.audio import AudioChunk, AudioStop
from rhasspy3.core import Rhasspy
from rhasspy3.event import async_read_event
from rhasspy3.mic import DOMAIN as MIC_DOMAIN
from rhasspy3.program import create_process

_FILE = Path(__file__)
_DIR = _FILE.parent
_LOGGER = logging.getLogger(_FILE.stem)


async def main() -> None:
    """Read audio chunks from a mic program and print a live energy meter.

    The meter is either raw numbers (--numeric) or a bar graph scaled
    against the loudest level seen so far.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-c",
        "--config",
        default=_DIR.parent / "config",
        help="Configuration directory",
    )
    parser.add_argument(
        "-p", "--pipeline", default="default", help="Name of pipeline to use"
    )
    parser.add_argument(
        "--mic-program", help="Name of mic program to use (overrides pipeline)"
    )
    #
    parser.add_argument(
        "--levels", type=int, default=40, help="Number of levels to display"
    )
    parser.add_argument(
        "--numeric",
        action="store_true",
        help="Print energy numeric values instead of showing level",
    )
    #
    parser.add_argument(
        "--debug", action="store_true", help="Print DEBUG messages to console"
    )
    args = parser.parse_args()
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    rhasspy = Rhasspy.load(args.config)
    mic_program = args.mic_program
    pipeline = rhasspy.config.pipelines.get(args.pipeline)

    # Fall back to the pipeline's mic program when --mic-program isn't given
    if not mic_program:
        assert pipeline is not None, f"No pipeline named {args.pipeline}"
        mic_program = pipeline.mic

    assert mic_program, "No mic program"
    # Loudest debiased energy seen so far; used to scale the bar graph
    max_energy = 0

    async with (await create_process(rhasspy, MIC_DOMAIN, mic_program)) as mic_proc:
        assert mic_proc.stdout is not None

        while True:
            event = await async_read_event(mic_proc.stdout)
            if event is None:
                # Mic program exited / closed its stdout
                break

            if AudioChunk.is_type(event.type):
                chunk = AudioChunk.from_event(event)
                # Pack -RMS as a little-endian 16-bit sample and add that
                # constant to every sample before taking RMS again.
                # NOTE(review): appears to be a DC-bias compensation trick;
                # the 2-byte packing assumes width == 2 (16-bit audio) —
                # confirm against the mic program's output format.
                energy = -audioop.rms(chunk.audio, chunk.width)
                energy_bytes = bytes([energy & 0xFF, (energy >> 8) & 0xFF])
                debiased_energy = audioop.rms(
                    audioop.add(
                        chunk.audio,
                        energy_bytes * (len(chunk.audio) // chunk.width),
                        chunk.width,
                    ),
                    chunk.width,
                )

                # Keep max at >= 1 so the division below can't be by zero
                max_energy = max(max_energy, debiased_energy)
                max_energy = max(1, max_energy)

                if args.numeric:
                    # Print numbers
                    print(debiased_energy, "/", max_energy)
                else:
                    # Print graphic
                    energy_level = int(args.levels * (debiased_energy / max_energy))
                    energy_level = max(0, energy_level)
                    print(
                        "\r",  # We still use typewriters!
                        "[",
                        "*" * energy_level,
                        " " * (args.levels - energy_level),
                        "]",
                        sep="",
                        end="",
                    )

            elif AudioStop.is_type(event.type):
                break


if __name__ == "__main__":
    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        pass


================================================
FILE: bin/pipeline_run.py
================================================
#!/usr/bin/env python3
"""Run a pipeline all or part of the way."""
import argparse
import asyncio
import json
import logging
import sys
from pathlib import Path
from typing import IO, Optional, Union

from rhasspy3.asr import Transcript
from rhasspy3.audio import DEFAULT_SAMPLES_PER_CHUNK
from rhasspy3.core import Rhasspy
from rhasspy3.event import Event
from rhasspy3.handle import Handled, NotHandled
from rhasspy3.intent import Intent, NotRecognized
from rhasspy3.pipeline import StopAfterDomain
from rhasspy3.pipeline import run as run_pipeline
from rhasspy3.wake import Detection

_FILE = Path(__file__)
_DIR = _FILE.parent
_LOGGER = logging.getLogger(_FILE.stem)


async def main() -> None:
    """Run a pipeline all or part of the way.

    Command-line options can supply the result of individual stages (wake,
    asr, intent, handle, tts), causing those stages to be skipped.  Each
    pipeline result is written to stdout as one line of JSON.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-c",
        "--config",
        default=_DIR.parent / "config",
        help="Configuration directory",
    )
    parser.add_argument(
        "-p", "--pipeline", default="default", help="Name of pipeline to use"
    )
    #
    parser.add_argument(
        "--stop-after",
        choices=[domain.value for domain in StopAfterDomain],
        help="Domain to stop pipeline after",
    )
    #
    parser.add_argument(
        "--wake-name", help="Skip wake word detection and use name instead"
    )
    parser.add_argument(
        "--asr-wav",
        help="Use WAV file for speech to text instead of mic input (skips wake)",
    )
    parser.add_argument("--asr-text", help="Use text for asr transcript (skips wake)")
    parser.add_argument(
        "--intent-json", help="Use JSON for recognized intent (skips wake, asr)"
    )
    parser.add_argument(
        "--handle-text", help="Use text for handle response (skips handle)"
    )
    parser.add_argument(
        "--tts-wav", help="Play WAV file instead of text to speech response (skips tts)"
    )

    parser.add_argument(
        "--samples-per-chunk", type=int, default=DEFAULT_SAMPLES_PER_CHUNK
    )
    parser.add_argument("--asr-chunks-to-buffer", type=int, default=0)
    parser.add_argument("--loop", action="store_true", help="Keep pipeline running")
    parser.add_argument("--debug", action="store_true", help="Log DEBUG messages")
    args = parser.parse_args()
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    wake_detection: Optional[Detection] = None
    if args.wake_name:
        # Wake word detection will be skipped
        wake_detection = Detection(name=args.wake_name)

    asr_wav_in: Optional[IO[bytes]] = None
    if args.asr_wav:
        # asr input will come from WAV file instead of mic
        asr_wav_in = open(args.asr_wav, "rb")

    asr_transcript: Optional[Transcript] = None
    if args.asr_text:
        # asr transcription will be skipped
        asr_transcript = Transcript(text=args.asr_text)

    intent_result: Optional[Union[Intent, NotRecognized]] = None
    if args.intent_json:
        # intent recognition will be skipped
        intent_event = Event.from_dict(json.loads(args.intent_json))
        if Intent.is_type(intent_event.type):
            intent_result = Intent.from_event(intent_event)
        elif NotRecognized.is_type(intent_event.type):
            intent_result = NotRecognized.from_event(intent_event)

    handle_result: Optional[Union[Handled, NotHandled]] = None
    if args.handle_text:
        # text/intent handling will be skipped
        handle_result = Handled(text=args.handle_text)

    tts_wav_in: Optional[IO[bytes]] = None
    if args.tts_wav:
        # tts synthesis will be skipped
        tts_wav_in = open(args.tts_wav, "rb")

    rhasspy = Rhasspy.load(args.config)

    try:
        while True:
            pipeline_result = await run_pipeline(
                rhasspy,
                args.pipeline,
                samples_per_chunk=args.samples_per_chunk,
                asr_chunks_to_buffer=args.asr_chunks_to_buffer,
                wake_detection=wake_detection,
                asr_wav_in=asr_wav_in,
                asr_transcript=asr_transcript,
                intent_result=intent_result,
                handle_result=handle_result,
                tts_wav_in=tts_wav_in,
                stop_after=args.stop_after,
            )

            # One JSON result per line on stdout
            json.dump(pipeline_result.to_dict(), sys.stdout, ensure_ascii=False)
            print("")

            if not args.loop:
                break
    finally:
        # Fix: close the WAV files we opened above (they were never closed).
        # NOTE(review): with --loop, these streams are not rewound between
        # iterations — confirm whether combining --loop with --asr-wav or
        # --tts-wav is intended.
        for wav_io in (asr_wav_in, tts_wav_in):
            if wav_io is not None:
                wav_io.close()


if __name__ == "__main__":
    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        pass


================================================
FILE: bin/program_download.py
================================================
#!/usr/bin/env python3
import argparse
import logging
import shlex
import string
import subprocess
from pathlib import Path

from rhasspy3.core import Rhasspy

_FILE = Path(__file__)
_DIR = _FILE.parent
_LOGGER = logging.getLogger(_FILE.stem)


def main() -> None:
    """Download a named model for a program, unless its check file already exists."""
    parser = argparse.ArgumentParser()
    parser.add_argument("domain")
    parser.add_argument("program")
    parser.add_argument("model")
    parser.add_argument(
        "-c",
        "--config",
        default=_DIR.parent / "config",
        help="Configuration directory",
    )
    parser.add_argument(
        "--debug", action="store_true", help="Print DEBUG messages to console"
    )
    args = parser.parse_args()
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    rhasspy = Rhasspy.load(args.config)
    program_config = rhasspy.config.programs.get(args.domain, {}).get(args.program)
    assert program_config is not None, f"No config for {args.domain} {args.program}"

    install = program_config.install
    assert install is not None, f"No install config for {args.domain} {args.program}"

    downloads = install.downloads
    assert downloads is not None, f"No downloads for {args.domain} {args.program}"

    model = downloads.get(args.model)
    assert (
        model is not None
    ), f"No download named {args.model} for {args.domain} {args.program}"

    program_dir = rhasspy.programs_dir / args.domain / args.program
    data_dir = rhasspy.data_dir / args.domain / args.program

    # ${variables} usable within check_file and the download command
    substitutions = {
        "program_dir": str(program_dir.absolute()),
        "data_dir": str(data_dir.absolute()),
        "model": str(args.model),
    }

    # Nothing to do when the marker file is already present
    if model.check_file is not None:
        marker = Path(
            string.Template(model.check_file).safe_substitute(substitutions)
        )
        if marker.exists():
            _LOGGER.info("Installed: %s", marker)
            return

    download = install.download
    assert download is not None, f"No download config for {args.domain} {args.program}"

    command = string.Template(download.command).safe_substitute(substitutions)
    _LOGGER.info(command)

    # Run from the program directory when possible, otherwise the config dir
    working_dir = program_dir if program_dir.exists() else rhasspy.config_dir

    if download.shell:
        subprocess.check_call(command, shell=True, cwd=working_dir)
    else:
        subprocess.check_call(shlex.split(command), cwd=working_dir)


if __name__ == "__main__":
    main()


================================================
FILE: bin/program_install.py
================================================
#!/usr/bin/env python3
import argparse
import logging
import shlex
import string
import subprocess
from pathlib import Path

from rhasspy3.core import Rhasspy

_FILE = Path(__file__)
_DIR = _FILE.parent
_LOGGER = logging.getLogger(_FILE.stem)


def main() -> None:
    """Install a program's dependencies, unless its check file already exists."""
    parser = argparse.ArgumentParser()
    parser.add_argument("domain")
    parser.add_argument("program")
    parser.add_argument(
        "-c",
        "--config",
        default=_DIR.parent / "config",
        help="Configuration directory",
    )
    parser.add_argument(
        "--debug", action="store_true", help="Print DEBUG messages to console"
    )
    args = parser.parse_args()
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    rhasspy = Rhasspy.load(args.config)
    program_config = rhasspy.config.programs.get(args.domain, {}).get(args.program)
    assert program_config is not None, f"No config for {args.domain} {args.program}"

    install = program_config.install
    assert install is not None, f"No install config for {args.domain} {args.program}"

    program_dir = rhasspy.programs_dir / args.domain / args.program
    data_dir = rhasspy.data_dir / args.domain / args.program

    # ${variables} usable within check_file and the install command
    substitutions = {
        "program_dir": str(program_dir.absolute()),
        "data_dir": str(data_dir.absolute()),
    }

    # Skip installation when the marker file already exists
    if install.check_file is not None:
        marker = Path(
            string.Template(install.check_file).safe_substitute(substitutions)
        )
        if marker.exists():
            _LOGGER.info("Installed: %s", marker)
            return

    command = string.Template(install.command).safe_substitute(substitutions)
    _LOGGER.debug(command)

    # Run from the program directory when possible, otherwise the config dir
    working_dir = program_dir if program_dir.exists() else rhasspy.config_dir

    if install.shell:
        subprocess.check_call(command, shell=True, cwd=working_dir)
    else:
        subprocess.check_call(shlex.split(command), cwd=working_dir)


if __name__ == "__main__":
    main()


================================================
FILE: bin/satellite_run.py
================================================
#!/usr/bin/env python3
"""Run satellite loop."""
import argparse
import asyncio
import logging
from collections import deque
from pathlib import Path
from typing import Deque, List

from rhasspy3.audio import AudioChunk, AudioStop
from rhasspy3.core import Rhasspy
from rhasspy3.event import Event, async_read_event, async_write_event
from rhasspy3.mic import DOMAIN as MIC_DOMAIN
from rhasspy3.program import create_process
from rhasspy3.remote import DOMAIN as REMOTE_DOMAIN
from rhasspy3.snd import DOMAIN as SND_DOMAIN
from rhasspy3.snd import Played
from rhasspy3.wake import detect

_FILE = Path(__file__)
_DIR = _FILE.parent
_LOGGER = logging.getLogger(_FILE.stem)


async def main() -> None:
    """Run the satellite loop.

    Each cycle: listen on the mic until the wake program reports a
    detection, stream mic audio to the remote base station, then play the
    audio the base station sends back through the snd program.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-c",
        "--config",
        default=_DIR.parent / "config",
        help="Configuration directory",
    )
    parser.add_argument(
        "-s", "--satellite", default="default", help="Name of satellite to use"
    )
    #
    parser.add_argument(
        "--mic-program",
        help="Program to use for mic input (overrides satellite)",
    )
    parser.add_argument(
        "--wake-program",
        # Fix: help text typo "overiddes" -> "overrides"
        help="Program to use for wake word detection (overrides satellite)",
    )
    parser.add_argument(
        "--remote-program",
        help="Program to use for remote communication with base station (overrides satellite)",
    )
    parser.add_argument(
        "--snd-program",
        help="Program to use for audio output (overrides satellite)",
    )
    #
    parser.add_argument("--asr-chunks-to-buffer", type=int, default=0)
    #
    parser.add_argument("--loop", action="store_true", help="Keep satellite running")
    parser.add_argument("--debug", action="store_true", help="Log DEBUG messages")
    args = parser.parse_args()
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    rhasspy = Rhasspy.load(args.config)
    mic_program = args.mic_program
    wake_program = args.wake_program
    remote_program = args.remote_program
    snd_program = args.snd_program
    satellite = rhasspy.config.satellites.get(args.satellite)

    # Fall back to the satellite config for any program not given explicitly
    if not mic_program:
        assert satellite is not None, f"No satellite named {args.satellite}"
        mic_program = satellite.mic

    assert mic_program, "No mic program"

    if not wake_program:
        assert satellite is not None, f"No satellite named {args.satellite}"
        wake_program = satellite.wake

    assert wake_program, "No wake program"

    if not remote_program:
        assert satellite is not None, f"No satellite named {args.satellite}"
        remote_program = satellite.remote

    assert remote_program, "No remote program"

    if not snd_program:
        assert satellite is not None, f"No satellite named {args.satellite}"
        snd_program = satellite.snd

    assert snd_program, "No snd program"

    while True:
        # Recent mic chunks captured during wake detection (forwarded to remote)
        chunk_buffer: Deque[Event] = deque(maxlen=args.asr_chunks_to_buffer)
        # Events received from the remote before the snd program was started
        snd_buffer: List[Event] = []

        async with (await create_process(rhasspy, MIC_DOMAIN, mic_program)) as mic_proc:
            assert mic_proc.stdout is not None

            # Block until the wake word is heard (None = mic stream ended)
            detection = await detect(
                rhasspy, wake_program, mic_proc.stdout, chunk_buffer
            )
            if detection is None:
                continue

            async with (
                await create_process(rhasspy, REMOTE_DOMAIN, remote_program)
            ) as remote_proc:
                assert remote_proc.stdin is not None
                assert remote_proc.stdout is not None

                # Flush chunks buffered during wake detection to the remote.
                # NOTE(review): pop() drains the deque from the right — confirm
                # against detect()'s buffering order that chunks reach the base
                # station in chronological order.
                while chunk_buffer:
                    await async_write_event(chunk_buffer.pop(), remote_proc.stdin)

                mic_task = asyncio.create_task(async_read_event(mic_proc.stdout))
                remote_task = asyncio.create_task(async_read_event(remote_proc.stdout))
                pending = {mic_task, remote_task}

                try:
                    # Stream to remote until audio is received
                    while True:
                        done, pending = await asyncio.wait(
                            pending, return_when=asyncio.FIRST_COMPLETED
                        )

                        if mic_task in done:
                            mic_event = mic_task.result()
                            if mic_event is None:
                                # Mic program exited
                                break

                            if AudioChunk.is_type(mic_event.type):
                                await async_write_event(mic_event, remote_proc.stdin)

                            mic_task = asyncio.create_task(
                                async_read_event(mic_proc.stdout)
                            )
                            pending.add(mic_task)

                        if remote_task in done:
                            # First event back from the base station: stop
                            # streaming the mic and switch to playback
                            remote_event = remote_task.result()
                            if remote_event is not None:
                                snd_buffer.append(remote_event)

                            for task in pending:
                                task.cancel()

                            break

                    # Output audio
                    async with (
                        await create_process(rhasspy, SND_DOMAIN, snd_program)
                    ) as snd_proc:
                        assert snd_proc.stdin is not None
                        assert snd_proc.stdout is not None

                        # Replay events received before snd was running
                        for remote_event in snd_buffer:
                            if AudioChunk.is_type(remote_event.type):
                                await async_write_event(remote_event, snd_proc.stdin)
                            elif AudioStop.is_type(remote_event.type):
                                # Unexpected, but it could happen
                                continue

                        # Forward remaining remote audio until it stops
                        while True:
                            remote_event = await async_read_event(remote_proc.stdout)
                            if remote_event is None:
                                break

                            if AudioChunk.is_type(remote_event.type):
                                await async_write_event(remote_event, snd_proc.stdin)
                            elif AudioStop.is_type(remote_event.type):
                                await async_write_event(remote_event, snd_proc.stdin)
                                break

                        # Wait for audio to finish playing
                        while True:
                            snd_event = await async_read_event(snd_proc.stdout)
                            if snd_event is None:
                                break

                            if Played.is_type(snd_event.type):
                                break
                except Exception:
                    _LOGGER.exception(
                        "Unexpected error communicating with remote base station"
                    )

        if not args.loop:
            break


if __name__ == "__main__":
    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        pass


================================================
FILE: bin/server_run.py
================================================
#!/usr/bin/env python3
import argparse
import logging
import os
import shlex
import string
import subprocess
import sys
from pathlib import Path
from typing import List, Union

from rhasspy3.core import Rhasspy
from rhasspy3.util import merge_dict

_FILE = Path(__file__)
_DIR = _FILE.parent
_LOGGER = logging.getLogger(_FILE.stem)


def main() -> None:
    """Run a configured server program for a domain (asr, tts, etc.).

    Substitutes ${program_dir}/${data_dir} (and any template args) into the
    server's command, runs it with rhasspy3/bin prepended to PATH, and exits
    with the server's return code.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-c",
        "--config",
        default=_DIR.parent / "config",
        help="Configuration directory",
    )
    parser.add_argument("domain", help="Domain of server (asr, tts, etc.)")
    parser.add_argument("server", help="Name of server to run")

    parser.add_argument("--debug", action="store_true", help="Log DEBUG messages")
    args = parser.parse_args()
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    rhasspy = Rhasspy.load(args.config)
    server = rhasspy.config.servers[args.domain][args.server]

    program_dir = rhasspy.programs_dir / args.domain / args.server
    data_dir = rhasspy.data_dir / args.domain / args.server

    # ${variables} available within command and template args
    default_mapping = {
        "program_dir": str(program_dir.absolute()),
        "data_dir": str(data_dir.absolute()),
    }

    command_str = server.command
    command_mapping = dict(default_mapping)

    if server.template_args:
        # Substitute within template args
        args_mapping = dict(server.template_args)
        for arg_name, arg_str in args_mapping.items():
            if not isinstance(arg_str, str):
                continue

            arg_template = string.Template(arg_str)
            args_mapping[arg_name] = arg_template.safe_substitute(default_mapping)

        merge_dict(command_mapping, args_mapping)

    command_template = string.Template(command_str)
    command_str = command_template.safe_substitute(command_mapping)

    env = dict(os.environ)

    # Add rhasspy3/bin to $PATH.
    # Fix: the previous f-string contained a literal "$" before the old PATH
    # value, corrupting the first inherited PATH entry.
    env["PATH"] = f'{rhasspy.base_dir}/bin:{env["PATH"]}'

    # Ensure stdout is flushed for Python programs
    env["PYTHONUNBUFFERED"] = "1"

    # Run from the server's program directory when it exists
    # (program_dir is the same path the old server_dir recomputed)
    cwd = program_dir if program_dir.is_dir() else rhasspy.base_dir

    if server.shell:
        command: Union[str, List[str]] = command_str
    else:
        command = shlex.split(command_str)

    _LOGGER.debug(command)
    proc = subprocess.Popen(command, shell=server.shell, cwd=cwd, env=env)
    with proc:
        # Propagate the server's exit code
        sys.exit(proc.wait())


if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        pass


================================================
FILE: bin/snd_adapter_raw.py
================================================
#!/usr/bin/env python3
"""Play audio through a command that accepts raw PCM."""
import argparse
import logging
import shlex
import subprocess
from pathlib import Path

from rhasspy3.audio import (
    DEFAULT_OUT_CHANNELS,
    DEFAULT_OUT_RATE,
    DEFAULT_OUT_WIDTH,
    AudioChunk,
    AudioChunkConverter,
    AudioStop,
)
from rhasspy3.event import read_event, write_event
from rhasspy3.snd import Played

_FILE = Path(__file__)
_DIR = _FILE.parent
_LOGGER = logging.getLogger(_FILE.stem)


def main() -> None:
    """Play audio through a command that accepts raw PCM on stdin.

    Reads AudioChunk events from stdin, converts them to the requested
    rate/width/channels, and pipes the raw samples to the command.  Always
    emits a Played event when done, even on error.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "command",
        help="Command to run",
    )
    parser.add_argument(
        "--rate", type=int, default=DEFAULT_OUT_RATE, help="Sample rate (hertz)"
    )
    parser.add_argument(
        "--width", type=int, default=DEFAULT_OUT_WIDTH, help="Sample width (bytes)"
    )
    parser.add_argument(
        "--channels",
        type=int,
        default=DEFAULT_OUT_CHANNELS,
        help="Sample channel count",
    )
    parser.add_argument("--shell", action="store_true", help="Run command with shell")
    parser.add_argument(
        "--debug", action="store_true", help="Print DEBUG messages to console"
    )
    args = parser.parse_args()
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    if args.shell:
        command = args.command
    else:
        command = shlex.split(args.command)

    try:
        # Fix: pass shell=args.shell so --shell actually runs the command
        # through the shell (previously the flag only skipped shlex.split,
        # and Popen was never told to use a shell).
        proc = subprocess.Popen(
            command,
            stdin=subprocess.PIPE,
            stdout=subprocess.DEVNULL,
            shell=args.shell,
        )
        assert proc.stdin is not None

        converter = AudioChunkConverter(args.rate, args.width, args.channels)
        with proc:
            while True:
                event = read_event()
                if event is None:
                    break

                if AudioChunk.is_type(event.type):
                    chunk = AudioChunk.from_event(event)
                    chunk = converter.convert(chunk)
                    proc.stdin.write(chunk.audio)
                    # Flush so audio reaches the player immediately
                    proc.stdin.flush()
                elif AudioStop.is_type(event.type):
                    break
    finally:
        # Let the caller know playback finished
        write_event(Played().event())


if __name__ == "__main__":
    main()


================================================
FILE: bin/snd_play.py
================================================
#!/usr/bin/env python3
import argparse
import asyncio
import logging
import os
import sys
from pathlib import Path

from rhasspy3.audio import DEFAULT_SAMPLES_PER_CHUNK
from rhasspy3.core import Rhasspy
from rhasspy3.snd import play

_FILE = Path(__file__)
_DIR = _FILE.parent
_LOGGER = logging.getLogger(_FILE.stem)


async def main() -> None:
    """Play WAV file(s) from the command line (or stdin) via the snd program."""
    parser = argparse.ArgumentParser()
    parser.add_argument("wav_file", nargs="*", help="Path to WAV file(s) to play")
    parser.add_argument(
        "-c",
        "--config",
        default=_DIR.parent / "config",
        help="Configuration directory",
    )
    parser.add_argument("-p", "--pipeline", default="default", help="Name of pipeline")
    parser.add_argument("--snd-program", help="Audio output program name")
    parser.add_argument(
        "--samples-per-chunk",
        type=int,
        default=DEFAULT_SAMPLES_PER_CHUNK,
        help="Samples to send to snd program at a time",
    )
    parser.add_argument(
        "--debug", action="store_true", help="Print DEBUG messages to console"
    )
    args = parser.parse_args()
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    rhasspy = Rhasspy.load(args.config)
    pipeline = rhasspy.config.pipelines.get(args.pipeline)

    # Use the pipeline's snd program unless one was given explicitly
    snd_program = args.snd_program
    if not snd_program:
        assert pipeline is not None, f"No pipeline named {args.pipeline}"
        snd_program = pipeline.snd

    assert snd_program, "No snd program"

    if not args.wav_file:
        # No files given: stream WAV data from stdin
        if os.isatty(sys.stdin.fileno()):
            print("Reading WAV data from stdin", file=sys.stderr)

        await play(rhasspy, snd_program, sys.stdin.buffer, args.samples_per_chunk)
        return

    # Play each file in the order given
    for wav_path in args.wav_file:
        with open(wav_path, "rb") as wav_file:
            await play(rhasspy, snd_program, wav_file, args.samples_per_chunk)


if __name__ == "__main__":
    asyncio.run(main())


================================================
FILE: bin/tts_adapter_http.py
================================================
#!/usr/bin/env python3
import argparse
import logging
import wave
from pathlib import Path
from urllib.parse import urlencode
from urllib.request import urlopen

from rhasspy3.audio import DEFAULT_SAMPLES_PER_CHUNK, AudioChunk, AudioStart, AudioStop
from rhasspy3.event import read_event, write_event
from rhasspy3.tts import Synthesize

_FILE = Path(__file__)
_DIR = _FILE.parent
_LOGGER = logging.getLogger(_FILE.stem)


def main():
    """Adapter that turns Synthesize events into HTTP TTS requests.

    For each Synthesize event read from stdin, fetches WAV audio from the
    API endpoint and re-emits it as AudioStart/AudioChunk/AudioStop events.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "url",
        help="URL of API endpoint",
    )
    parser.add_argument(
        "--param",
        nargs=2,
        action="append",
        metavar=("name", "value"),
        help="Name/value of query parameter",
    )
    #
    parser.add_argument(
        "--samples-per-chunk", type=int, default=DEFAULT_SAMPLES_PER_CHUNK
    )
    #
    parser.add_argument(
        "--debug", action="store_true", help="Print DEBUG messages to console"
    )
    args = parser.parse_args()
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    params = {}
    if args.param:
        # Fix: iterate over the [name, value] pairs collected from --param.
        # The old code iterated over the still-empty params dict, so every
        # --param option was silently dropped.
        for key, value in args.param:
            # Don't include empty parameters
            if value:
                params[key] = value

    try:
        while True:
            event = read_event()
            if event is None:
                break

            if Synthesize.is_type(event.type):
                synthesize = Synthesize.from_event(event)

                # Text to speak goes in the "text" query parameter
                params["text"] = synthesize.text
                url = args.url + "?" + urlencode(params)

                with urlopen(url) as response:
                    with wave.open(response, "rb") as wav_file:
                        rate = wav_file.getframerate()
                        width = wav_file.getsampwidth()
                        channels = wav_file.getnchannels()

                        num_frames = wav_file.getnframes()
                        audio_bytes = wav_file.readframes(num_frames)

                # Re-emit the WAV audio as fixed-size chunk events
                bytes_per_chunk = args.samples_per_chunk * width
                timestamp = 0
                write_event(
                    AudioStart(rate, width, channels, timestamp=timestamp).event()
                )
                while audio_bytes:
                    chunk = AudioChunk(
                        rate,
                        width,
                        channels,
                        audio_bytes[:bytes_per_chunk],
                        timestamp=timestamp,
                    )
                    write_event(chunk.event())
                    timestamp += chunk.milliseconds
                    audio_bytes = audio_bytes[bytes_per_chunk:]

                write_event(AudioStop(timestamp=timestamp).event())
    except KeyboardInterrupt:
        pass


if __name__ == "__main__":
    main()


================================================
FILE: bin/tts_adapter_text2wav.py
================================================
#!/usr/bin/env python3
"""
Runs a text to speech command that returns WAV audio on stdout or in a temp file.
"""
import argparse
import io
import logging
import shlex
import subprocess
import tempfile
import wave
from pathlib import Path

from rhasspy3.audio import DEFAULT_SAMPLES_PER_CHUNK, AudioChunk, AudioStart, AudioStop
from rhasspy3.event import read_event, write_event
from rhasspy3.tts import Synthesize

_FILE = Path(__file__)
_DIR = _FILE.parent
_LOGGER = logging.getLogger(_FILE.stem)


def main():
    """Bridge Synthesize events to an external TTS command.

    Each synthesized WAV is re-emitted on stdout as
    AudioStart/AudioChunk/AudioStop events.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "command",
        help="Command to run",
    )
    parser.add_argument(
        "--temp_file",
        action="store_true",
        help="Command has {temp_file} and will write output to it",
    )
    parser.add_argument(
        "--text",
        action="store_true",
        help="Command has {text} argument",
    )
    #
    parser.add_argument(
        "--samples-per-chunk", type=int, default=DEFAULT_SAMPLES_PER_CHUNK
    )
    #
    parser.add_argument(
        "--debug", action="store_true", help="Print DEBUG messages to console"
    )
    args = parser.parse_args()
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    try:
        # read_event() returns None when stdin closes
        for event in iter(read_event, None):
            if not Synthesize.is_type(event.type):
                continue

            synthesize = Synthesize.from_event(event)
            wav_bytes = text_to_wav(args, synthesize.text)

            # Decode the WAV produced by the command
            with io.BytesIO(wav_bytes) as wav_io:
                with wave.open(wav_io, "rb") as wav_file:
                    rate = wav_file.getframerate()
                    width = wav_file.getsampwidth()
                    channels = wav_file.getnchannels()
                    audio_bytes = wav_file.readframes(wav_file.getnframes())

            # Emit fixed-size chunks until the audio is exhausted
            bytes_per_chunk = args.samples_per_chunk * width
            timestamp = 0
            write_event(
                AudioStart(rate, width, channels, timestamp=timestamp).event()
            )

            offset = 0
            while offset < len(audio_bytes):
                chunk = AudioChunk(
                    rate,
                    width,
                    channels,
                    audio_bytes[offset : offset + bytes_per_chunk],
                    timestamp=timestamp,
                )
                write_event(chunk.event())
                timestamp += chunk.milliseconds
                offset += bytes_per_chunk

            write_event(AudioStop(timestamp=timestamp).event())
    except KeyboardInterrupt:
        pass


def text_to_wav(args: argparse.Namespace, text: str) -> bytes:
    """Run the external TTS command for one line of text and return WAV bytes.

    Depending on the flags, the text is either piped to the command's stdin or
    substituted into the command template as {text}, and the WAV output is read
    either from the command's stdout or from a temporary file passed as {temp_file}.
    """
    template_values = {}
    stdin_text = text
    if args.text:
        # Text goes into the command line via {text} instead of stdin
        template_values["text"] = text
        stdin_text = ""

    if not args.temp_file:
        # Command writes WAV audio directly to its stdout
        command = shlex.split(args.command.format(**template_values))
        return subprocess.check_output(command, input=stdin_text.encode())

    # Command writes WAV audio to a temporary file we provide via {temp_file}
    with tempfile.NamedTemporaryFile(mode="wb", suffix=".wav") as wav_file:
        template_values["temp_file"] = wav_file.name
        command = shlex.split(args.command.format(**template_values))

        # Send stdout to devnull so it doesn't interfere with our events
        subprocess.run(
            command, check=True, stdout=subprocess.DEVNULL, input=stdin_text.encode()
        )
        wav_file.seek(0)
        return Path(wav_file.name).read_bytes()


if __name__ == "__main__":
    main()


================================================
FILE: bin/tts_speak.py
================================================
#!/usr/bin/env python3
"""Synthesize and speak audio."""
import argparse
import asyncio
import io
import json
import logging
import os
import sys
from pathlib import Path

from rhasspy3.core import Rhasspy
from rhasspy3.snd import play
from rhasspy3.tts import synthesize

_FILE = Path(__file__)
_DIR = _FILE.parent
_LOGGER = logging.getLogger(_FILE.stem)


async def main() -> None:
    """Synthesize each input line with the TTS program and play it with snd."""
    parser = argparse.ArgumentParser()
    parser.add_argument("text", nargs="*", help="Text to speak (default: stdin)")
    parser.add_argument(
        "-c",
        "--config",
        default=_DIR.parent / "config",
        help="Configuration directory",
    )
    parser.add_argument("-p", "--pipeline", default="default", help="Name of pipeline")
    parser.add_argument("--tts-program", help="TTS program name")
    parser.add_argument("--snd-program", help="Audio output program name")
    parser.add_argument(
        "--samples-per-chunk",
        type=int,
        default=1024,
        help="Samples to send to snd program at a time",
    )
    parser.add_argument(
        "--debug", action="store_true", help="Print DEBUG messages to console"
    )
    args = parser.parse_args()
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    rhasspy = Rhasspy.load(args.config)
    pipeline = rhasspy.config.pipelines.get(args.pipeline)

    # Command-line overrides win; otherwise fall back to the pipeline's programs
    tts_program = args.tts_program
    if not tts_program:
        assert pipeline is not None, f"No pipeline named {args.pipeline}"
        tts_program = pipeline.tts

    snd_program = args.snd_program
    if not snd_program:
        assert pipeline is not None, f"No pipeline named {args.pipeline}"
        snd_program = pipeline.snd

    assert tts_program, "No tts program"
    assert snd_program, "No snd program"

    if args.text:
        lines = args.text
    else:
        # No positional text: read one line of text per utterance from stdin
        lines = sys.stdin
        if os.isatty(sys.stdin.fileno()):
            print("Reading text from stdin", file=sys.stderr)

    for line in lines:
        text = line.strip()
        if not text:
            continue

        # Synthesize into an in-memory WAV, then stream it to the snd program
        with io.BytesIO() as wav_io:
            await synthesize(rhasspy, tts_program, text, wav_io)
            wav_io.seek(0)
            play_result = await play(
                rhasspy, snd_program, wav_io, args.samples_per_chunk
            )
            if play_result is not None:
                # Report the resulting event as one line of JSON on stdout
                json.dump(play_result.event().to_dict(), sys.stdout, ensure_ascii=False)
                print("", flush=True)


if __name__ == "__main__":
    asyncio.run(main())


================================================
FILE: bin/tts_synthesize.py
================================================
#!/usr/bin/env python3
"""Synthesize WAV audio from text."""
import argparse
import asyncio
import io
import logging
import sys
from pathlib import Path

from rhasspy3.core import Rhasspy
from rhasspy3.tts import synthesize

_FILE = Path(__file__)
_DIR = _FILE.parent
_LOGGER = logging.getLogger(_FILE.stem)


async def main() -> None:
    """Synthesize WAV audio for a single piece of text and write it out."""
    parser = argparse.ArgumentParser()
    parser.add_argument("text", help="Text to speak")
    parser.add_argument(
        "-c",
        "--config",
        default=_DIR.parent / "config",
        help="Configuration directory",
    )
    parser.add_argument("-p", "--pipeline", default="default", help="Name of pipeline")
    parser.add_argument("--tts-program", help="TTS program name")
    parser.add_argument("-f", "--file", help="Write to file instead of stdout")
    parser.add_argument(
        "--debug", action="store_true", help="Print DEBUG messages to console"
    )
    args = parser.parse_args()
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    rhasspy = Rhasspy.load(args.config)
    pipeline = rhasspy.config.pipelines.get(args.pipeline)

    # Command-line override wins; otherwise use the pipeline's tts program
    tts_program = args.tts_program
    if not tts_program:
        assert pipeline is not None, f"No pipeline named {args.pipeline}"
        tts_program = pipeline.tts

    assert tts_program, "No tts program"

    with io.BytesIO() as wav_out:
        await synthesize(rhasspy, tts_program, args.text, wav_out)
        wav_bytes = wav_out.getvalue()

    if args.file:
        # Create parent directories as needed before writing the WAV file
        output_path = Path(args.file)
        output_path.parent.mkdir(parents=True, exist_ok=True)
        output_path.write_bytes(wav_bytes)
    else:
        # Raw WAV bytes on stdout
        sys.stdout.buffer.write(wav_bytes)


if __name__ == "__main__":
    asyncio.run(main())


================================================
FILE: bin/vad_adapter_raw.py
================================================
#!/usr/bin/env python3
"""Voice activity detection programs that accept raw PCM audio and print a speech probability for each chunk."""
import argparse
import logging
import shlex
import subprocess
import time
from pathlib import Path
from typing import Optional

from rhasspy3.audio import AudioChunk, AudioChunkConverter, AudioStop
from rhasspy3.event import read_event, write_event
from rhasspy3.vad import Segmenter, VoiceStarted, VoiceStopped

_FILE = Path(__file__)
_DIR = _FILE.parent
_LOGGER = logging.getLogger(_FILE.stem)


def main() -> None:
    """Adapt a raw-PCM VAD command to vad-domain events.

    The wrapped command reads fixed-size raw PCM chunks on stdin and prints one
    speech probability (0-1) per chunk on stdout.  This adapter feeds it
    audio-chunk events, runs the probabilities through a Segmenter, and emits
    voice-started / voice-stopped events.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "command",
        help="Command to run",
    )
    #
    parser.add_argument(
        "--rate",
        type=int,
        required=True,
        help="Sample rate (hz)",
    )
    parser.add_argument(
        "--width",
        type=int,
        required=True,
        help="Sample width bytes",
    )
    parser.add_argument(
        "--channels",
        type=int,
        required=True,
        help="Sample channel count",
    )
    parser.add_argument(
        "--samples-per-chunk",
        required=True,
        type=int,
        help="Samples to send to command at a time",
    )
    #
    parser.add_argument(
        "--threshold",
        type=float,
        default=0.5,
        help="Speech probability threshold (0-1)",
    )
    parser.add_argument(
        "--speech-seconds",
        type=float,
        default=0.3,
    )
    parser.add_argument(
        "--silence-seconds",
        type=float,
        default=0.5,
    )
    parser.add_argument(
        "--timeout-seconds",
        type=float,
        default=15.0,
    )
    parser.add_argument(
        "--reset-seconds",
        type=float,
        default=1,
    )
    #
    parser.add_argument(
        "--debug", action="store_true", help="Print DEBUG messages to console"
    )
    args = parser.parse_args()
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    # Size of the fixed chunks the command expects, and their duration
    bytes_per_chunk = args.samples_per_chunk * args.width * args.channels
    seconds_per_chunk = args.samples_per_chunk / args.rate

    command = shlex.split(args.command)
    with subprocess.Popen(
        command, stdin=subprocess.PIPE, stdout=subprocess.PIPE
    ) as proc:
        assert proc.stdin is not None
        assert proc.stdout is not None

        # Segmenter turns per-chunk speech probabilities into start/stop decisions
        segmenter = Segmenter(
            args.speech_seconds,
            args.silence_seconds,
            args.timeout_seconds,
            args.reset_seconds,
        )
        # Normalize incoming audio to the rate/width/channels the command expects
        converter = AudioChunkConverter(args.rate, args.width, args.channels)
        audio_bytes = bytes()  # buffer of not-yet-processed audio
        is_first_audio = True
        sent_started = False
        sent_stopped = False
        last_stop_timestamp: Optional[int] = None

        while True:
            event = read_event()
            if event is None:
                break

            if AudioChunk.is_type(event.type):
                if is_first_audio:
                    _LOGGER.debug("Receiving audio")
                    is_first_audio = False

                chunk = AudioChunk.from_event(event)
                chunk = converter.convert(chunk)
                audio_bytes += chunk.audio
                # Prefer the chunk's own timestamp; fall back to a monotonic clock
                timestamp = (
                    time.monotonic_ns() if chunk.timestamp is None else chunk.timestamp
                )
                # Remember where this chunk ends in case the stream stops abruptly
                last_stop_timestamp = timestamp + chunk.milliseconds

                # Handle uneven chunk sizes
                while len(audio_bytes) >= bytes_per_chunk:
                    chunk_bytes = audio_bytes[:bytes_per_chunk]
                    proc.stdin.write(chunk_bytes)
                    proc.stdin.flush()

                    # One probability line is expected per chunk written
                    line = proc.stdout.readline().decode()
                    if line:
                        speech_probability = float(line)
                        is_speech = speech_probability > args.threshold
                        segmenter.process(
                            chunk=chunk_bytes,
                            chunk_seconds=seconds_per_chunk,
                            is_speech=is_speech,
                            timestamp=timestamp,
                        )

                        # Emit voice-started at most once per stream
                        if (not sent_started) and segmenter.started:
                            _LOGGER.debug("Voice started")
                            write_event(
                                VoiceStarted(
                                    timestamp=segmenter.start_timestamp
                                ).event()
                            )
                            sent_started = True

                        # Emit voice-stopped at most once per stream
                        if (not sent_stopped) and segmenter.stopped:
                            if segmenter.timeout:
                                _LOGGER.info("Voice timeout")
                            else:
                                _LOGGER.debug("Voice stopped")

                            write_event(
                                VoiceStopped(timestamp=segmenter.stop_timestamp).event()
                            )
                            sent_stopped = True

                    audio_bytes = audio_bytes[bytes_per_chunk:]

            elif AudioStop.is_type(event.type):
                _LOGGER.debug("Audio stopped")
                # Guarantee a voice-stopped event even if the segmenter never fired
                if not sent_stopped:
                    write_event(VoiceStopped(timestamp=last_stop_timestamp).event())
                    sent_stopped = True

                proc.stdin.close()
                break


if __name__ == "__main__":
    main()


================================================
FILE: bin/vad_segment_wav.py
================================================
#!/usr/bin/env python3
"""Prints voice start/stop in WAV file."""
import argparse
import asyncio
import io
import json
import logging
import sys
import time
import wave
from pathlib import Path
from typing import Iterable, Optional

from rhasspy3.audio import DEFAULT_SAMPLES_PER_CHUNK, AudioChunk, AudioStart, AudioStop
from rhasspy3.core import Rhasspy
from rhasspy3.event import async_read_event, async_write_event
from rhasspy3.program import create_process
from rhasspy3.vad import DOMAIN, VoiceStarted, VoiceStopped

_FILE = Path(__file__)
_DIR = _FILE.parent
_LOGGER = logging.getLogger(_FILE.stem)


async def main() -> None:
    """Run WAV file(s) through the vad program and print voice start/stop events.

    Each WAV is streamed to the vad process as audio-start / audio-chunk /
    audio-stop events; the resulting voice-started and voice-stopped events are
    printed to stdout as JSON, one per line.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-c",
        "--config",
        default=_DIR.parent / "config",
        help="Configuration directory",
    )
    parser.add_argument(
        "-p", "--pipeline", default="default", help="Name of pipeline to use"
    )
    parser.add_argument(
        "--vad-program", help="Name of vad program to use (overrides pipeline)"
    )
    parser.add_argument(
        "--samples-per-chunk",
        type=int,
        default=DEFAULT_SAMPLES_PER_CHUNK,
        help="Samples to process at a time",
    )
    parser.add_argument("wav", nargs="*", help="Path(s) to WAV file(s)")
    #
    parser.add_argument(
        "--debug", action="store_true", help="Print DEBUG messages to console"
    )
    args = parser.parse_args()
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    rhasspy = Rhasspy.load(args.config)
    vad_program = args.vad_program
    pipeline = rhasspy.config.pipelines.get(args.pipeline)

    # Fall back to the pipeline's vad program when not given on the command line
    if not vad_program:
        assert pipeline is not None, f"No pipeline named {args.pipeline}"
        vad_program = pipeline.vad

    assert vad_program, "No vad program"

    async with (await create_process(rhasspy, DOMAIN, vad_program)) as vad_proc:
        assert vad_proc.stdin is not None
        assert vad_proc.stdout is not None

        for wav_bytes in get_wav_bytes(args):
            with io.BytesIO(wav_bytes) as wav_io:
                with wave.open(wav_io, "rb") as wav_file:
                    rate = wav_file.getframerate()
                    width = wav_file.getsampwidth()
                    channels = wav_file.getnchannels()

                    # Stream the WAV: audio-start, chunks, then audio-stop
                    timestamp = 0
                    await async_write_event(
                        AudioStart(rate, width, channels, timestamp=timestamp).event(),
                        vad_proc.stdin,
                    )

                    audio_bytes = wav_file.readframes(args.samples_per_chunk)
                    while audio_bytes:
                        chunk = AudioChunk(
                            rate, width, channels, audio_bytes, timestamp=timestamp
                        )
                        await async_write_event(
                            chunk.event(),
                            vad_proc.stdin,
                        )
                        # Advance the running timestamp by the chunk duration
                        timestamp += chunk.milliseconds
                        audio_bytes = wav_file.readframes(args.samples_per_chunk)

                    await async_write_event(
                        AudioStop(timestamp=timestamp).event(), vad_proc.stdin
                    )

            # Collect vad events until a voice-stopped ends this WAV's segment
            voice_started: Optional[VoiceStarted] = None
            voice_stopped: Optional[VoiceStopped] = None
            while True:
                event = await async_read_event(vad_proc.stdout)
                if event is None:
                    break

                if VoiceStarted.is_type(event.type):
                    voice_started = VoiceStarted.from_event(event)
                    # Fill in a timestamp if the vad program omitted one
                    if voice_started.timestamp is None:
                        voice_started.timestamp = time.monotonic_ns()

                    _LOGGER.debug(voice_started)
                    json.dump(
                        voice_started.event().to_dict(), sys.stdout, ensure_ascii=False
                    )
                    print("", flush=True)
                elif VoiceStopped.is_type(event.type):
                    voice_stopped = VoiceStopped.from_event(event)
                    # Fill in a timestamp if the vad program omitted one
                    if voice_stopped.timestamp is None:
                        voice_stopped.timestamp = time.monotonic_ns()

                    _LOGGER.debug(voice_stopped)
                    json.dump(
                        voice_stopped.event().to_dict(), sys.stdout, ensure_ascii=False
                    )
                    print("", flush=True)
                    break


def get_wav_bytes(args: argparse.Namespace) -> Iterable[bytes]:
    """Yield the raw bytes of each input WAV, from file paths or stdin."""
    if not args.wav:
        # No paths on the command line: read a single WAV from stdin
        yield sys.stdin.buffer.read()
        return

    # One or more WAV file paths were given as positional arguments
    for wav_path in args.wav:
        with open(wav_path, "rb") as wav_file:
            yield wav_file.read()


if __name__ == "__main__":
    asyncio.run(main())


================================================
FILE: bin/wake_adapter_raw.py
================================================
#!/usr/bin/env python3
"""Wake word detection with a command that accepts raw PCM audio and prints a line for each detection."""
import argparse
import logging
import shlex
import subprocess
import threading
import time
from dataclasses import dataclass
from pathlib import Path
from typing import IO

from rhasspy3.audio import AudioChunk, AudioStop
from rhasspy3.event import read_event, write_event
from rhasspy3.wake import Detection, NotDetected

_FILE = Path(__file__)
_DIR = _FILE.parent
_LOGGER = logging.getLogger(_FILE.stem)


@dataclass
class State:
    """Shared state between the event loop and the detection reader thread."""

    # Timestamp of the most recent audio chunk (chunk timestamp or monotonic ns);
    # attached to the Detection event when the wake word fires
    timestamp: int = 0
    # Set by the reader thread once the wake command prints a detection line
    detected: bool = False


def main() -> None:
    """Adapt a raw-PCM wake word command to wake-domain events.

    The wrapped command reads raw PCM on stdin and prints one line per wake
    word detection.  Audio chunks are forwarded to the command while a reader
    thread watches its stdout and emits a Detection event on the first line.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "command",
        help="Command to run",
    )
    parser.add_argument(
        "--debug", action="store_true", help="Print DEBUG messages to console"
    )
    args = parser.parse_args()
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    command = shlex.split(args.command)
    with subprocess.Popen(
        command, stdin=subprocess.PIPE, stdout=subprocess.PIPE
    ) as proc:
        assert proc.stdin is not None
        assert proc.stdout is not None

        # Reader thread watches the command's stdout for a detection line
        state = State()
        threading.Thread(target=write_proc, args=(proc.stdout, state)).start()

        while not state.detected:
            event = read_event()
            if event is None:
                break

            if AudioChunk.is_type(event.type):
                # Forward raw PCM to the command, tracking the latest timestamp
                chunk = AudioChunk.from_event(event)
                state.timestamp = (
                    chunk.timestamp
                    if chunk.timestamp is not None
                    else time.monotonic_ns()
                )
                proc.stdin.write(chunk.audio)
                proc.stdin.flush()
            elif AudioStop.is_type(event.type):
                proc.stdin.close()
                break

    # Audio ended (or input closed) without the command detecting anything
    if not state.detected:
        write_event(NotDetected().event())


def write_proc(reader: IO[bytes], state: State):
    """Watch the wake command's stdout; emit a Detection on the first non-empty line."""
    try:
        for raw_line in reader:
            name = raw_line.strip()
            if not name:
                continue

            # The line's content is the detected model/wake word name
            write_event(
                Detection(name=name.decode(), timestamp=state.timestamp).event()
            )
            state.detected = True
            break
    except Exception:
        _LOGGER.exception("Unexpected error in write thread")


if __name__ == "__main__":
    main()


================================================
FILE: bin/wake_detect.py
================================================
#!/usr/bin/env python3
"""Wait for wake word to be detected."""
import argparse
import asyncio
import json
import logging
import sys
from pathlib import Path

from rhasspy3.core import Rhasspy
from rhasspy3.mic import DOMAIN as MIC_DOMAIN
from rhasspy3.program import create_process
from rhasspy3.wake import detect

_FILE = Path(__file__)
_DIR = _FILE.parent
_LOGGER = logging.getLogger(_FILE.stem)


async def main() -> None:
    """Stream mic audio into the wake program and report detections as JSON."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-c",
        "--config",
        default=_DIR.parent / "config",
        help="Configuration directory",
    )
    parser.add_argument(
        "-p", "--pipeline", default="default", help="Name of pipeline to use"
    )
    parser.add_argument(
        "--mic-program", help="Name of mic program to use (overrides pipeline)"
    )
    parser.add_argument(
        "--wake-program", help="Name of wake program to use (overrides pipeline)"
    )
    #
    parser.add_argument("--loop", action="store_true", help="Keep detecting wake words")
    parser.add_argument(
        "--debug", action="store_true", help="Print DEBUG messages to console"
    )
    args = parser.parse_args()
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    rhasspy = Rhasspy.load(args.config)
    pipeline = rhasspy.config.pipelines.get(args.pipeline)

    def from_pipeline(attr: str):
        # Resolve a program name from the pipeline when not overridden
        assert pipeline is not None, f"No pipeline named {args.pipeline}"
        return getattr(pipeline, attr)

    mic_program = args.mic_program or from_pipeline("mic")
    assert mic_program, "No mic program"
    _LOGGER.debug("mic program: %s", mic_program)

    wake_program = args.wake_program or from_pipeline("wake")
    assert wake_program, "No wake program"
    _LOGGER.debug("wake program: %s", wake_program)

    # Detect wake word (repeatedly with --loop)
    while True:
        async with (await create_process(rhasspy, MIC_DOMAIN, mic_program)) as mic_proc:
            assert mic_proc.stdout is not None
            _LOGGER.debug("Detecting wake word")
            detection = await detect(rhasspy, wake_program, mic_proc.stdout)
            if detection is not None:
                # One JSON event per line on stdout
                json.dump(detection.event().to_dict(), sys.stdout, ensure_ascii=False)
                print("", flush=True)

        if not args.loop:
            break


if __name__ == "__main__":
    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        pass


================================================
FILE: docs/README.md
================================================
# Rhasspy 3

* [Tutorial](tutorial.md)
* [Domains](domains.md)
* [Wyoming Protocol](wyoming.md)
* [Adapters](adapters.md)


================================================
FILE: docs/adapters.md
================================================
# Adapters

Scripts in `bin/`:

* `asr_adapter_raw2text.py`
    * Raw audio stream in, text or JSON out
* `asr_adapter_wav2text.py`
    * WAV file(s) in, text or JSON out (per file)
* `handle_adapter_json.py`
    * Intent JSON in, text response out
* `handle_adapter_text.py`
    * Transcription in, text response out
* `mic_adapter_raw.py`
    * Raw audio stream in
* `snd_adapter_raw.py`
    * Raw audio stream out
* `tts_adapter_http.py`
    * HTTP POST to endpoint with text, WAV out
* `tts_adapter_text2wav.py`
    * Text in, WAV out
* `vad_adapter_raw.py`
    * Raw audio stream in, speech probability out (one line per chunk)
* `wake_adapter_raw.py`
    * Raw audio stream in, name of detected model out (one line per detection)
* `client_unix_socket.py`
    * Send/receive events over Unix domain socket


![Wyoming protocol adapter](img/adapter.png)


================================================
FILE: docs/domains.md
================================================
# Domains

Programs belong to a specific domain. This defines the kinds of [events](wyoming.md) they are expected to receive and emit.

## mic

Emits `audio-chunk` events, ideally with a `timestamp`.


## wake

Receives `audio-chunk` events.
Emits `detection` event(s) or a `not-detected` event if the program exits without a detection.


## asr

Receives an `audio-start` event, followed by zero or more `audio-chunk` events.

An `audio-stop` event must trigger a `transcript` event to be emitted.


## vad

Receives `audio-chunk` events.

Emits `voice-started` with the `timestamp` of the `audio-chunk` when the user started speaking.

Emits `voice-stopped` with the `timestamp` of the `audio-chunk` when the user finished speaking.


## intent

Optional. The `handle` domain can handle `transcript` events directly.

Receives `recognize` events.

Emits either an `intent` or a `not-recognized` event.


## handle

Receives one of the following event types: `transcript`, `intent`, or `not-recognized`.

Emits either a `handled` or `not-handled` event.


## tts

Receives a `synthesize` event.

Emits an `audio-start` event followed by zero or more `audio-chunk` events, and then an `audio-stop` event.


## snd

Receives `audio-chunk` events until an `audio-stop` event.

Must emit `played` event when audio has finished playing.


================================================
FILE: docs/home_assistant.md
================================================
# Home Assistant

This will connect Rhasspy to Home Assistant via [Assist](https://www.home-assistant.io/docs/assist).

Install the Home Assistant intent handler:

```sh
mkdir -p config/programs/handle/
cp -R programs/handle/home_assistant config/programs/handle/
```

Create a long-lived access token in Home Assistant (inside your profile):

![Long-lived access token](img/ha_token.png)

Copy the **entire** access token (with CTRL+A, not just selecting what you can see) and put it in the data directory:

```sh
mkdir -p config/data/handle/home_assistant/
echo "MY-LONG-LIVED-ACCESS-TOKEN" > config/data/handle/home_assistant/token
```

Add to your `configuration.yaml`:


```yaml
programs:
  handle:
    home_assistant:
      command: |
        bin/converse.py --language "${language}" "${url}" "${token_file}"
      adapter: |
        handle_adapter_text.py
      template_args:
        url: "http://localhost:8123/api/conversation/process"
        token_file: "${data_dir}/token"
        language: "en"

pipelines:
  default:
    mic: ...
    vad: ...
    asr: ...
    wake: ...
    handle:
      name: home_assistant
    tts: ...
    snd: ...
```

Make sure your Home Assistant server is running, and test out a command:

```sh
script/run bin/handle_text.py "Turn on the bed light"
```

Replace "bed light" with the name of a device you have connected to Home Assistant.

If successful, you should see JSON printed with the response text, like:

```sh
{"type": "handled", "data": {"text": "Turned on light"}}
```

This also works over HTTP:

```sh
curl -X POST --data 'Turn on the bed light' 'localhost:13331/handle/handle'
```

Now you can run your full pipeline and control Home Assistant!


================================================
FILE: docs/satellite.md
================================================
# Satellite

Once you have a Rhasspy HTTP server running, you can use Rhasspy as a satellite on a separate device.

**NOTE:** Rhasspy satellites do not need to run Python or any Rhasspy software. They can use the websocket API directly, or talk directly to a running pipeline.

On your satellite, clone the repo:

```sh
git clone https://github.com/rhasspy/rhasspy3
cd rhasspy3
```

Install the websocket utility:

```sh
mkdir -p config/programs/remote/
cp -R programs/remote/websocket config/programs/remote/
config/programs/remote/websocket/script/setup
```

Install [Porcupine](https://github.com/Picovoice/porcupine):

```sh
mkdir -p config/programs/wake/
cp -R programs/wake/porcupine1 config/programs/wake/
config/programs/wake/porcupine1/script/setup
```

Check available wake word models by running 

```sh
config/programs/wake/porcupine1/script/list_models
```

and choose one. We'll use "porcupine_linux.ppn" as an example, but this will be **different on a Raspberry Pi**.

Next, create `config/configuration.yaml` with:

```yaml
programs:
  mic:
    arecord:
      command: |
        arecord -q -r 16000 -c 1 -f S16_LE -t raw -
      adapter: |
        mic_adapter_raw.py --samples-per-chunk 1024 --rate 16000 --width 2 --channels 1

  wake:
    porcupine1:
      command: |
        .venv/bin/python3 bin/porcupine_stream.py --model "${model}"
      template_args:
        model: "porcupine_linux.ppn"

  remote:
    websocket:
      command: |
        script/run "${uri}"
      template_args:
        uri: "ws://localhost:13331/pipeline/asr-tts"

satellites:
  default:
    mic:
      name: arecord
    wake:
      name: porcupine1
    remote:
      name: websocket
    snd:
      name: aplay
```

Replace the model in `porcupine1` with your selection, and adjust the URI in `websocket` to point to your Rhasspy server.

Now you can run your satellite:

```sh
script/run bin/satellite_run.py --debug --loop
```

(say "porcupine", *pause*, say voice command, *wait*)

If everything is working, you should hear a response being spoken. Press CTRL+C to quit.


================================================
FILE: docs/tutorial.md
================================================
# Tutorial

Welcome to Rhasspy 3! This is a developer preview, so many of the manual steps here will be replaced with something more user-friendly in the future.


## Installing Rhasspy 3

To get started, just clone the repo. Rhasspy's core does not currently have any dependencies outside the Python standard library.

```sh
git clone https://github.com/rhasspy/rhasspy3
cd rhasspy3
```


## Layout

Installed programs and downloaded models are stored in the `config` directory, which is empty by default:

* `rhasspy3/config/`
    * `configuration.yaml` - overrides `rhasspy3/configuration.yaml`
    * `programs/` - installed programs
        * `<domain>/`
            * `<name>/`
    * `data/` - downloaded models
        * `<domain>/`
            * `<name>/`
            
Programs in Rhasspy are divided into [domains](domains.md).


## Configuration

Rhasspy loads two configuration files:

1. `rhasspy3/configuration.yaml` (base)
2. `config/configuration.yaml` (user)

The file in `config` will override the base configuration. You can see what the final configuration looks like with:

```sh
script/run bin/config_print.py
```


## Microphone

Programs that were not designed for Rhasspy can be used with [adapters](adapters.md).
For example, add the following to your `configuration.yaml` (in the `config` directory):

```yaml
programs:
  mic:
    arecord:
      command: |
        arecord -q -r 16000 -c 1 -f S16_LE -t raw -
      adapter: |
        mic_adapter_raw.py --rate 16000 --width 2 --channels 1

pipelines:
  default:
    mic:
      name: arecord
```

Now you can run a microphone test:

```sh
script/run bin/mic_test_energy.py
```

When speaking, you should see the bar change with volume. If not, check the available devices with `arecord -L` and update the `arecord` command in `configuration.yaml` with `-D <device_name>` (prefer devices that start with `plughw:`).

Press CTRL+C to quit.

Pipelines will be discussed later. For now, know that the pipeline named `default` will be run if you don't specify one. The mic test script can do this:

```sh
script/run bin/mic_test_energy.py --pipeline my-pipeline
```

You can also override the mic program:

```sh
script/run bin/mic_test_energy.py --mic-program other-program-from-config
```


## Voice Activity Detection

Let's install our first program, [Silero VAD](https://github.com/snakers4/silero-vad/).
Start by copying from `programs/` to `config/programs`, then run the setup script:

```sh
mkdir -p config/programs/vad/
cp -R programs/vad/silero config/programs/vad/
config/programs/vad/silero/script/setup
```

Once the setup script completes, add the following to your `configuration.yaml`:

```yaml
programs:
  mic: ...
  vad:
    silero:
      command: |
        script/speech_prob "share/silero_vad.onnx"
      adapter: |
        vad_adapter_raw.py --rate 16000 --width 2 --channels 1 --samples-per-chunk 512

pipelines:
  default:
    mic: ...
    vad:
      name: silero
```


This calls a command inside `config/programs/vad/silero` and uses an adapter. Notice that the command's working directory will always be `config/programs/<domain>/<name>`.

You can test out the voice activity detection (VAD) by recording an audio sample:

```sh
script/run bin/mic_record_sample.py sample.wav
```

Say something for a few seconds and then wait for the program to finish. Afterwards, listen to `sample.wav` and verify that it sounds correct. You may need to adjust microphone settings with `alsamixer`.


## Speech to Text

Now for the fun part! We'll be installing [faster-whisper](https://github.com/guillaumekln/faster-whisper/), an optimized version of Open AI's [Whisper](https://github.com/openai/whisper) model.


```sh
mkdir -p config/programs/asr/
cp -R programs/asr/faster-whisper config/programs/asr/
config/programs/asr/faster-whisper/script/setup
```

Before using faster-whisper, we need to download a model:

```sh
config/programs/asr/faster-whisper/script/download.py tiny-int8
```

Notice that the model was downloaded to `config/data/asr/faster-whisper`:

```sh
find config/data/asr/faster-whisper/
config/data/asr/faster-whisper/
config/data/asr/faster-whisper/tiny-int8
config/data/asr/faster-whisper/tiny-int8/vocabulary.txt
config/data/asr/faster-whisper/tiny-int8/model.bin
config/data/asr/faster-whisper/tiny-int8/config.json
```

The `tiny-int8` model is the smallest and fastest model, but may not give the best transcriptions. Run `download.py` without any arguments to see the available models, or follow [the instructions](https://github.com/guillaumekln/faster-whisper/#model-conversion) to make your own!

Add the following to `configuration.yaml`:

```yaml
programs:
  mic: ...
  vad: ...
  asr:
    faster-whisper:
      command: |
        script/wav2text "${data_dir}/tiny-int8" "{wav_file}"
      adapter: |
        asr_adapter_wav2text.py

pipelines:
  default:
    mic: ...
    vad: ...
    asr:
      name: faster-whisper
```

You can now transcribe a voice command:

```sh
script/run bin/asr_transcribe.py
```

(say something)

You should see a transcription of what you said as part of an [event](wyoming.md).

### Client/Server

Speech to text systems can take a while to load their models, so a lot of time is wasted if we start from scratch each time.

Some speech to text and text to speech programs have included servers. These usually use [Unix domain sockets](https://en.wikipedia.org/wiki/Unix_domain_socket) to communicate with a small client program.

Add the following to your `configuration.yaml`:


```yaml
programs:
  mic: ...
  vad: ...
  asr:
    faster-whisper: ...
    faster-whisper.client:
      command: |
        client_unix_socket.py var/run/faster-whisper.socket

servers:
  asr:
    faster-whisper:
      command: |
        script/server --language "en" "${data_dir}/tiny-int8"

pipelines:
  default:
    mic: ...
    vad: ...
    asr:
      name: faster-whisper.client
```

Start the server in a separate terminal:

```sh
script/run bin/server_run.py asr faster-whisper
```

When it prints "Ready", transcribe yourself speaking again:

```sh
script/run bin/asr_transcribe.py
```

(say something)

You should receive your transcription a bit faster than before.


### HTTP Server

Rhasspy includes a small HTTP server that allows you to access programs and pipelines over a web API. To get started, run the setup script:

```sh
script/setup_http_server
```

Run HTTP server in a separate terminal:

```sh
script/http_server --debug
```

Now you can transcribe a WAV file over HTTP:

```sh
curl -X POST -H 'Content-Type: audio/wav' --data-binary @etc/what_time_is_it.wav 'localhost:13331/asr/transcribe'
```

You can run one or more program servers along with the HTTP server:

```sh
script/http_server --debug --server asr faster-whisper
```

**NOTE:** You will need to restart the HTTP server when you change `configuration.yaml`


## Wake Word Detection

Next, we'll install [Porcupine](https://github.com/Picovoice/porcupine):

```sh
mkdir -p config/programs/wake/
cp -R programs/wake/porcupine1 config/programs/wake/
config/programs/wake/porcupine1/script/setup
```

Check available wake word models with:

```sh
config/programs/wake/porcupine1/script/list_models
alexa_linux.ppn
americano_linux.ppn
blueberry_linux.ppn
bumblebee_linux.ppn
computer_linux.ppn
grapefruit_linux.ppn
grasshopper_linux.ppn
hey google_linux.ppn
hey siri_linux.ppn
jarvis_linux.ppn
ok google_linux.ppn
pico clock_linux.ppn
picovoice_linux.ppn
porcupine_linux.ppn
smart mirror_linux.ppn
snowboy_linux.ppn
terminator_linux.ppn
view glass_linux.ppn
```

**NOTE:** These will be slightly different on a Raspberry Pi (`_raspberry-pi.ppn` instead of `_linux.ppn`).

Add to `configuration.yaml`:

```yaml
programs:
  mic: ...
  vad: ...
  asr: ...
  wake:
    porcupine1:
      command: |
        .venv/bin/python3 bin/porcupine_stream.py --model "${model}"
      template_args:
        model: "porcupine_linux.ppn"

servers:
  asr: ...

pipelines:
  default:
    mic: ...
    vad: ...
    asr: ...
    wake:
      name: porcupine1
```

Notice that we include `template_args` in the `programs` section. This lets us change specific settings in `pipelines`, which will be demonstrated in a moment.

Test wake word detection:

```sh
script/run bin/wake_detect.py --debug
```

(say "porcupine")

Now change the model in `configuration.yaml`:

```yaml
programs:
  mic: ...
  vad: ...
  asr: ...
  wake: ...

servers:
  asr: ...

pipelines:
  default:
    mic: ...
    vad: ...
    asr: ...
    wake:
      name: porcupine1
      template_args:
        model: "grasshopper_linux.ppn"
```

Test wake word detection again:

```sh
script/run bin/wake_detect.py --debug
```

(say "grasshopper")

For non-English models, first download the extra data files:

```sh
config/programs/wake/porcupine1/script/download.py
```

Next, adjust your `configuration.yaml`. For example, this uses the German keyword "ananas":

```yaml
programs:
  wake:
    porcupine1:
      command: |
        .venv/bin/python3 bin/porcupine_stream.py --model "${model}" --lang_model "${lang_model}"
      template_args:
        model: "${data_dir}/resources/keyword_files_de/linux/ananas_linux.ppn"
        lang_model: "${data_dir}/lib/common/porcupine_params_de.pv"

```

Inspect the files in `config/data/wake/porcupine1` for supported languages and keywords. At this time, English, German (de), French (fr), and Spanish (es) are available with keywords for `linux`, `raspberry-pi`, and many other platforms.

Going back to "grasshopper", we can test over HTTP server (restart server):

```sh
curl -X POST 'localhost:13331/pipeline/run?stop_after=wake'
```

(say "grasshopper")

Test full voice command:

```sh
curl -X POST 'localhost:13331/pipeline/run?stop_after=asr'
```

(say "grasshopper", *pause*, voice command, *wait*)



## Intent Handling

There are two types of intent handlers in Rhasspy, ones that handle transcripts directly (text) and others that handle structured intents (name + entities). For this example, we will be handling text directly from `asr`.

In `configuration.yaml`:

```yaml
programs:
  mic: ...
  vad: ...
  asr: ...
  wake: ...
  handle:
    date_time:
      command: |
        bin/date_time.py
      adapter: |
        handle_adapter_text.py

servers:
  asr: ...

pipelines:
  default:
    mic: ...
    vad: ...
    asr: ...
    wake: ...
    handle:
      name: date_time

```

Install date time demo script:

```sh
mkdir -p config/programs/handle/
cp -R programs/handle/date_time config/programs/handle/
```

This script just looks for the words "date" and "time" in the text, and responds appropriately.

You can test it on some text:

```sh
echo 'What time is it?' | script/run bin/handle_text.py --debug
```

Now let's test it with a full voice command:

```sh
script/run bin/pipeline_run.py --debug --stop-after handle
```

(say "grasshopper", *pause*, "what time is it?")

It also works over HTTP (restart server):

```sh
curl -X POST 'localhost:13331/pipeline/run?stop_after=handle'
```

(say "grasshopper", *pause*, "what's the date?")


## Text to Speech and Sound

The final stages of our pipeline will be text to speech (`tts`) and audio output (`snd`).

Install [Piper](https://github.com/rhasspy/piper):

```sh
mkdir -p config/programs/tts/
cp -R programs/tts/piper config/programs/tts/
config/programs/tts/piper/script/setup.py
```

and download an English voice:

```sh
config/programs/tts/piper/script/download.py english
```

Call `download.py` without any arguments to see available voices.

Add to `configuration.yaml`:

```yaml
programs:
  mic: ...
  vad: ...
  asr: ...
  wake: ...
  handle: ...
  tts:
    piper:
      command: |
        bin/piper --model "${model}" --output_file -
      adapter: |
        tts_adapter_text2wav.py
      template_args:
        model: "${data_dir}/en-us-blizzard_lessac-medium.onnx"
  snd:
    aplay:
      command: |
        aplay -q -r 22050 -f S16_LE -c 1 -t raw
      adapter: |
        snd_adapter_raw.py --rate 22050 --width 2 --channels 1

servers:
  asr: ...

pipelines:
  default:
    mic: ...
    vad: ...
    asr: ...
    wake: ...
    handle: ...
    tts:
      name: piper
    snd:
      name: aplay
```


We can test the text to speech and audio output programs:

```sh
script/run bin/tts_speak.py 'Welcome to the world of speech synthesis.'
```

The `bin/tts_synthesize.py` can be used if you want to just output a WAV file.

```sh
script/run bin/tts_synthesize.py 'Welcome to the world of speech synthesis.' > welcome.wav
```

This also works over HTTP (restart server):

```sh
curl -X POST \
  --data 'Welcome to the world of speech synthesis.' \
  --output welcome.wav \
  'localhost:13331/tts/synthesize'
```

Or to speak over HTTP:

```sh
curl -X POST --data 'Welcome to the world of speech synthesis.' 'localhost:13331/tts/speak'
```


### Client/Server

Like speech to text, text to speech models can take a while to load. Let's add a server for Piper to `configuration.yaml`:

```yaml
programs:
  mic: ...
  vad: ...
  asr: ...
  wake: ...
  handle: ...
  tts:
    piper.client:
      command: |
        client_unix_socket.py var/run/piper.socket
  snd: ...

servers:
  asr: ...
  tts:
    piper:
      command: |
        script/server "${model}"
      template_args:
        model: "${data_dir}/en-us-blizzard_lessac-medium.onnx"

pipelines:
  default:
    mic: ...
    vad: ...
    asr: ...
    wake: ...
    handle: ...
    tts:
      name: piper.client
    snd: ...
```

Now we can run both servers with the HTTP server:

```sh
script/http_server --debug --server asr faster-whisper --server tts piper
```

Text to speech requests should be faster now.


## Complete Pipeline

As a final example, let's run a complete pipeline from wake word detection to text to speech response:

```sh
script/run bin/pipeline_run.py --debug
```

(say "grasshopper", *pause*, "what time is it?", *wait*)

Rhasspy should speak the current time.

This also works over HTTP:

```sh
curl -X POST 'localhost:13331/pipeline/run'
```

(say "grasshopper", *pause*, "what is the date?", *wait*)

Rhasspy should speak the current date.


## Next Steps

* Connect Rhasspy to [Home Assistant](home_assistant.md)
* Run one or more [satellites](satellite.md)


================================================
FILE: docs/wyoming.md
================================================
# The Wyoming Protocol

An interprocess event protocol over stdin/stdout for Rhasspy v3.

(effectively [JSONL](https://jsonlines.org/) with an optional binary payload)

![Wyoming protocol](img/wyoming.png)


## Motivation

Rhasspy v2 was built on top of MQTT, and therefore required (1) an MQTT broker and (2) all services to talk over MQTT. Each open source voice program needed a custom service wrapper to talk to Rhasspy.

For v3, a project goal was to minimize the barrier for programs to talk to Rhasspy.


## Talking Directly to Programs

Many voice programs have similar command line interfaces. For example, most text to speech programs accept text through standard input and write a WAV file to standard output or a file:

```sh
echo "Input text" | text-to-speech > output.wav
```

A protocol based on standard input/output would be universal across languages, operating systems, etc. However, some voice programs need to consume or produce audio/event streams. For example, a speech to text system may return a result much quicker if it can process audio as it's being recorded.

## Event Streams

Standard input/output are byte streams, but they can be easily adapted to event streams that can also carry binary data. This lets us send, for example, chunks of audio to a speech to text program as well as an event to say the stream is finished. All without a broker or a socket!

Each **event** in the Wyoming protocol is:

1. A **single line** of JSON with an object:
    * **MUST** have a `type` field with an event type name
    * MAY have a `data` field with an object that contains event-specific data
    * MAY have a `payload_length` field with a number > 0
2. If `payload_length` is given, *exactly* that many bytes follow

Example:

```json
{ "type": "audio-chunk", "data": { "rate": 16000, "width": 2, "channels": 1 }, "payload_length": 2048 }
<2048 bytes>
```


## Adapter

Using events over standard input/output unfortunately means we cannot talk to most programs directly. Fortunately, [small adapters](adapters.md) can be written and shared for programs with similar command-line interfaces. The adapter speaks events to Rhasspy, but calls the underlying program according to a common convention like “text in, WAV out”.

![Wyoming protocol adapter](img/adapter.png)

## Event Types

Voice programs vary significantly in their options, but programs within the same [domain](domains.md) have the same minimal requirements to function:

* mic
    * Audio input
    * Outputs fixed-sized chunks of PCM audio from a microphone, socket, etc.
    * Audio chunks may contain timestamps
* wake
    * Wake word detection
    * Inputs fixed-sized chunks of PCM audio
    * Outputs name of detected model, timestamp of audio chunk
* asr
    * Speech to text
    * Inputs fixed-sized chunks of PCM audio
    * Inputs an event indicating the end of the audio stream (or voice command)
    * Outputs a transcription
* vad
    * Voice activity detection
    * Inputs fixed-sized chunks of PCM audio
    * Outputs events indicating the beginning and end of a voice command
* intent
    * Intent recognition
    * Inputs text
    * Outputs an intent with a name and entities (slots)
* handle
    * Intent/text handling
    * Does something with an intent or directly with a transcription
    * Outputs a text response
* tts
    * Text to speech
    * Inputs text
    * Outputs one or more fixed-sized chunks of PCM audio
* snd
    * Audio output
    * Inputs fixed-sized chunks of PCM audio
    * Plays audio through a sound system

The following event types are currently defined:

| Domain | Type           | Data                             | Payload |
|--------|----------------|----------------------------------|---------|
| audio  | audio-start    | timestamp, rate, width, channels |         |
| audio  | audio-chunk    | timestamp, rate, width, channels | PCM     |
| audio  | audio-stop     | timestamp                        |         |
| wake   | detection      | name, timestamp                  |         |
| wake   | not-detected   |                                  |         |
| vad    | voice-started  | timestamp                        |         |
| vad    | voice-stopped  | timestamp                        |         |
| asr    | transcript     | text                             |         |
| intent | recognize      | text                             |         |
| intent | intent         | name, entities                   |         |
| intent | not-recognized | text                             |         |
| handle | handled        | text                             |         |
| handle | not-handled    | text                             |         |
| tts    | synthesize     | text                             |         |
| snd    | played         |                                  |         |


================================================
FILE: examples/satellite/configuration.yaml
================================================
satellites:
  default:
    mic:
      name: arecord
      template_args:
        device: "default"
    wake:
      name: porcupine1
      template_args:
        model: "porcupine_raspberry-pi.ppn"
    remote:
      name: websocket
      template_args:
        uri: "ws://homeassistant.local:13331/pipeline/asr-tts"
    snd:
      name: aplay
      template_args:
        device: "default"


================================================
FILE: mypy.ini
================================================
[mypy]
ignore_missing_imports = true

[mypy-setuptools.*]
ignore_missing_imports = True


================================================
FILE: programs/asr/coqui-stt/README.md
================================================
# Coqui STT

Speech to text service for Rhasspy based on [Coqui STT](https://stt.readthedocs.io/en/latest/).

Additional models can be downloaded here: https://coqui.ai/models/


## Installation

1. Copy the contents of this directory to `config/programs/asr/coqui-stt/`
2. Run `script/setup`
3. Download a model with `script/download.py`
    * Example: `script/download.py en_large`
    * Models are downloaded to `config/data/asr/coqui-stt` directory
4. Test with `script/wav2text`
    * Example `script/wav2text /path/to/english_v1.0.0-large-vocab/ /path/to/test.wav`


================================================
FILE: programs/asr/coqui-stt/bin/coqui_stt_raw2text.py
================================================
#!/usr/bin/env python3
import argparse
import logging
import sys
from pathlib import Path

import numpy as np
from stt import Model

_LOGGER = logging.getLogger("coqui_stt_raw2text")


def main() -> None:
    """Transcribe raw 16Khz, 16-bit, mono PCM audio from stdin with Coqui STT.

    Reads audio in fixed-size chunks, feeds them to a streaming Coqui STT
    decoder, and prints the final transcript to stdout.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("model", help="Path to Coqui STT model directory")
    parser.add_argument(
        "--scorer", help="Path to scorer (default: .scorer file in model directory)"
    )
    parser.add_argument(
        "--alpha-beta",
        type=float,
        nargs=2,
        metavar=("alpha", "beta"),
        help="Scorer alpha/beta",
    )
    parser.add_argument(
        "--samples-per-chunk",
        type=int,
        default=1024,
        help="Number of samples to process at a time",
    )
    parser.add_argument("--debug", action="store_true", help="Log DEBUG messages")
    args = parser.parse_args()

    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    # Model directory must contain a .tflite model (and usually a .scorer)
    model_dir = Path(args.model)
    model_path = next(model_dir.glob("*.tflite"))
    if args.scorer:
        scorer_path = Path(args.scorer)
    else:
        scorer_path = next(model_dir.glob("*.scorer"))

    _LOGGER.debug("Loading model: %s, scorer: %s", model_path, scorer_path)
    model = Model(str(model_path))
    model.enableExternalScorer(str(scorer_path))

    if args.alpha_beta is not None:
        model.setScorerAlphaBeta(*args.alpha_beta)

    # Samples are 16-bit (2 bytes each), so a chunk of N samples is 2*N bytes.
    # The previous code read samples_per_chunk *bytes*, which delivered only
    # half the requested number of samples per chunk.
    bytes_per_chunk = args.samples_per_chunk * 2

    model_stream = model.createStream()
    chunk = sys.stdin.buffer.read(bytes_per_chunk)
    _LOGGER.debug("Processing audio")
    while chunk:
        # Drop a trailing odd byte so np.frombuffer always sees whole samples
        if len(chunk) % 2 != 0:
            chunk = chunk[:-1]

        chunk_array = np.frombuffer(chunk, dtype=np.int16)
        model_stream.feedAudioContent(chunk_array)
        chunk = sys.stdin.buffer.read(bytes_per_chunk)

    text = model_stream.finishStream()
    _LOGGER.debug(text)

    print(text.strip())


# -----------------------------------------------------------------------------

if __name__ == "__main__":
    main()


================================================
FILE: programs/asr/coqui-stt/bin/coqui_stt_server.py
================================================
#!/usr/bin/env python3
import argparse
import json
import logging
import os
import socket
import threading
from pathlib import Path

import numpy as np
from stt import Model

_LOGGER = logging.getLogger("coqui_stt_server")


def main():
    """Run a Unix domain socket server that transcribes audio with Coqui STT.

    Loads the model once at startup, then handles each client connection in
    its own daemon thread (see handle_client in this file). Clients speak the
    Wyoming event protocol: audio-chunk events in, one transcript event out.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("model", help="Path to Coqui STT model directory")
    parser.add_argument(
        "--scorer", help="Path to scorer (default: .scorer file in model directory)"
    )
    parser.add_argument(
        "--alpha-beta",
        type=float,
        nargs=2,
        metavar=("alpha", "beta"),
        help="Scorer alpha/beta",
    )
    parser.add_argument(
        "--socketfile", required=True, help="Path to Unix domain socket file"
    )
    parser.add_argument(
        "-r",
        "--rate",
        type=int,
        default=16000,
        help="Input audio sample rate (default: 16000)",
    )
    parser.add_argument("--debug", action="store_true", help="Log DEBUG messages")
    args = parser.parse_args()

    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    # Need to unlink socket if it exists
    # (bind() fails if a stale socket file is left over from a previous run)
    try:
        os.unlink(args.socketfile)
    except OSError:
        pass

    try:
        # Create socket server
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.bind(args.socketfile)
        sock.listen()

        # Model directory must contain a .tflite model (and usually a .scorer)
        model_dir = Path(args.model)
        model_path = next(model_dir.glob("*.tflite"))
        if args.scorer:
            scorer_path = Path(args.scorer)
        else:
            scorer_path = next(model_dir.glob("*.scorer"))

        _LOGGER.debug("Loading model: %s, scorer: %s", model_path, scorer_path)
        model = Model(str(model_path))
        model.enableExternalScorer(str(scorer_path))

        if args.alpha_beta is not None:
            model.setScorerAlphaBeta(*args.alpha_beta)

        # "Ready" signals callers (e.g. server_run.py) that the model is loaded
        _LOGGER.info("Ready")

        # Listen for connections
        while True:
            try:
                connection, client_address = sock.accept()
                _LOGGER.debug("Connection from %s", client_address)

                # Start new thread for client
                # (daemon thread so it does not block process shutdown)
                threading.Thread(
                    target=handle_client,
                    args=(connection, model, args.rate),
                    daemon=True,
                ).start()
            except KeyboardInterrupt:
                break
            except Exception:
                _LOGGER.exception("Error communicating with socket client")
    finally:
        # Always remove the socket file so the next start can bind cleanly
        os.unlink(args.socketfile)


def handle_client(connection: socket.socket, model: Model, rate: int) -> None:
    """Feed one client's audio-chunk events into a Coqui STT stream.

    Reads newline-delimited JSON events (each optionally followed by a binary
    payload) until an audio-stop event arrives, then writes back a single
    transcript event and closes the connection.
    """
    try:
        stream = model.createStream()
        received_audio = False

        with connection, connection.makefile(mode="rwb") as conn_file:
            while True:
                event_info = json.loads(conn_file.readline())
                event_type = event_info["type"]

                if event_type == "audio-stop":
                    _LOGGER.info("Audio stopped")

                    text = stream.finishStream()
                    response = {"type": "transcript", "data": {"text": text}}
                    conn_file.write(
                        (json.dumps(response, ensure_ascii=False) + "\n").encode()
                    )
                    break

                if event_type == "audio-chunk":
                    if not received_audio:
                        _LOGGER.debug("Receiving audio")
                        received_audio = True

                    # payload_length bytes of raw PCM follow the JSON line
                    payload = conn_file.read(event_info["payload_length"])
                    stream.feedAudioContent(np.frombuffer(payload, dtype=np.int16))
    except Exception:
        _LOGGER.exception("Unexpected error in client thread")


# -----------------------------------------------------------------------------

if __name__ == "__main__":
    main()


================================================
FILE: programs/asr/coqui-stt/bin/coqui_stt_wav2text.py
================================================
#!/usr/bin/env python3
import argparse
import logging
import wave
from pathlib import Path

import numpy as np
from stt import Model

_LOGGER = logging.getLogger("coqui_stt_wav2text")


def main() -> None:
    """Transcribe WAV file(s) with Coqui STT, printing one transcript per file."""
    parser = argparse.ArgumentParser()
    parser.add_argument("model", help="Path to Coqui STT model directory")
    parser.add_argument("wav_file", nargs="+", help="Path to WAV file(s) to transcribe")
    parser.add_argument(
        "--scorer", help="Path to scorer (default: .scorer file in model directory)"
    )
    parser.add_argument(
        "--alpha-beta",
        type=float,
        nargs=2,
        metavar=("alpha", "beta"),
        help="Scorer alpha/beta",
    )
    parser.add_argument("--debug", action="store_true", help="Log DEBUG messages")
    args = parser.parse_args()

    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    # Locate the .tflite model and .scorer inside the model directory
    model_dir = Path(args.model)
    tflite_path = next(model_dir.glob("*.tflite"))
    scorer_path = Path(args.scorer) if args.scorer else next(model_dir.glob("*.scorer"))

    _LOGGER.debug("Loading model: %s, scorer: %s", tflite_path, scorer_path)
    stt_model = Model(str(tflite_path))
    stt_model.enableExternalScorer(str(scorer_path))

    if args.alpha_beta is not None:
        stt_model.setScorerAlphaBeta(*args.alpha_beta)

    for wav_path in args.wav_file:
        _LOGGER.debug("Processing %s", wav_path)

        reader: wave.Wave_read = wave.open(wav_path, "rb")
        with reader:
            # Coqui STT expects 16Khz, 16-bit, mono PCM
            assert reader.getframerate() == 16000, "16Khz sample rate required"
            assert reader.getsampwidth() == 2, "16-bit samples required"
            assert reader.getnchannels() == 1, "Mono audio required"
            frames = reader.readframes(reader.getnframes())

        # Entire file is decoded in one shot through a fresh stream
        stream = stt_model.createStream()
        stream.feedAudioContent(np.frombuffer(frames, dtype=np.int16))
        print(stream.finishStream().strip())


# -----------------------------------------------------------------------------

if __name__ == "__main__":
    main()


================================================
FILE: programs/asr/coqui-stt/requirements.txt
================================================
stt>=1.4.0,<2.0
numpy


================================================
FILE: programs/asr/coqui-stt/script/download.py
================================================
#!/usr/bin/env python3
import argparse
import itertools
import logging
import tarfile
from pathlib import Path
from urllib.request import urlopen

_DIR = Path(__file__).parent
_LOGGER = logging.getLogger("setup")

MODELS = {"en_large": "english_v1.0.0-large-vocab"}


def main() -> None:
    """Download and extract Coqui STT model archive(s) into the data directory."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "model",
        nargs="+",
        choices=list(itertools.chain(MODELS.keys(), MODELS.values())),
        help="Coqui STT model(s) to download",
    )
    parser.add_argument(
        "--destination", help="Path to destination directory (default: share)"
    )
    parser.add_argument(
        "--link-format",
        default="https://github.com/rhasspy/models/releases/download/v1.0/asr_coqui-stt-{model}.tar.gz",
        help="Format string for download URLs",
    )
    args = parser.parse_args()
    logging.basicConfig(level=logging.INFO)

    if args.destination:
        args.destination = Path(args.destination)
    else:
        # Assume we're in programs/asr/coqui-stt/script
        args.destination = (
            _DIR.parent.parent.parent.parent / "data" / "asr" / "coqui-stt"
        )

    args.destination.parent.mkdir(parents=True, exist_ok=True)

    for requested in args.model:
        # Aliases (e.g. "en_large") are mapped to full model names
        model_name = MODELS.get(requested, requested)
        url = args.link_format.format(model=model_name)
        _LOGGER.info("Downloading %s", url)

        # Stream-extract the archive without writing the .tar.gz to disk.
        # NOTE(review): extractall() on a remote archive trusts its member
        # paths; consider tarfile's extraction filters on Python >= 3.12.
        with urlopen(url) as response:
            with tarfile.open(mode="r|*", fileobj=response) as tar_gz:
                _LOGGER.info("Extracting to %s", args.destination)
                tar_gz.extractall(args.destination)


if __name__ == "__main__":
    main()


================================================
FILE: programs/asr/coqui-stt/script/raw2text
================================================
#!/usr/bin/env bash
# Wrapper for bin/coqui_stt_raw2text.py: transcribe raw PCM audio from stdin.
# Activates the program's virtual environment first, if one exists.
set -eo pipefail

# Directory of *this* script
this_dir="$( cd "$( dirname "$0" )" && pwd )"

# Base directory of repo
base_dir="$(realpath "${this_dir}/..")"

# Path to virtual environment
# (override by setting the "venv" environment variable)
: "${venv:=${base_dir}/.venv}"

if [ -d "${venv}" ]; then
    source "${venv}/bin/activate"
fi

# All arguments are forwarded to the Python script
python3 "${base_dir}/bin/coqui_stt_raw2text.py" "$@"


================================================
FILE: programs/asr/coqui-stt/script/server
================================================
#!/usr/bin/env bash
# Start the Coqui STT Unix domain socket server (bin/coqui_stt_server.py).
# Activates the program's virtual environment first, if one exists.
set -eo pipefail

# Directory of *this* script
this_dir="$( cd "$( dirname "$0" )" && pwd )"

# Base directory of repo
base_dir="$(realpath "${this_dir}/..")"

# Path to virtual environment
# (override by setting the "venv" environment variable)
: "${venv:=${base_dir}/.venv}"

if [ -d "${venv}" ]; then
    source "${venv}/bin/activate"
fi

# Socket file lives under var/run inside the program directory
socket_dir="${base_dir}/var/run"
mkdir -p "${socket_dir}"

# Extra arguments (model path, --scorer, etc.) are forwarded to the server
python3 "${base_dir}/bin/coqui_stt_server.py" --socketfile "${socket_dir}/coqui-stt.socket" "$@"


================================================
FILE: programs/asr/coqui-stt/script/setup
================================================
#!/usr/bin/env bash
# One-time setup: create a Python virtual environment (if missing) and
# install this program's dependencies from requirements.txt.
set -eo pipefail

# Directory of *this* script
this_dir="$( cd "$( dirname "$0" )" && pwd )"

# Base directory of repo
base_dir="$(realpath "${this_dir}/..")"

# Path to virtual environment
# (override by setting the "venv" environment variable)
: "${venv:=${base_dir}/.venv}"

# Python binary to use
# (override by setting the "PYTHON" environment variable)
: "${PYTHON=python3}"

python_version="$(${PYTHON} --version)"

if [ ! -d "${venv}" ]; then
    # Create virtual environment
    echo "Creating virtual environment at ${venv} (${python_version})"
    rm -rf "${venv}"
    "${PYTHON}" -m venv "${venv}"
    source "${venv}/bin/activate"

    # Upgrade packaging tools before installing dependencies
    pip3 install --upgrade pip
    pip3 install --upgrade wheel setuptools
else
    # Reuse the existing virtual environment
    source "${venv}/bin/activate"
fi

# Install Python dependencies
echo 'Installing Python dependencies'
pip3 install -r "${base_dir}/requirements.txt"

# -----------------------------------------------------------------------------

echo "OK"


================================================
FILE: programs/asr/coqui-stt/script/wav2text
================================================
#!/usr/bin/env bash
# Wrapper for bin/coqui_stt_wav2text.py: transcribe one or more WAV files.
# Activates the program's virtual environment first, if one exists.
set -eo pipefail

# Directory of *this* script
this_dir="$( cd "$( dirname "$0" )" && pwd )"

# Base directory of repo
base_dir="$(realpath "${this_dir}/..")"

# Path to virtual environment
# (override by setting the "venv" environment variable)
: "${venv:=${base_dir}/.venv}"

if [ -d "${venv}" ]; then
    source "${venv}/bin/activate"
fi

# All arguments are forwarded to the Python script
python3 "${base_dir}/bin/coqui_stt_wav2text.py" "$@"


================================================
FILE: programs/asr/faster-whisper/README.md
================================================
# Faster Whisper

Speech to text service for Rhasspy based on [faster-whisper](https://github.com/guillaumekln/faster-whisper/).

Additional models can be downloaded here: https://github.com/rhasspy/models/releases/tag/v1.0

## Installation

1. Copy the contents of this directory to `config/programs/asr/faster-whisper/`
2. Run `script/setup.py`
3. Download a model with `script/download.py`
    * Example: `script/download.py tiny-int8`
    * Models are downloaded to `config/data/asr/faster-whisper` directory
4. Test with `script/wav2text`
    * Example `script/wav2text /path/to/tiny-int8/ /path/to/test.wav`


================================================
FILE: programs/asr/faster-whisper/bin/faster_whisper_server.py
================================================
#!/usr/bin/env python3
import argparse
import io
import logging
import os
import socket
import wave
from pathlib import Path

from faster_whisper import WhisperModel

from rhasspy3.asr import Transcript
from rhasspy3.audio import AudioChunk, AudioStop
from rhasspy3.event import read_event, write_event

_FILE = Path(__file__)
_DIR = _FILE.parent
_LOGGER = logging.getLogger(_FILE.stem)


def main() -> None:
    """Serve faster-whisper transcriptions over a Unix domain socket.

    Accepts one client at a time. Each connection streams audio-chunk
    events, signals the end of audio with an audio-stop event, and
    receives a single transcript event in reply.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("model", help="Path to faster-whisper model directory")
    parser.add_argument(
        "--socketfile", required=True, help="Path to Unix domain socket file"
    )
    parser.add_argument(
        "--device",
        default="cpu",
        help="Device to use for inference (default: cpu)",
    )
    parser.add_argument(
        "--language",
        help="Language to set for transcription",
    )
    parser.add_argument(
        "--compute-type",
        default="default",
        help="Compute type (float16, int8, etc.)",
    )
    parser.add_argument(
        "--beam-size",
        type=int,
        default=1,
    )
    parser.add_argument("--debug", action="store_true", help="Log DEBUG messages")
    args = parser.parse_args()

    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    # Remove a stale socket file left over from a previous run
    try:
        os.unlink(args.socketfile)
    except OSError:
        pass

    sock = None
    try:
        # Create socket server
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.bind(args.socketfile)
        sock.listen()

        # Load converted faster-whisper model
        model = WhisperModel(
            args.model, device=args.device, compute_type=args.compute_type
        )
        _LOGGER.info("Ready")

        # Listen for connections
        while True:
            try:
                connection, client_address = sock.accept()
                _LOGGER.debug("Connection from %s", client_address)

                is_first_audio = True
                with connection, connection.makefile(
                    mode="rwb"
                ) as conn_file, io.BytesIO() as wav_io:
                    wav_file: wave.Wave_write = wave.open(wav_io, "wb")
                    with wav_file:
                        while True:
                            event = read_event(conn_file)  # type: ignore
                            if event is None:
                                # Client disconnected without sending audio-stop
                                break

                            if AudioChunk.is_type(event.type):
                                chunk = AudioChunk.from_event(event)

                                if is_first_audio:
                                    # WAV parameters come from the first chunk
                                    _LOGGER.debug("Receiving audio")
                                    wav_file.setframerate(chunk.rate)
                                    wav_file.setsampwidth(chunk.width)
                                    wav_file.setnchannels(chunk.channels)
                                    is_first_audio = False

                                wav_file.writeframes(chunk.audio)
                            elif AudioStop.is_type(event.type):
                                _LOGGER.debug("Audio stopped")
                                break

                    # Transcribe the buffered WAV in one shot
                    wav_io.seek(0)
                    segments, _info = model.transcribe(
                        wav_io,
                        beam_size=args.beam_size,
                        language=args.language,
                    )
                    text = " ".join(segment.text for segment in segments)
                    _LOGGER.info(text)

                    write_event(Transcript(text=text).event(), conn_file)  # type: ignore
            except KeyboardInterrupt:
                break
            except Exception:
                _LOGGER.exception("Error communicating with socket client")
    finally:
        if sock is not None:
            sock.close()

        # The socket file may not exist if bind() failed; don't let the
        # cleanup raise and mask the original error.
        try:
            os.unlink(args.socketfile)
        except OSError:
            pass


# -----------------------------------------------------------------------------

if __name__ == "__main__":
    main()


================================================
FILE: programs/asr/faster-whisper/bin/faster_whisper_wav2text.py
================================================
#!/usr/bin/env python3
import argparse
import logging
import time
from pathlib import Path

from faster_whisper import WhisperModel

_FILE = Path(__file__)
_DIR = _FILE.parent
_LOGGER = logging.getLogger(_FILE.stem)


def main() -> None:
    """Command-line entry point: transcribe WAV files and print each transcript."""
    parser = argparse.ArgumentParser()
    parser.add_argument("model", help="Path to faster-whisper model directory")
    parser.add_argument("wav_file", nargs="+", help="Path to WAV file(s) to transcribe")
    parser.add_argument(
        "--device",
        default="cpu",
        help="Device to use for inference (default: cpu)",
    )
    parser.add_argument(
        "--language",
        help="Language to set for transcription",
    )
    parser.add_argument(
        "--compute-type",
        default="default",
        help="Compute type (float16, int8, etc.)",
    )
    parser.add_argument(
        "--beam-size",
        type=int,
        default=1,
    )
    parser.add_argument("--debug", action="store_true", help="Log DEBUG messages")
    args = parser.parse_args()

    log_level = logging.DEBUG if args.debug else logging.INFO
    logging.basicConfig(level=log_level)

    # Load converted faster-whisper model once, then reuse it for every file
    _LOGGER.debug("Loading model: %s", args.model)
    model = WhisperModel(args.model, device=args.device, compute_type=args.compute_type)
    _LOGGER.info("Model loaded")

    for wav_path in args.wav_file:
        _LOGGER.debug("Processing %s", wav_path)
        started = time.monotonic_ns()
        segments, _info = model.transcribe(
            wav_path,
            beam_size=args.beam_size,
            language=args.language,
        )
        text = " ".join(segment.text for segment in segments)
        elapsed = (time.monotonic_ns() - started) / 1e9
        _LOGGER.debug("Transcribed %s in %s second(s)", wav_path, elapsed)
        _LOGGER.debug(text)

        print(text, flush=True)


# -----------------------------------------------------------------------------

if __name__ == "__main__":
    main()


================================================
FILE: programs/asr/faster-whisper/script/download.py
================================================
#!/usr/bin/env python3
import argparse
import logging
import tarfile
from pathlib import Path
from urllib.request import urlopen

_DIR = Path(__file__).parent
_LOGGER = logging.getLogger("setup")

# Model names accepted on the command line; each is substituted into
# --link-format to build the download URL (see main() below).
MODELS = [
    "tiny",
    "tiny-int8",
    "base",
    "base-int8",
    "small",
    "small-int8",
]


def main() -> None:
    """Download and extract faster-whisper model archive(s).

    For each requested model name, the archive URL is built from
    --link-format and the tarball is streamed and extracted into the
    destination directory.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "model",
        nargs="+",
        choices=MODELS,
        help="faster-whisper model(s) to download",
    )
    parser.add_argument(
        "--destination", help="Path to destination directory (default: share)"
    )
    parser.add_argument(
        "--link-format",
        default="https://github.com/rhasspy/models/releases/download/v1.0/asr_faster-whisper-{model}.tar.gz",
        help="Format string for download URLs",
    )
    args = parser.parse_args()
    logging.basicConfig(level=logging.INFO)

    if args.destination:
        args.destination = Path(args.destination)
    else:
        # Assume we're in programs/asr/faster-whisper/script
        data_dir = _DIR.parent.parent.parent.parent / "data"
        args.destination = data_dir / "asr" / "faster-whisper"

    # Create the destination directory itself (not just its parent) so
    # extraction always has a directory to land in.
    args.destination.mkdir(parents=True, exist_ok=True)

    for model in args.model:
        url = args.link_format.format(model=model)
        _LOGGER.info("Downloading %s", url)
        with urlopen(url) as response:
            # Stream-decompress directly from the HTTP response.
            # NOTE(review): extractall() does not guard against path
            # traversal; archives are assumed to come from the trusted
            # release URL above.
            with tarfile.open(mode="r|*", fileobj=response) as tar_gz:
                _LOGGER.info("Extracting to %s", args.destination)
                tar_gz.extractall(args.destination)


if __name__ == "__main__":
    main()


================================================
FILE: programs/asr/faster-whisper/script/server
================================================
#!/usr/bin/env bash
set -eo pipefail

# Resolve the directory this script lives in
script_dir="$( cd "$( dirname "$0" )" && pwd )"

# The program's base directory is one level up from script/
program_dir="$(realpath "${script_dir}/..")"

# Virtual environment location (override by exporting $venv)
: "${venv:=${program_dir}/.venv}"

# Use the virtual environment when it exists
if [ -d "${venv}" ]; then
    . "${venv}/bin/activate"
fi

# The server listens on a Unix domain socket under var/run
socket_dir="${program_dir}/var/run"
mkdir -p "${socket_dir}"

python3 "${program_dir}/bin/faster_whisper_server.py" --socketfile "${socket_dir}/faster-whisper.socket" "$@"


================================================
FILE: programs/asr/faster-whisper/script/setup
================================================
#!/usr/bin/env bash
set -eo pipefail

# Resolve the directory this script lives in
script_dir="$( cd "$( dirname "$0" )" && pwd )"

# The program's base directory is one level up from script/
program_dir="$(realpath "${script_dir}/..")"

# Path to virtual environment (override by exporting $venv)
: "${venv:=${program_dir}/.venv}"

# Python binary to use (override by exporting $PYTHON)
: "${PYTHON=python3}"

python_version="$(${PYTHON} --version)"

if [ -d "${venv}" ]; then
    # Reuse the existing virtual environment
    . "${venv}/bin/activate"
else
    # Create a fresh virtual environment with up-to-date packaging tools
    echo "Creating virtual environment at ${venv} (${python_version})"
    rm -rf "${venv}"
    "${PYTHON}" -m venv "${venv}"
    . "${venv}/bin/activate"

    pip3 install --upgrade pip
    pip3 install --upgrade wheel setuptools
fi


# Install Python dependencies
echo 'Installing Python dependencies'
pip3 install -e "${program_dir}/src"

# Install rhasspy3
rhasspy3_dir="${program_dir}/../../../.."
pip3 install -e "${rhasspy3_dir}"

# -----------------------------------------------------------------------------

echo "OK"


================================================
FILE: programs/asr/faster-whisper/script/wav2text
================================================
#!/usr/bin/env bash
set -eo pipefail

# Resolve the directory this script lives in
script_dir="$( cd "$( dirname "$0" )" && pwd )"

# The program's base directory is one level up from script/
program_dir="$(realpath "${script_dir}/..")"

# Virtual environment location (override by exporting $venv)
: "${venv:=${program_dir}/.venv}"

# Use the virtual environment when it exists
if [ -d "${venv}" ]; then
    . "${venv}/bin/activate"
fi

python3 "${program_dir}/bin/faster_whisper_wav2text.py" "$@"


================================================
FILE: programs/asr/faster-whisper/src/LICENSE
================================================
MIT License

Copyright (c) 2023 Guillaume Klein

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


================================================
FILE: programs/asr/faster-whisper/src/README.md
================================================
# Faster Whisper transcription with CTranslate2

This repository demonstrates how to implement Whisper transcription using [CTranslate2](https://github.com/OpenNMT/CTranslate2/), a fast inference engine for Transformer models.

This implementation is about 4 times faster than [openai/whisper](https://github.com/openai/whisper) for the same accuracy while using less memory. The efficiency can be further improved with 8-bit quantization on both CPU and GPU.

## Installation

```bash
pip install -e .[conversion]
```

The model conversion requires the modules `transformers` and `torch` which are installed by the `[conversion]` requirement. Once a model is converted, these modules are no longer needed and the installation could be simplified to:

```bash
pip install -e .
```

## Usage

### Model conversion

A Whisper model should be first converted into the CTranslate2 format. For example the command below converts the "medium" Whisper model and saves the weights in FP16:

```bash
ct2-transformers-converter --model openai/whisper-medium --output_dir whisper-medium-ct2 --quantization float16
```

If needed, models can also be converted from the code. See the [conversion API](https://opennmt.net/CTranslate2/python/ctranslate2.converters.TransformersConverter.html).

### Transcription

```python
from faster_whisper import WhisperModel

model_path = "whisper-medium-ct2/"

# Run on GPU with FP16
model = WhisperModel(model_path, device="cuda", compute_type="float16")

# or run on GPU with INT8
# model = WhisperModel(model_path, device="cuda", compute_type="int8_float16")
# or run on CPU with INT8
# model = WhisperModel(model_path, device="cpu", compute_type="int8")

segments, info = model.transcribe("audio.mp3", beam_size=5)

print("Detected language '%s' with probability %f" % (info.language, info.language_probability))

for segment in segments:
    print("[%ds -> %ds] %s" % (segment.start, segment.end, segment.text))
```

## Comparing performance against openai/whisper

If you are comparing the performance against [openai/whisper](https://github.com/openai/whisper), you should make sure to use the same settings in both frameworks. In particular:

* In openai/whisper, `model.transcribe` uses a beam size of 1 by default. A different beam size will have an important impact on performance so make sure to use the same.
* When running on CPU, make sure to set the same number of threads. Both frameworks will read the environment variable `OMP_NUM_THREADS`, which can be set when running your script:

```bash
OMP_NUM_THREADS=4 python3 my_script.py
```


================================================
FILE: programs/asr/faster-whisper/src/faster_whisper/__init__.py
================================================
from faster_whisper.transcribe import WhisperModel


================================================
FILE: programs/asr/faster-whisper/src/faster_whisper/audio.py
================================================
import av
import numpy as np


def decode_audio(input_file, sampling_rate=16000):
    """Decodes the audio.

    Args:
      input_file: Path to the input file or a file-like object.
      sampling_rate: Resample the audio to this sample rate.

    Returns:
      A float32 Numpy array.
    """
    # Everything is resampled to 16-bit mono at the requested rate,
    # with samples accumulated in a FIFO as frames are decoded.
    audio_fifo = av.audio.fifo.AudioFifo()
    to_mono_s16 = av.audio.resampler.AudioResampler(
        format="s16",
        layout="mono",
        rate=sampling_rate,
    )

    with av.open(input_file) as container:
        # Decode and resample each audio frame.
        for decoded in container.decode(audio=0):
            decoded.pts = None
            for resampled in to_mono_s16.resample(decoded):
                audio_fifo.write(resampled)

        # Drain any samples still buffered inside the resampler.
        for resampled in to_mono_s16.resample(None):
            audio_fifo.write(resampled)

    merged = audio_fifo.read()

    # Scale s16 samples into float32 in [-1, 1).
    return merged.to_ndarray().flatten().astype(np.float32) / 32768.0


================================================
FILE: programs/asr/faster-whisper/src/faster_whisper/feature_extractor.py
================================================
import numpy as np


# Adapted from https://github.com/huggingface/transformers/blob/main/src/transformers/models/whisper/feature_extraction_whisper.py
class FeatureExtractor:
    """Computes log-Mel spectrogram features from a mono waveform using numpy.

    Defaults correspond to Whisper's expected input: 80 mel bins, 16 kHz
    audio, n_fft=400 windows with a 160-sample hop, and 30-second chunks.
    """

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
    ):
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        # Number of audio samples in one chunk
        self.n_samples = chunk_length * sampling_rate
        # Maximum number of feature frames produced for one chunk
        self.nb_max_frames = self.n_samples // hop_length
        # Seconds of audio advanced per feature frame
        self.time_per_frame = hop_length / sampling_rate
        self.sampling_rate = sampling_rate
        self.mel_filters = self.get_mel_filters(
            sampling_rate, n_fft, n_mels=feature_size
        )

    def get_mel_filters(self, sr, n_fft, n_mels=128, dtype=np.float32):
        """Builds a (n_mels, 1 + n_fft // 2) Slaney-style mel filterbank:
        linear spacing below 1 kHz, logarithmic spacing above."""
        # Initialize the weights
        n_mels = int(n_mels)
        weights = np.zeros((n_mels, int(1 + n_fft // 2)), dtype=dtype)

        # Center freqs of each FFT bin
        fftfreqs = np.fft.rfftfreq(n=n_fft, d=1.0 / sr)

        # 'Center freqs' of mel bands - uniformly spaced between limits
        min_mel = 0.0
        # NOTE(review): presumably the Slaney-scale mel value of the 8 kHz
        # upper edge used by Whisper — confirm against the upstream extractor
        max_mel = 45.245640471924965

        mels = np.linspace(min_mel, max_mel, n_mels + 2)

        mels = np.asanyarray(mels)

        # Fill in the linear scale
        f_min = 0.0
        f_sp = 200.0 / 3
        freqs = f_min + f_sp * mels

        # And now the nonlinear scale
        min_log_hz = 1000.0  # beginning of log region (Hz)
        min_log_mel = (min_log_hz - f_min) / f_sp  # same (Mels)
        logstep = np.log(6.4) / 27.0  # step size for log region

        # If we have vector data, vectorize
        log_t = mels >= min_log_mel
        freqs[log_t] = min_log_hz * np.exp(logstep * (mels[log_t] - min_log_mel))

        mel_f = freqs

        fdiff = np.diff(mel_f)
        ramps = np.subtract.outer(mel_f, fftfreqs)

        for i in range(n_mels):
            # lower and upper slopes for all bins
            lower = -ramps[i] / fdiff[i]
            upper = ramps[i + 2] / fdiff[i + 1]

            # .. then intersect them with each other and zero
            weights[i] = np.maximum(0, np.minimum(lower, upper))

        # Slaney-style mel is scaled to be approx constant energy per channel
        enorm = 2.0 / (mel_f[2 : n_mels + 2] - mel_f[:n_mels])
        weights *= enorm[:, np.newaxis]

        return weights

    def fram_wave(self, waveform, center=True):
        """
        Transform a raw waveform into a stack of overlapping frames.

        The window length (n_fft) defines how much of the signal is
        contained in each frame (smaller waveform), while the hop length
        defines the step between the beginnings of consecutive frames.
        With center=True, each frame is centered on `frame_idx * hop_length`
        and the edges are reflect-padded so every frame is n_fft samples.
        """
        frames = []
        for i in range(0, waveform.shape[0] + 1, self.hop_length):
            # Samples taken on each side of the frame center
            half_window = (self.n_fft - 1) // 2 + 1
            if center:
                start = i - half_window if i > half_window else 0
                end = (
                    i + half_window
                    if i < waveform.shape[0] - half_window
                    else waveform.shape[0]
                )

                frame = waveform[start:end]

                # Reflect-pad frames that run off either edge of the waveform
                if start == 0:
                    padd_width = (-i + half_window, 0)
                    frame = np.pad(frame, pad_width=padd_width, mode="reflect")

                elif end == waveform.shape[0]:
                    padd_width = (0, (i - waveform.shape[0] + half_window))
                    frame = np.pad(frame, pad_width=padd_width, mode="reflect")

            else:
                # Non-centered: frame starts at i and is zero-padded to n_fft
                frame = waveform[i : i + self.n_fft]
                frame_width = frame.shape[0]
                if frame_width < waveform.shape[0]:
                    frame = np.lib.pad(
                        frame,
                        pad_width=(0, self.n_fft - frame_width),
                        mode="constant",
                        constant_values=0,
                    )

            frames.append(frame)
        return np.stack(frames, 0)

    def stft(self, frames, window):
        """
        Calculates the complex Short-Time Fourier Transform (STFT) of the given framed signal.
        Should give the same results as `torch.stft`.
        """
        frame_size = frames.shape[1]
        fft_size = self.n_fft

        if fft_size is None:
            fft_size = frame_size

        if fft_size < frame_size:
            raise ValueError("FFT size must greater or equal the frame size")
        # number of FFT bins to store
        num_fft_bins = (fft_size >> 1) + 1

        data = np.empty((len(frames), num_fft_bins), dtype=np.complex64)
        # Scratch buffer reused for every frame (zero-padded to fft_size)
        fft_signal = np.zeros(fft_size)

        for f, frame in enumerate(frames):
            if window is not None:
                np.multiply(frame, window, out=fft_signal[:frame_size])
            else:
                fft_signal[:frame_size] = frame
            data[f] = np.fft.fft(fft_signal, axis=0)[:num_fft_bins]
        return data.T

    def __call__(self, waveform):
        """
        Compute the log-Mel spectrogram of the provided audio; gives similar results
        to Whisper's original torch implementation with 1e-5 tolerance.
        """
        # Periodic Hann window of length n_fft
        window = np.hanning(self.n_fft + 1)[:-1]

        frames = self.fram_wave(waveform)
        stft = self.stft(frames, window=window)
        # Power spectrum; the last frame is dropped
        magnitudes = np.abs(stft[:, :-1]) ** 2

        filters = self.mel_filters
        mel_spec = filters @ magnitudes

        # Log compression, clamped to an 8-decade dynamic range, then
        # shifted/scaled to roughly [-1, 1] as Whisper expects
        log_spec = np.log10(np.clip(mel_spec, a_min=1e-10, a_max=None))
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0

        return log_spec


================================================
FILE: programs/asr/faster-whisper/src/faster_whisper/transcribe.py
================================================
import collections
import os
import zlib

import ctranslate2
import numpy as np
import tokenizers
from faster_whisper.audio import decode_audio
from faster_whisper.feature_extractor import FeatureExtractor


class Segment(collections.namedtuple("Segment", ("start", "end", "text"))):
    """A transcribed span of audio: start/end times and the decoded text."""


class AudioInfo(
    collections.namedtuple("AudioInfo", ("language", "language_probability"))
):
    """Detected (or user-provided) language and its probability."""


class TranscriptionOptions(
    collections.namedtuple(
        "TranscriptionOptions",
        (
            "beam_size",
            "best_of",
            "patience",
            "log_prob_threshold",
            "no_speech_threshold",
            "compression_ratio_threshold",
            "condition_on_previous_text",
            "temperatures",
        ),
    )
):
    """Immutable bundle of decoding settings threaded through transcription."""


class WhisperModel:
    """Whisper speech-to-text model running on the CTranslate2 engine."""

    def __init__(
        self,
        model_path,
        device="auto",
        compute_type="default",
        cpu_threads=0,
    ):
        """Initializes the Whisper model.

        Args:
          model_path: Path to the converted model.
          device: Device to use for computation ("cpu", "cuda", "auto").
          compute_type: Type to use for computation.
            See https://opennmt.net/CTranslate2/quantization.html.
          cpu_threads: Number of threads to use when running on CPU (4 by default).
            A non zero value overrides the OMP_NUM_THREADS environment variable.
        """
        self.model = ctranslate2.models.Whisper(
            model_path,
            device=device,
            compute_type=compute_type,
            intra_threads=cpu_threads,
        )

        self.feature_extractor = FeatureExtractor()
        self.decoder = tokenizers.decoders.ByteLevel()

        # vocabulary.txt has one token per line; the line index is the token id.
        with open(os.path.join(model_path, "vocabulary.txt")) as vocab_file:
            self.ids_to_tokens = [line.rstrip("\n") for line in vocab_file]
            self.tokens_to_ids = {
                token: i for i, token in enumerate(self.ids_to_tokens)
            }

        self.eot_id = self.tokens_to_ids["<|endoftext|>"]
        # Timestamp tokens directly follow <|notimestamps|> in the vocabulary.
        self.timestamp_begin_id = self.tokens_to_ids["<|notimestamps|>"] + 1
        # Feature frames advanced per timestamp unit (used to move the window).
        self.input_stride = 2
        # Seconds represented by one timestamp unit.
        self.time_precision = 0.02
        self.max_length = 448

    def transcribe(
        self,
        input_file,
        language=None,
        beam_size=5,
        best_of=5,
        patience=1,
        # Tuple (not list) so the default argument is immutable.
        temperature=(0.0, 0.2, 0.4, 0.6, 0.8, 1.0),
        compression_ratio_threshold=2.4,
        log_prob_threshold=-1.0,
        no_speech_threshold=0.6,
        condition_on_previous_text=True,
    ):
        """Transcribes an input file.

        Arguments:
          input_file: Path to the input file or a file-like object.
          language: The language spoken in the audio. If not set, the language will be
            detected in the first 30 seconds of audio.
          beam_size: Beam size to use for decoding.
          best_of: Number of candidates when sampling with non-zero temperature.
          patience: Beam search patience factor.
          temperature: Temperature for sampling. It can be a tuple (or list) of
            temperatures, which will be successively used upon failures according to
            either `compression_ratio_threshold` or `log_prob_threshold`.
          compression_ratio_threshold: If the gzip compression ratio is above this value,
            treat as failed.
          log_prob_threshold: If the average log probability over sampled tokens is
            below this value, treat as failed.
          no_speech_threshold: If the no_speech probability is higher than this value AND
            the average log probability over sampled tokens is below `log_prob_threshold`,
            consider the segment as silent.
          condition_on_previous_text: If True, the previous output of the model is provided
            as a prompt for the next window; disabling may make the text inconsistent across
            windows, but the model becomes less prone to getting stuck in a failure loop,
            such as repetition looping or timestamps going out of sync.

        Returns:
          A tuple with:

            - a generator over transcribed segments
            - an instance of AudioInfo
        """
        audio = decode_audio(
            input_file, sampling_rate=self.feature_extractor.sampling_rate
        )
        features = self.feature_extractor(audio)

        if language is None:
            # Detect the language from the first feature window.
            segment = self.get_segment(features)
            encoder_input = self.get_input(segment)
            results = self.model.detect_language(encoder_input)
            language_token, language_probability = results[0][0]
            # The token looks like "<|en|>"; strip the surrounding markers.
            language = language_token[2:-2]
        else:
            language_probability = 1

        options = TranscriptionOptions(
            beam_size=beam_size,
            best_of=best_of,
            patience=patience,
            log_prob_threshold=log_prob_threshold,
            no_speech_threshold=no_speech_threshold,
            compression_ratio_threshold=compression_ratio_threshold,
            condition_on_previous_text=condition_on_previous_text,
            temperatures=(
                temperature if isinstance(temperature, (list, tuple)) else [temperature]
            ),
        )

        segments = self.generate_segments(features, language, options)

        audio_info = AudioInfo(
            language=language,
            language_probability=language_probability,
        )

        return segments, audio_info

    def generate_segments(self, features, language, options):
        """Yields Segment tuples with decoded text, skipping empty segments."""
        tokenized_segments = self.generate_tokenized_segments(
            features, language, options
        )

        for start, end, tokens in tokenized_segments:
            text = self.decode_text_tokens(tokens)
            if not text.strip():
                continue

            yield Segment(
                start=start,
                end=end,
                text=text,
            )

    def generate_tokenized_segments(self, features, language, options):
        """Yields (start_time, end_time, tokens) for each decoded sub-segment,
        sliding a fixed-size window over the full feature matrix."""
        num_frames = features.shape[-1]
        offset = 0
        all_tokens = []
        prompt_reset_since = 0

        while offset < num_frames:
            time_offset = offset * self.feature_extractor.time_per_frame
            segment = self.get_segment(features, offset)
            segment_duration = segment.shape[-1] * self.feature_extractor.time_per_frame

            previous_tokens = all_tokens[prompt_reset_since:]
            prompt = self.get_prompt(language, previous_tokens)
            result, temperature = self.generate_with_fallback(segment, prompt, options)

            # Skip windows that are likely silence.
            if (
                result.no_speech_prob > options.no_speech_threshold
                and result.scores[0] < options.log_prob_threshold
            ):
                offset += segment.shape[-1]
                continue

            tokens = result.sequences_ids[0]

            # Positions where two timestamp tokens appear back to back:
            # these mark the boundaries between sub-segments in this window.
            consecutive_timestamps = [
                i
                for i in range(len(tokens))
                if i > 0
                and tokens[i] >= self.timestamp_begin_id
                and tokens[i - 1] >= self.timestamp_begin_id
            ]

            if len(consecutive_timestamps) > 0:
                last_slice = 0
                for i, current_slice in enumerate(consecutive_timestamps):
                    sliced_tokens = tokens[last_slice:current_slice]
                    start_timestamp_position = (
                        sliced_tokens[0] - self.timestamp_begin_id
                    )
                    end_timestamp_position = sliced_tokens[-1] - self.timestamp_begin_id
                    start_time = (
                        time_offset + start_timestamp_position * self.time_precision
                    )
                    end_time = (
                        time_offset + end_timestamp_position * self.time_precision
                    )

                    last_in_window = i + 1 == len(consecutive_timestamps)

                    # Include the last timestamp so that all tokens are included in a segment.
                    if last_in_window:
                        sliced_tokens.append(tokens[current_slice])

                    yield start_time, end_time, sliced_tokens
                    last_slice = current_slice

                # Advance the window to the last decoded timestamp.
                last_timestamp_position = (
                    tokens[last_slice - 1] - self.timestamp_begin_id
                )
                offset += last_timestamp_position * self.input_stride
                all_tokens.extend(tokens[: last_slice + 1])

            else:
                # No internal boundaries: emit the whole window as one segment.
                duration = segment_duration
                timestamps = [
                    token for token in tokens if token >= self.timestamp_begin_id
                ]
                if len(timestamps) > 0 and timestamps[-1] != self.timestamp_begin_id:
                    last_timestamp_position = timestamps[-1] - self.timestamp_begin_id
                    duration = last_timestamp_position * self.time_precision

                yield time_offset, time_offset + duration, tokens

                offset += segment.shape[-1]
                all_tokens.extend(tokens)

            # Drop the conditioning context when it is disabled or when a
            # high temperature suggests the previous output was unreliable.
            if not options.condition_on_previous_text or temperature > 0.5:
                prompt_reset_since = len(all_tokens)

    def decode_text_tokens(self, tokens):
        """Decodes token ids into text, dropping special (>= eot) tokens."""
        text_tokens = [
            self.ids_to_tokens[token] for token in tokens if token < self.eot_id
        ]

        return self.decoder.decode(text_tokens)

    def generate_with_fallback(self, segment, prompt, options):
        """Decodes a segment, retrying at successively higher temperatures until
        the output passes the compression-ratio and log-probability checks.

        Returns (result, temperature) for the last attempt.
        """
        features = self.get_input(segment)
        result = None
        final_temperature = None

        for temperature in options.temperatures:
            if temperature > 0:
                # Sampling mode for non-zero temperatures.
                kwargs = {
                    "beam_size": 1,
                    "num_hypotheses": options.best_of,
                    "sampling_topk": 0,
                    "sampling_temperature": temperature,
                }
            else:
                # Beam search for temperature 0.
                kwargs = {
                    "beam_size": options.beam_size,
                    "patience": options.patience,
                }

            final_temperature = temperature
            result = self.model.generate(
                features,
                [prompt],
                max_length=self.max_length,
                return_scores=True,
                return_no_speech_prob=True,
                **kwargs,
            )[0]

            tokens = result.sequences_ids[0]
            text = self.decode_text_tokens(tokens)
            compression_ratio = get_compression_ratio(text)

            if (
                compression_ratio <= options.compression_ratio_threshold
                and result.scores[0] >= options.log_prob_threshold
            ):
                break

        return result, final_temperature

    def get_prompt(self, language, previous_tokens):
        """Builds the decoder prompt: optional previous-text context followed
        by the start-of-transcript, language, and task tokens."""
        prompt = []

        if previous_tokens:
            prompt.append(self.tokens_to_ids["<|startofprev|>"])
            # Keep at most half the max length for context.
            prompt.extend(previous_tokens[-(self.max_length // 2 - 1) :])

        prompt += [
            self.tokens_to_ids["<|startoftranscript|>"],
            self.tokens_to_ids["<|%s|>" % language],
            self.tokens_to_ids["<|transcribe|>"],
        ]

        return prompt

    def get_segment(self, features, offset=0):
        """Returns a feature window of exactly nb_max_frames frames starting at
        `offset`, truncating or zero-padding on the right as needed."""
        if offset > 0:
            features = features[:, offset:]

        num_frames = features.shape[-1]
        required_num_frames = self.feature_extractor.nb_max_frames

        if num_frames > required_num_frames:
            features = features[:, :required_num_frames]
        elif num_frames < required_num_frames:
            pad_widths = [(0, 0), (0, required_num_frames - num_frames)]
            features = np.pad(features, pad_widths)

        features = np.ascontiguousarray(features)
        return features

    def get_input(self, segment):
        """Wraps a single segment in a batch dimension as a CTranslate2 StorageView."""
        segment = np.expand_dims(segment, 0)
        segment = ctranslate2.StorageView.from_array(segment)
        return segment


def get_compression_ratio(text):
    """Ratio of raw UTF-8 byte length to zlib-compressed length.

    Higher values indicate more repetitive text (compresses better).
    """
    raw = text.encode("utf-8")
    return len(raw) / len(zlib.compress(raw))


================================================
FILE: programs/asr/faster-whisper/src/requirements.conversion.txt
================================================
transformers[torch]>=4.23


================================================
FILE: programs/asr/faster-whisper/src/requirements.txt
================================================
av==10.*
ctranslate2>=3.5,<4
tokenizers==0.13.*


================================================
FILE: programs/asr/faster-whisper/src/setup.py
================================================
import os

from setuptools import find_packages, setup


def get_requirements(path):
    """Read a requirements file and return its lines, whitespace-stripped."""
    with open(path, encoding="utf-8") as requirements_file:
        return [line.strip() for line in requirements_file]


# Resolve requirement files relative to this setup.py so installation
# works regardless of the current working directory.
base_dir = os.path.dirname(os.path.abspath(__file__))
install_requires = get_requirements(os.path.join(base_dir, "requirements.txt"))
# Extra dependencies needed only for model conversion (the "conversion" extra).
conversion_requires = get_requirements(
    os.path.join(base_dir, "requirements.conversion.txt")
)

setup(
    name="faster-whisper",
    version="0.1.0",
    description="Faster Whisper transcription with CTranslate2",
    author="Guillaume Klein",
    python_requires=">=3.7",
    install_requires=install_requires,
    extras_require={
        # Installed via `pip install faster-whisper[conversion]`
        "conversion": conversion_requires,
    },
    packages=find_packages(),
)


================================================
FILE: programs/asr/pocketsphinx/README.md
================================================
# Pocketsphinx

Speech to text service for Rhasspy based on [Pocketsphinx](https://github.com/cmusphinx/pocketsphinx).

Additional models can be downloaded here: https://github.com/synesthesiam/voice2json-profiles

Model directories should have this layout:

* model/
    * acoustic_model/
    * dictionary.txt
    * language_model.txt
    
These correspond to the `-hmm`, `-dict`, and `-lm` decoder arguments.

## Installation

1. Copy the contents of this directory to `config/programs/asr/pocketsphinx/`
2. Run `script/setup`
3. Download a model with `script/download.py`
    * Example: `script/download.py en_cmu`
    * Models are downloaded to the `config/data/asr/pocketsphinx` directory
4. Test with `script/wav2text`
    * Example: `script/wav2text /path/to/en-us_pocketsphinx-cmu/ /path/to/test.wav`


================================================
FILE: programs/asr/pocketsphinx/bin/pocketsphinx_raw2text.py
================================================
#!/usr/bin/env python3
import argparse
import logging
import sys
from pathlib import Path

import pocketsphinx

_LOGGER = logging.getLogger("pocketsphinx_raw2text")


def main() -> None:
    """Read raw PCM audio from stdin and print the Pocketsphinx transcript."""
    parser = argparse.ArgumentParser()
    parser.add_argument("model", help="Path to Pocketsphinx model directory")
    parser.add_argument(
        "--samples-per-chunk",
        type=int,
        default=1024,
        help="Number of samples to process at a time",
    )
    parser.add_argument("--debug", action="store_true", help="Log DEBUG messages")
    args = parser.parse_args()

    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    model_dir = Path(args.model)
    _LOGGER.debug("Loading model from %s", model_dir.absolute())

    # Expected model layout: acoustic_model/, dictionary.txt, language_model.txt
    config = pocketsphinx.Decoder.default_config()
    config.set_string("-hmm", str(model_dir / "acoustic_model"))
    config.set_string("-dict", str(model_dir / "dictionary.txt"))
    config.set_string("-lm", str(model_dir / "language_model.txt"))
    decoder = pocketsphinx.Decoder(config)

    decoder.start_utt()
    _LOGGER.debug("Processing audio")
    # NOTE(review): read() takes a byte count, so each chunk is
    # samples-per-chunk *bytes* (half that many 16-bit samples).
    while True:
        chunk = sys.stdin.buffer.read(args.samples_per_chunk)
        if not chunk:
            break
        decoder.process_raw(chunk, False, False)
    decoder.end_utt()

    hyp = decoder.hyp()
    text = hyp.hypstr if hyp else ""
    _LOGGER.debug(text)
    print(text.strip())


# -----------------------------------------------------------------------------

if __name__ == "__main__":
    main()


================================================
FILE: programs/asr/pocketsphinx/bin/pocketsphinx_server.py
================================================
#!/usr/bin/env python3
import argparse
import json
import logging
import os
import socket
import threading
from pathlib import Path

import pocketsphinx

_LOGGER = logging.getLogger("pocketsphinx_server")


def main() -> None:
    """Serve Pocketsphinx transcription over a Unix domain socket.

    Each accepted connection is handled in its own daemon thread by
    handle_client().
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("model", help="Path to Pocketsphinx model directory")
    parser.add_argument(
        "--socketfile", required=True, help="Path to Unix domain socket file"
    )
    parser.add_argument(
        "-r",
        "--rate",
        type=int,
        default=16000,
        help="Input audio sample rate (default: 16000)",
    )
    parser.add_argument("--debug", action="store_true", help="Log DEBUG messages")
    args = parser.parse_args()

    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    # Need to unlink socket if it exists (stale file from a previous run)
    try:
        os.unlink(args.socketfile)
    except OSError:
        pass

    try:
        # Create socket server
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.bind(args.socketfile)
        sock.listen()

        model_dir = Path(args.model)

        # Expected model layout: acoustic_model/, dictionary.txt, language_model.txt
        decoder_config = pocketsphinx.Decoder.default_config()
        decoder_config.set_string("-hmm", str(model_dir / "acoustic_model"))
        decoder_config.set_string("-dict", str(model_dir / "dictionary.txt"))
        decoder_config.set_string("-lm", str(model_dir / "language_model.txt"))
        decoder = pocketsphinx.Decoder(decoder_config)

        _LOGGER.info("Ready")

        # Listen for connections
        while True:
            try:
                connection, client_address = sock.accept()
                _LOGGER.debug("Connection from %s", client_address)

                # Start new thread for client
                # NOTE(review): the single decoder instance is shared by all
                # client threads with no locking; concurrent clients could
                # interleave utterance state. Confirm clients connect serially.
                threading.Thread(
                    target=handle_client,
                    args=(connection, decoder, args.rate),
                    daemon=True,
                ).start()
            except KeyboardInterrupt:
                break
            except Exception:
                _LOGGER.exception("Error communicating with socket client")
    finally:
        # Remove the socket file on shutdown (sock itself is never closed here)
        os.unlink(args.socketfile)


def handle_client(
    connection: socket.socket, decoder: pocketsphinx.Decoder, rate: int
) -> None:
    """Handle one client connection: stream audio into the decoder, reply
    with a transcript.

    The wire protocol is newline-delimited JSON events with a "type" field.
    "audio-chunk" events carry "payload_length" raw PCM bytes immediately
    after the JSON line; "audio-stop" ends the utterance and triggers a
    single JSON "transcript" reply. The ``rate`` argument is accepted but
    not used by any call in this function.
    """
    try:
        decoder.start_utt()
        is_first_audio = True

        with connection, connection.makefile(mode="rwb") as conn_file:
            while True:
                event_info = json.loads(conn_file.readline())
                event_type = event_info["type"]

                if event_type == "audio-chunk":
                    if is_first_audio:
                        # Log only once per connection to avoid log spam
                        _LOGGER.debug("Receiving audio")
                        is_first_audio = False

                    # Raw PCM payload follows directly after the JSON header line
                    num_bytes = event_info["payload_length"]
                    chunk = conn_file.read(num_bytes)
                    decoder.process_raw(chunk, False, False)
                elif event_type == "audio-stop":
                    _LOGGER.info("Audio stopped")

                    decoder.end_utt()
                    hyp = decoder.hyp()
                    if hyp:
                        text = hyp.hypstr.strip()
                    else:
                        # No hypothesis (e.g. silence): return an empty transcript
                        text = ""

                    # Reply with a single JSON line, then close the connection
                    transcript_str = (
                        json.dumps(
                            {"type": "transcript", "data": {"text": text}},
                            ensure_ascii=False,
                        )
                        + "\n"
                    )
                    conn_file.write(transcript_str.encode())
                    break
    except Exception:
        # Log and let the thread exit; the server keeps serving other clients
        _LOGGER.exception("Unexpected error in client thread")


# -----------------------------------------------------------------------------

if __name__ == "__main__":
    main()


================================================
FILE: programs/asr/pocketsphinx/bin/pocketsphinx_wav2text.py
================================================
#!/usr/bin/env python3
import argparse
import logging
import wave
from pathlib import Path

import pocketsphinx

_LOGGER = logging.getLogger("pocketsphinx_wav2text")


def main() -> None:
    """Transcribe one or more 16Khz mono WAV files and print each transcript."""
    parser = argparse.ArgumentParser()
    parser.add_argument("model", help="Path to Pocketsphinx model directory")
    parser.add_argument("wav_file", nargs="+", help="Path to WAV file(s) to transcribe")
    parser.add_argument("--debug", action="store_true", help="Log DEBUG messages")
    args = parser.parse_args()

    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    model_dir = Path(args.model)
    _LOGGER.debug("Loading model from %s", model_dir.absolute())

    # Expected model layout: acoustic_model/, dictionary.txt, language_model.txt
    config = pocketsphinx.Decoder.default_config()
    config.set_string("-hmm", str(model_dir / "acoustic_model"))
    config.set_string("-dict", str(model_dir / "dictionary.txt"))
    config.set_string("-lm", str(model_dir / "language_model.txt"))
    decoder = pocketsphinx.Decoder(config)

    for wav_path in args.wav_file:
        _LOGGER.debug("Processing %s", wav_path)
        with wave.open(wav_path, "rb") as wav_file:
            assert wav_file.getframerate() == 16000, "16Khz sample rate required"
            assert wav_file.getsampwidth() == 2, "16-bit samples required"
            assert wav_file.getnchannels() == 1, "Mono audio required"
            audio_bytes = wav_file.readframes(wav_file.getnframes())

        # Feed the whole utterance in a single call (full_utt=True)
        decoder.start_utt()
        decoder.process_raw(audio_bytes, False, True)
        decoder.end_utt()

        hyp = decoder.hyp()
        print(hyp.hypstr.strip() if hyp else "")


# -----------------------------------------------------------------------------

if __name__ == "__main__":
    main()


================================================
FILE: programs/asr/pocketsphinx/requirements.txt
================================================
pocketsphinx @ https://github.com/synesthesiam/pocketsphinx-python/releases/download/v1.0/pocketsphinx-python.tar.gz


================================================
FILE: programs/asr/pocketsphinx/script/download.py
================================================
#!/usr/bin/env python3
import argparse
import itertools
import logging
import tarfile
from pathlib import Path
from urllib.request import urlopen

_DIR = Path(__file__).parent
_LOGGER = logging.getLogger("setup")

MODELS = {"en_cmu": "en-us_pocketsphinx-cmu"}


def main() -> None:
    """Download and extract one or more Pocketsphinx model archives."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "model",
        nargs="+",
        choices=list(itertools.chain(MODELS.keys(), MODELS.values())),
        help="Pocketsphinx model(s) to download",
    )
    parser.add_argument("--destination", help="Path to destination directory")
    parser.add_argument(
        "--link-format",
        default="https://github.com/rhasspy/models/releases/download/v1.0/asr_pocketsphinx-{model}.tar.gz",
        help="Format string for download URLs",
    )
    args = parser.parse_args()
    logging.basicConfig(level=logging.INFO)

    if args.destination:
        args.destination = Path(args.destination)
    else:
        # Assume we're in programs/asr/pocketsphinx/script
        data_dir = _DIR.parent.parent.parent.parent / "data"
        args.destination = data_dir / "asr" / "pocketsphinx"

    args.destination.parent.mkdir(parents=True, exist_ok=True)

    for model_name in args.model:
        # Accept either the short alias (MODELS key) or the full model name
        full_name = MODELS.get(model_name, model_name)
        url = args.link_format.format(model=full_name)
        _LOGGER.info("Downloading %s", url)
        with urlopen(url) as response:
            with tarfile.open(mode="r|*", fileobj=response) as tar_gz:
                _LOGGER.info("Extracting to %s", args.destination)
                # NOTE(review): extractall trusts member paths from a
                # downloaded archive; consider a member filter if the
                # download source is not fully trusted.
                tar_gz.extractall(args.destination)


if __name__ == "__main__":
    main()


================================================
FILE: programs/asr/pocketsphinx/script/raw2text
================================================
#!/usr/bin/env bash
set -eo pipefail

# Directory of *this* script
this_dir="$( cd "$( dirname "$0" )" && pwd )"

# Base directory of repo
base_dir="$(realpath "${this_dir}/..")"

# Path to virtual environment (override by exporting venv=...)
: "${venv:=${base_dir}/.venv}"

# Activate the virtual environment if it exists
if [ -d "${venv}" ]; then
    source "${venv}/bin/activate"
fi

# Forward all arguments to the raw-audio transcriber
python3 "${base_dir}/bin/pocketsphinx_raw2text.py" "$@"


================================================
FILE: programs/asr/pocketsphinx/script/server
================================================
#!/usr/bin/env bash
set -eo pipefail

# Directory of *this* script
this_dir="$( cd "$( dirname "$0" )" && pwd )"

# Base directory of repo
base_dir="$(realpath "${this_dir}/..")"

# Path to virtual environment (override by exporting venv=...)
: "${venv:=${base_dir}/.venv}"

# Activate the virtual environment if it exists
if [ -d "${venv}" ]; then
    source "${venv}/bin/activate"
fi

# Directory for the Unix domain socket file
socket_dir="${base_dir}/var/run"
mkdir -p "${socket_dir}"

# Start the socket server; extra arguments are passed through
python3 "${base_dir}/bin/pocketsphinx_server.py" --socketfile "${socket_dir}/pocketsphinx.socket" "$@"


================================================
FILE: programs/asr/pocketsphinx/script/setup
================================================
#!/usr/bin/env bash
set -eo pipefail

# Directory of *this* script
this_dir="$( cd "$( dirname "$0" )" && pwd )"

# Base directory of repo
base_dir="$(realpath "${this_dir}/..")"

# Path to virtual environment (override by exporting venv=...)
: "${venv:=${base_dir}/.venv}"

# Python binary to use (override by exporting PYTHON=...)
: "${PYTHON=python3}"

python_version="$(${PYTHON} --version)"

if [ ! -d "${venv}" ]; then
    # Create virtual environment
    echo "Creating virtual environment at ${venv} (${python_version})"
    rm -rf "${venv}"
    "${PYTHON}" -m venv "${venv}"
    source "${venv}/bin/activate"

    # Upgrade packaging tools inside the fresh environment
    pip3 install --upgrade pip
    pip3 install --upgrade wheel setuptools
else
    source "${venv}/bin/activate"
fi

# Install Python dependencies
echo 'Installing Python dependencies'
pip3 install -r "${base_dir}/requirements.txt"

# -----------------------------------------------------------------------------

echo "OK"


================================================
FILE: programs/asr/pocketsphinx/script/wav2text
================================================
#!/usr/bin/env bash
set -eo pipefail

# Directory of *this* script
this_dir="$( cd "$( dirname "$0" )" && pwd )"

# Base directory of repo
base_dir="$(realpath "${this_dir}/..")"

# Path to virtual environment (override by exporting venv=...)
: "${venv:=${base_dir}/.venv}"

# Activate the virtual environment if it exists
if [ -d "${venv}" ]; then
    source "${venv}/bin/activate"
fi

# Forward all arguments to the WAV transcriber
python3 "${base_dir}/bin/pocketsphinx_wav2text.py" "$@"


================================================
FILE: programs/asr/vosk/README.md
================================================
# Vosk

Speech to text service for Rhasspy based on [Vosk](https://alphacephei.com/vosk/).

You can download additional models here: https://alphacephei.com/vosk/models


## Installation

1. Copy the contents of this directory to `config/programs/asr/vosk/`
2. Run `script/setup`
3. Download a model with `script/download.py`
    * Example: `script/download.py en_small`
    * Models are downloaded to the `config/data/asr/vosk` directory
4. Test with `script/wav2text`
    * Example: `script/wav2text /path/to/vosk-model-small-en-us-0.15/ /path/to/test.wav`


================================================
FILE: programs/asr/vosk/bin/vosk_raw2text.py
================================================
#!/usr/bin/env python3
import argparse
import json
import logging
import sys

from vosk import KaldiRecognizer, Model, SetLogLevel

_LOGGER = logging.getLogger("vosk_raw2text")


def main() -> None:
    """Read raw 16-bit mono PCM from stdin and print the Vosk transcript."""
    parser = argparse.ArgumentParser()
    parser.add_argument("model", help="Path to Vosk model directory")
    parser.add_argument(
        "-r",
        "--rate",
        type=int,
        default=16000,
        help="Model sample rate (default: 16000)",
    )
    parser.add_argument(
        "--samples-per-chunk",
        type=int,
        default=1024,
        help="Number of samples to process at a time",
    )
    # Consistent with the sibling asr scripts (e.g. pocketsphinx_raw2text.py).
    # Without this flag the _LOGGER.debug() message below was never visible,
    # since the log level was fixed at INFO.
    parser.add_argument("--debug", action="store_true", help="Log DEBUG messages")
    args = parser.parse_args()
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    SetLogLevel(0)

    model = Model(args.model)
    recognizer = KaldiRecognizer(
        model,
        args.rate,
    )

    # NOTE(review): read() takes a byte count, so each chunk is
    # samples-per-chunk *bytes* (half that many 16-bit samples).
    chunk = sys.stdin.buffer.read(args.samples_per_chunk)
    _LOGGER.debug("Processing audio")
    while chunk:
        recognizer.AcceptWaveform(chunk)
        chunk = sys.stdin.buffer.read(args.samples_per_chunk)

    result = json.loads(recognizer.FinalResult())
    # .get guards against a result object without a "text" field
    print(result.get("text", "").strip())


# -----------------------------------------------------------------------------

if __name__ == "__main__":
    main()


================================================
FILE: programs/asr/vosk/bin/vosk_server.py
================================================
#!/usr/bin/env python3
import argparse
import json
import logging
import os
import socket
import threading

from vosk import KaldiRecognizer, Model, SetLogLevel

_LOGGER = logging.getLogger("vosk_server")


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("model", help="Path to Vosk model directory")
    parser.add_argument(
        "--socketfile", required=True, help="Path to Unix domain socket file"
    )
    parser.add_argument(
        "-r",
        "--rate",
        type=int,
        default=16000,
        help="Input audio sample rate (default: 16000)",
    )
    parser.add_argument("--debug", action="store_true", help="Log DEBUG messages")
    args = parser.parse_args()

    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    # Need to unlink socket if it exists
    try:
        os.unlink(args.socketfile)
    except OSError:
        pass

    try:
        # Create socket server
        sock = socket.socket(socket.AF_UNIX, s
Download .txt
gitextract_7danshym/

├── .gitignore
├── .gitmodules
├── .isort.cfg
├── LICENSE.md
├── README.md
├── bin/
│   ├── asr_adapter_raw2text.py
│   ├── asr_adapter_wav2text.py
│   ├── asr_transcribe.py
│   ├── asr_transcribe_stream.py
│   ├── asr_transcribe_wav.py
│   ├── client_unix_socket.py
│   ├── config_print.py
│   ├── handle_adapter_json.py
│   ├── handle_adapter_text.py
│   ├── handle_intent.py
│   ├── handle_text.py
│   ├── intent_recognize.py
│   ├── mic_adapter_raw.py
│   ├── mic_record_sample.py
│   ├── mic_test_energy.py
│   ├── pipeline_run.py
│   ├── program_download.py
│   ├── program_install.py
│   ├── satellite_run.py
│   ├── server_run.py
│   ├── snd_adapter_raw.py
│   ├── snd_play.py
│   ├── tts_adapter_http.py
│   ├── tts_adapter_text2wav.py
│   ├── tts_speak.py
│   ├── tts_synthesize.py
│   ├── vad_adapter_raw.py
│   ├── vad_segment_wav.py
│   ├── wake_adapter_raw.py
│   └── wake_detect.py
├── docs/
│   ├── README.md
│   ├── adapters.md
│   ├── domains.md
│   ├── home_assistant.md
│   ├── satellite.md
│   ├── tutorial.md
│   └── wyoming.md
├── examples/
│   └── satellite/
│       └── configuration.yaml
├── mypy.ini
├── programs/
│   ├── asr/
│   │   ├── coqui-stt/
│   │   │   ├── README.md
│   │   │   ├── bin/
│   │   │   │   ├── coqui_stt_raw2text.py
│   │   │   │   ├── coqui_stt_server.py
│   │   │   │   └── coqui_stt_wav2text.py
│   │   │   ├── requirements.txt
│   │   │   └── script/
│   │   │       ├── download.py
│   │   │       ├── raw2text
│   │   │       ├── server
│   │   │       ├── setup
│   │   │       └── wav2text
│   │   ├── faster-whisper/
│   │   │   ├── README.md
│   │   │   ├── bin/
│   │   │   │   ├── faster_whisper_server.py
│   │   │   │   └── faster_whisper_wav2text.py
│   │   │   ├── script/
│   │   │   │   ├── download.py
│   │   │   │   ├── server
│   │   │   │   ├── setup
│   │   │   │   └── wav2text
│   │   │   └── src/
│   │   │       ├── LICENSE
│   │   │       ├── README.md
│   │   │       ├── faster_whisper/
│   │   │       │   ├── __init__.py
│   │   │       │   ├── audio.py
│   │   │       │   ├── feature_extractor.py
│   │   │       │   └── transcribe.py
│   │   │       ├── requirements.conversion.txt
│   │   │       ├── requirements.txt
│   │   │       └── setup.py
│   │   ├── pocketsphinx/
│   │   │   ├── README.md
│   │   │   ├── bin/
│   │   │   │   ├── pocketsphinx_raw2text.py
│   │   │   │   ├── pocketsphinx_server.py
│   │   │   │   └── pocketsphinx_wav2text.py
│   │   │   ├── requirements.txt
│   │   │   └── script/
│   │   │       ├── download.py
│   │   │       ├── raw2text
│   │   │       ├── server
│   │   │       ├── setup
│   │   │       └── wav2text
│   │   ├── vosk/
│   │   │   ├── README.md
│   │   │   ├── bin/
│   │   │   │   ├── vosk_raw2text.py
│   │   │   │   ├── vosk_server.py
│   │   │   │   └── vosk_wav2text.py
│   │   │   ├── requirements.txt
│   │   │   └── script/
│   │   │       ├── download.py
│   │   │       ├── raw2text
│   │   │       ├── server
│   │   │       ├── setup
│   │   │       └── wav2text
│   │   ├── whisper/
│   │   │   ├── README.md
│   │   │   ├── bin/
│   │   │   │   ├── whisper_server.py
│   │   │   │   └── whisper_wav2text.py
│   │   │   ├── requirements.txt
│   │   │   └── script/
│   │   │       ├── server
│   │   │       ├── setup
│   │   │       └── wav2text
│   │   └── whisper-cpp/
│   │       ├── .gitignore
│   │       ├── Dockerfile.libwhisper
│   │       ├── Dockerfile.libwhisper.dockerignore
│   │       ├── README.md
│   │       ├── bin/
│   │       │   ├── whisper_cpp_server.py
│   │       │   └── whisper_cpp_wav2text.py
│   │       ├── lib/
│   │       │   ├── Makefile
│   │       │   └── whisper_cpp.py
│   │       ├── requirements.txt
│   │       └── script/
│   │           ├── build_libwhisper
│   │           ├── download.py
│   │           ├── server
│   │           ├── setup
│   │           ├── setup.py
│   │           └── wav2text
│   ├── handle/
│   │   ├── date_time/
│   │   │   └── bin/
│   │   │       └── date_time.py
│   │   └── home_assistant/
│   │       └── bin/
│   │           └── converse.py
│   ├── intent/
│   │   └── regex/
│   │       └── bin/
│   │           └── regex.py
│   ├── mic/
│   │   ├── pyaudio/
│   │   │   ├── README.md
│   │   │   ├── bin/
│   │   │   │   ├── pyaudio_events.py
│   │   │   │   ├── pyaudio_list_mics.py
│   │   │   │   ├── pyaudio_raw.py
│   │   │   │   └── pyaudio_shared.py
│   │   │   ├── requirements.txt
│   │   │   └── script/
│   │   │       ├── events
│   │   │       ├── list_mics
│   │   │       ├── raw
│   │   │       └── setup
│   │   ├── sounddevice/
│   │   │   ├── README.md
│   │   │   ├── bin/
│   │   │   │   ├── sounddevice_events.py
│   │   │   │   ├── sounddevice_list_mics.py
│   │   │   │   ├── sounddevice_raw.py
│   │   │   │   └── sounddevice_shared.py
│   │   │   ├── requirements.txt
│   │   │   └── script/
│   │   │       ├── events
│   │   │       ├── list_mics
│   │   │       ├── raw
│   │   │       └── setup
│   │   └── udp_raw/
│   │       └── bin/
│   │           └── udp_raw.py
│   ├── remote/
│   │   └── websocket/
│   │       ├── bin/
│   │       │   └── stream2stream.py
│   │       ├── requirements.txt
│   │       └── script/
│   │           ├── run
│   │           └── setup
│   ├── snd/
│   │   └── udp_raw/
│   │       └── bin/
│   │           └── udp_raw.py
│   ├── tts/
│   │   ├── coqui-tts/
│   │   │   ├── README.md
│   │   │   ├── requirements.txt
│   │   │   └── script/
│   │   │       ├── list_models
│   │   │       ├── server
│   │   │       └── setup
│   │   ├── flite/
│   │   │   └── script/
│   │   │       ├── download.py
│   │   │       └── setup
│   │   ├── larynx/
│   │   │   ├── README.md
│   │   │   ├── bin/
│   │   │   │   └── larynx_client.py
│   │   │   ├── requirements.txt
│   │   │   └── script/
│   │   │       ├── list_models
│   │   │       ├── server
│   │   │       └── setup
│   │   ├── marytts/
│   │   │   └── bin/
│   │   │       └── marytts.py
│   │   ├── mimic3/
│   │   │   ├── README.md
│   │   │   ├── bin/
│   │   │   │   └── mimic3_server.py
│   │   │   ├── requirements.txt
│   │   │   └── script/
│   │   │       ├── server
│   │   │       └── setup
│   │   └── piper/
│   │       ├── README.md
│   │       ├── bin/
│   │       │   └── piper_server.py
│   │       └── script/
│   │           ├── download.py
│   │           ├── server
│   │           └── setup.py
│   ├── vad/
│   │   ├── energy/
│   │   │   └── bin/
│   │   │       └── energy_speech_prob.py
│   │   ├── silero/
│   │   │   ├── README.md
│   │   │   ├── bin/
│   │   │   │   └── silero_speech_prob.py
│   │   │   ├── requirements.txt
│   │   │   ├── script/
│   │   │   │   ├── setup
│   │   │   │   └── speech_prob
│   │   │   └── share/
│   │   │       └── silero_vad.onnx
│   │   └── webrtcvad/
│   │       ├── README.md
│   │       ├── bin/
│   │       │   └── webrtcvad_speech_prob.py
│   │       ├── requirements.txt
│   │       └── script/
│   │           ├── setup
│   │           └── speech_prob
│   └── wake/
│       ├── porcupine1/
│       │   ├── bin/
│       │   │   ├── list_models.py
│       │   │   ├── porcupine_raw_text.py
│       │   │   ├── porcupine_shared.py
│       │   │   └── porcupine_stream.py
│       │   ├── requirements.txt
│       │   └── script/
│       │       ├── download.py
│       │       ├── list_models
│       │       ├── raw2text
│       │       └── setup
│       ├── precise-lite/
│       │   ├── bin/
│       │   │   └── precise.py
│       │   ├── requirements.txt
│       │   ├── script/
│       │   │   └── setup
│       │   └── share/
│       │       └── hey_mycroft.tflite
│       └── snowboy/
│           ├── bin/
│           │   └── snowboy_raw_text.py
│           ├── requirements.txt
│           ├── script/
│           │   └── setup
│           └── share/
│               ├── hey_extreme.umdl
│               ├── jarvis.umdl
│               ├── neoya.umdl
│               ├── smart_mirror.umdl
│               ├── snowboy.umdl
│               ├── subex.umdl
│               └── view_glass.umdl
├── pylintrc
├── requirements_dev.txt
├── requirements_http_api.txt
├── rhasspy3/
│   ├── VERSION
│   ├── __init__.py
│   ├── asr.py
│   ├── audio.py
│   ├── config.py
│   ├── configuration.yaml
│   ├── core.py
│   ├── event.py
│   ├── handle.py
│   ├── intent.py
│   ├── mic.py
│   ├── pipeline.py
│   ├── program.py
│   ├── py.typed
│   ├── remote.py
│   ├── snd.py
│   ├── tts.py
│   ├── util/
│   │   ├── __init__.py
│   │   ├── dataclasses_json.py
│   │   └── jaml.py
│   ├── vad.py
│   └── wake.py
├── rhasspy3_http_api/
│   ├── __init__.py
│   ├── __main__.py
│   ├── asr.py
│   ├── css/
│   │   └── main.css
│   ├── handle.py
│   ├── intent.py
│   ├── js/
│   │   ├── main.js
│   │   └── recorder.worklet.js
│   ├── pipeline.py
│   ├── snd.py
│   ├── templates/
│   │   ├── asr.html
│   │   ├── index.html
│   │   ├── layout.html
│   │   ├── pipeline.html
│   │   ├── satellite.html
│   │   └── tts.html
│   ├── tts.py
│   └── wake.py
├── script/
│   ├── format
│   ├── http_server
│   ├── lint
│   ├── run
│   ├── setup
│   ├── setup_http_server
│   └── test
├── setup.cfg
├── setup.py
├── tests/
│   ├── test_dataclasses_json.py
│   └── test_jaml.py
└── tools/
    └── websocket-client/
        ├── bin/
        │   └── websocket_client.py
        ├── requirements.txt
        └── script/
            ├── run
            └── setup
Download .txt
SYMBOL INDEX (315 symbols across 116 files)

FILE: bin/asr_adapter_raw2text.py
  function main (line 17) | def main() -> None:

FILE: bin/asr_adapter_wav2text.py
  function main (line 19) | def main() -> None:

FILE: bin/asr_transcribe.py
  function main (line 22) | async def main() -> None:

FILE: bin/asr_transcribe_stream.py
  function main (line 30) | async def main() -> None:

FILE: bin/asr_transcribe_wav.py
  function main (line 35) | async def main() -> None:
  function get_wav_bytes (line 134) | def get_wav_bytes(args: argparse.Namespace) -> Iterable[bytes]:

FILE: bin/client_unix_socket.py
  function main (line 12) | def main():
  function read_proc (line 41) | def read_proc(conn_file):
  function write_proc (line 53) | def write_proc(conn_file):

FILE: bin/config_print.py
  function main (line 16) | def main() -> None:

FILE: bin/handle_adapter_json.py
  function main (line 18) | def main() -> None:

FILE: bin/handle_adapter_text.py
  function main (line 17) | def main() -> None:

FILE: bin/handle_intent.py
  function main (line 21) | async def main() -> None:
  function get_input (line 65) | def get_input(args: argparse.Namespace) -> Iterable[str]:

FILE: bin/handle_text.py
  function main (line 21) | async def main() -> None:
  function get_input (line 65) | def get_input(args: argparse.Namespace) -> Iterable[str]:

FILE: bin/intent_recognize.py
  function main (line 19) | async def main() -> None:
  function get_texts (line 59) | def get_texts(args: argparse.Namespace) -> Iterable[str]:

FILE: bin/mic_adapter_raw.py
  function main (line 18) | def main() -> None:

FILE: bin/mic_record_sample.py
  function main (line 24) | async def main() -> None:

FILE: bin/mic_test_energy.py
  function main (line 20) | async def main() -> None:

FILE: bin/pipeline_run.py
  function main (line 26) | async def main() -> None:

FILE: bin/program_download.py
  function main (line 16) | def main() -> None:

FILE: bin/program_install.py
  function main (line 16) | def main() -> None:

FILE: bin/satellite_run.py
  function main (line 25) | async def main() -> None:

FILE: bin/server_run.py
  function main (line 20) | def main() -> None:

FILE: bin/snd_adapter_raw.py
  function main (line 25) | def main() -> None:

FILE: bin/snd_play.py
  function main (line 18) | async def main() -> None:

FILE: bin/tts_adapter_http.py
  function main (line 18) | def main():

FILE: bin/tts_adapter_text2wav.py
  function main (line 23) | def main():
  function text_to_wav (line 90) | def text_to_wav(args: argparse.Namespace, text: str) -> bytes:

FILE: bin/tts_speak.py
  function main (line 21) | async def main() -> None:

FILE: bin/tts_synthesize.py
  function main (line 18) | async def main() -> None:

FILE: bin/vad_adapter_raw.py
  function main (line 20) | def main() -> None:

FILE: bin/vad_segment_wav.py
  function main (line 25) | async def main() -> None:
  function get_wav_bytes (line 126) | def get_wav_bytes(args: argparse.Namespace) -> Iterable[bytes]:

FILE: bin/wake_adapter_raw.py
  class State (line 23) | class State:
  function main (line 28) | def main() -> None:
  function write_proc (line 72) | def write_proc(reader: IO[bytes], state: State):

FILE: bin/wake_detect.py
  function main (line 20) | async def main() -> None:

FILE: programs/asr/coqui-stt/bin/coqui_stt_raw2text.py
  function main (line 13) | def main() -> None:

FILE: programs/asr/coqui-stt/bin/coqui_stt_server.py
  function main (line 16) | def main():
  function handle_client (line 92) | def handle_client(connection: socket.socket, model: Model, rate: int) ->...

FILE: programs/asr/coqui-stt/bin/coqui_stt_wav2text.py
  function main (line 13) | def main() -> None:

FILE: programs/asr/coqui-stt/script/download.py
  function main (line 15) | def main() -> None:

FILE: programs/asr/faster-whisper/bin/faster_whisper_server.py
  function main (line 21) | def main() -> None:

FILE: programs/asr/faster-whisper/bin/faster_whisper_wav2text.py
  function main (line 14) | def main() -> None:

FILE: programs/asr/faster-whisper/script/download.py
  function main (line 21) | def main() -> None:

FILE: programs/asr/faster-whisper/src/faster_whisper/audio.py
  function decode_audio (line 5) | def decode_audio(input_file, sampling_rate=16000):

FILE: programs/asr/faster-whisper/src/faster_whisper/feature_extractor.py
  class FeatureExtractor (line 5) | class FeatureExtractor:
    method __init__ (line 6) | def __init__(
    method get_mel_filters (line 25) | def get_mel_filters(self, sr, n_fft, n_mels=128, dtype=np.float32):
    method fram_wave (line 74) | def fram_wave(self, waveform, center=True):
    method stft (line 118) | def stft(self, frames, window):
    method __call__ (line 145) | def __call__(self, waveform):

FILE: programs/asr/faster-whisper/src/faster_whisper/transcribe.py
  class Segment (line 12) | class Segment(collections.namedtuple("Segment", ("start", "end", "text"))):
  class AudioInfo (line 16) | class AudioInfo(
  class TranscriptionOptions (line 22) | class TranscriptionOptions(
  class WhisperModel (line 40) | class WhisperModel:
    method __init__ (line 41) | def __init__(
    method transcribe (line 80) | def transcribe(
    method generate_segments (line 159) | def generate_segments(self, features, language, options):
    method generate_tokenized_segments (line 175) | def generate_tokenized_segments(self, features, language, options):
    method decode_text_tokens (line 254) | def decode_text_tokens(self, tokens):
    method generate_with_fallback (line 261) | def generate_with_fallback(self, segment, prompt, options):
    method get_prompt (line 302) | def get_prompt(self, language, previous_tokens):
    method get_segment (line 317) | def get_segment(self, features, offset=0):
    method get_input (line 333) | def get_input(self, segment):
  function get_compression_ratio (line 339) | def get_compression_ratio(text):

FILE: programs/asr/faster-whisper/src/setup.py
  function get_requirements (line 6) | def get_requirements(path):

FILE: programs/asr/pocketsphinx/bin/pocketsphinx_raw2text.py
  function main (line 12) | def main() -> None:

FILE: programs/asr/pocketsphinx/bin/pocketsphinx_server.py
  function main (line 15) | def main() -> None:
  function handle_client (line 75) | def handle_client(

FILE: programs/asr/pocketsphinx/bin/pocketsphinx_wav2text.py
  function main (line 12) | def main() -> None:

FILE: programs/asr/pocketsphinx/script/download.py
  function main (line 15) | def main() -> None:

FILE: programs/asr/vosk/bin/vosk_raw2text.py
  function main (line 12) | def main() -> None:

FILE: programs/asr/vosk/bin/vosk_server.py
  function main (line 14) | def main():
  function handle_client (line 68) | def handle_client(connection: socket.socket, model: Model, rate: int) ->...

FILE: programs/asr/vosk/bin/vosk_wav2text.py
  function main (line 15) | def main() -> None:

FILE: programs/asr/vosk/script/download.py
  function main (line 15) | def main() -> None:

FILE: programs/asr/whisper-cpp/bin/whisper_cpp_server.py
  function main (line 15) | def main():
  function handle_client (line 62) | def handle_client(connection: socket.socket, whisper: Whisper) -> None:

FILE: programs/asr/whisper-cpp/bin/whisper_cpp_wav2text.py
  function main (line 16) | def main() -> None:

FILE: programs/asr/whisper-cpp/lib/whisper_cpp.py
  class WhisperFullParams (line 9) | class WhisperFullParams(ctypes.Structure):
  class WhisperError (line 58) | class WhisperError(Exception):
  class Whisper (line 62) | class Whisper:
    method __init__ (line 63) | def __init__(
    method transcribe (line 89) | def transcribe(self, audio_array: np.ndarray) -> Iterable[str]:
    method __enter__ (line 107) | def __enter__(self):
    method __exit__ (line 110) | def __exit__(self, exc_type, exc_value, traceback):

FILE: programs/asr/whisper-cpp/script/download.py
  function main (line 14) | def main() -> None:

FILE: programs/asr/whisper-cpp/script/setup.py
  function main (line 15) | def main() -> None:

FILE: programs/asr/whisper/bin/whisper_server.py
  function main (line 15) | def main():
  function handle_client (line 67) | def handle_client(

FILE: programs/asr/whisper/bin/whisper_wav2text.py
  function main (line 10) | def main() -> None:

FILE: programs/handle/date_time/bin/date_time.py
  function main (line 7) | def main() -> None:

FILE: programs/handle/home_assistant/bin/converse.py
  function main (line 14) | def main():

FILE: programs/intent/regex/bin/regex.py
  function main (line 11) | def main() -> None:
  function _clean (line 49) | def _clean(text: str) -> str:
  function _recognize (line 54) | def _recognize(text: str, patterns: Dict[str, List[re.Pattern]]) -> Opti...

FILE: programs/mic/pyaudio/bin/pyaudio_events.py
  function main (line 22) | def main() -> None:

FILE: programs/mic/pyaudio/bin/pyaudio_list_mics.py
  function main (line 6) | def main() -> None:

FILE: programs/mic/pyaudio/bin/pyaudio_raw.py
  function main (line 14) | def main() -> None:

FILE: programs/mic/pyaudio/bin/pyaudio_shared.py
  function iter_chunks (line 12) | def iter_chunks(

FILE: programs/mic/sounddevice/bin/sounddevice_events.py
  function main (line 22) | def main() -> None:

FILE: programs/mic/sounddevice/bin/sounddevice_list_mics.py
  function main (line 6) | def main() -> None:

FILE: programs/mic/sounddevice/bin/sounddevice_raw.py
  function main (line 14) | def main() -> None:

FILE: programs/mic/sounddevice/bin/sounddevice_shared.py
  function iter_chunks (line 12) | def iter_chunks(

FILE: programs/mic/udp_raw/bin/udp_raw.py
  function main (line 15) | def main() -> None:
  class MicUDPHandler (line 41) | class MicUDPHandler(socketserver.BaseRequestHandler):
    method __init__ (line 42) | def __init__(self, rate: int, width: int, channels: int, *args, **kwar...
    method handle (line 49) | def handle(self):

FILE: programs/remote/websocket/bin/stream2stream.py
  function main (line 21) | async def main() -> None:
  function play (line 91) | async def play(websocket, done_event: asyncio.Event):

FILE: programs/snd/udp_raw/bin/udp_raw.py
  function main (line 17) | def main() -> None:

FILE: programs/tts/flite/script/download.py
  function main (line 12) | def main() -> None:

FILE: programs/tts/larynx/bin/larynx_client.py
  function main (line 15) | def main():

FILE: programs/tts/marytts/bin/marytts.py
  function main (line 15) | def main():

FILE: programs/tts/mimic3/bin/mimic3_server.py
  function main (line 22) | def main() -> None:
  function handle_client (line 86) | def handle_client(connection: socket.socket, mimic3: Mimic3TextToSpeechS...

FILE: programs/tts/piper/bin/piper_server.py
  function main (line 17) | def main() -> None:
  function handle_connection (line 73) | def handle_connection(

FILE: programs/tts/piper/script/download.py
  function main (line 105) | def main() -> None:

FILE: programs/tts/piper/script/setup.py
  function main (line 17) | def main() -> None:

FILE: programs/vad/energy/bin/energy_speech_prob.py
  function main (line 13) | def main() -> None:
  function get_debiased_energy (line 54) | def get_debiased_energy(audio_data: bytes, width: int) -> float:

FILE: programs/vad/silero/bin/silero_speech_prob.py
  function main (line 17) | def main() -> None:
  class SileroDetector (line 44) | class SileroDetector:
    method start (line 50) | def start(self):
    method get_speech_probability (line 59) | def get_speech_probability(self, chunk: bytes) -> float:
    method stop (line 76) | def stop(self):
    method reset (line 81) | def reset(self):

FILE: programs/vad/webrtcvad/bin/webrtcvad_speech_prob.py
  function main (line 14) | def main() -> None:

FILE: programs/wake/porcupine1/bin/list_models.py
  function main (line 7) | def main() -> None:

FILE: programs/wake/porcupine1/bin/porcupine_raw_text.py
  function main (line 17) | def main() -> None:

FILE: programs/wake/porcupine1/bin/porcupine_shared.py
  function get_arg_parser (line 8) | def get_arg_parser() -> argparse.ArgumentParser:
  function load_porcupine (line 28) | def load_porcupine(args: argparse.Namespace) -> Tuple[pvporcupine.Porcup...

FILE: programs/wake/porcupine1/bin/porcupine_stream.py
  function main (line 19) | def main() -> None:

FILE: programs/wake/porcupine1/script/download.py
  function main (line 12) | def main() -> None:

FILE: programs/wake/precise-lite/bin/precise.py
  function main (line 34) | def main():
  class TFLiteHotWordEngine (line 92) | class TFLiteHotWordEngine:
    method __init__ (line 93) | def __init__(
    method _load_model (line 139) | def _load_model(self):
    method update (line 157) | def update(self, chunk):
    method found_wake_word (line 234) | def found_wake_word(self, frame_data):
    method reset (line 237) | def reset(self):
    method probability (line 247) | def probability(self) -> Optional[float]:
  class Vectorizer (line 254) | class Vectorizer(IntEnum):
  class ListenerParams (line 270) | class ListenerParams:
    method buffer_samples (line 316) | def buffer_samples(self):
    method n_features (line 322) | def n_features(self):
    method window_samples (line 329) | def window_samples(self):
    method hop_samples (line 334) | def hop_samples(self):
    method max_samples (line 339) | def max_samples(self):
    method feature_size (line 344) | def feature_size(self):
  function chunk_audio (line 356) | def chunk_audio(
  function buffer_to_audio (line 363) | def buffer_to_audio(audio_buffer: bytes) -> np.ndarray:
  function audio_to_buffer (line 370) | def audio_to_buffer(audio: np.ndarray) -> bytes:

FILE: programs/wake/snowboy/bin/snowboy_raw_text.py
  function main (line 15) | def main() -> None:

FILE: rhasspy3/asr.py
  class Transcript (line 23) | class Transcript(Eventable):
    method is_type (line 27) | def is_type(event_type: str) -> bool:
    method event (line 30) | def event(self) -> Event:
    method from_event (line 34) | def from_event(event: Event) -> "Transcript":
  function transcribe (line 39) | async def transcribe(
  function transcribe_stream (line 94) | async def transcribe_stream(

FILE: rhasspy3/audio.py
  class AudioChunk (line 26) | class AudioChunk(Eventable):
    method is_type (line 45) | def is_type(event_type: str) -> bool:
    method event (line 48) | def event(self) -> Event:
    method from_event (line 61) | def from_event(event: Event) -> "AudioChunk":
    method samples (line 74) | def samples(self) -> int:
    method seconds (line 78) | def seconds(self) -> float:
    method milliseconds (line 82) | def milliseconds(self) -> int:
  class AudioStart (line 87) | class AudioStart(Eventable):
    method is_type (line 103) | def is_type(event_type: str) -> bool:
    method event (line 106) | def event(self) -> Event:
    method from_event (line 119) | def from_event(event: Event) -> "AudioStart":
  class AudioStop (line 130) | class AudioStop(Eventable):
    method is_type (line 137) | def is_type(event_type: str) -> bool:
    method event (line 140) | def event(self) -> Event:
    method from_event (line 147) | def from_event(event: Event) -> "AudioStop":
  class AudioChunkConverter (line 152) | class AudioChunkConverter:
    method convert (line 160) | def convert(self, chunk: AudioChunk) -> AudioChunk:
  function wav_to_chunks (line 205) | def wav_to_chunks(

FILE: rhasspy3/config.py
  class CommandConfig (line 11) | class CommandConfig(DataClassJsonMixin):
  class ProgramDownloadConfig (line 17) | class ProgramDownloadConfig(DataClassJsonMixin):
  class ProgramInstallConfig (line 23) | class ProgramInstallConfig(CommandConfig):
  class ProgramConfig (line 30) | class ProgramConfig(CommandConfig):
  class PipelineProgramConfig (line 38) | class PipelineProgramConfig(DataClassJsonMixin):
  class PipelineConfig (line 45) | class PipelineConfig(DataClassJsonMixin):
  class SatelliteConfig (line 58) | class SatelliteConfig(DataClassJsonMixin):
  class ServerConfig (line 66) | class ServerConfig(DataClassJsonMixin):
  class Config (line 73) | class Config(DataClassJsonMixin):
    method __post_init__ (line 86) | def __post_init__(self):

FILE: rhasspy3/core.py
  class Rhasspy (line 16) | class Rhasspy:
    method programs_dir (line 23) | def programs_dir(self) -> Path:
    method data_dir (line 28) | def data_dir(self) -> Path:
    method load (line 33) | def load(config_dir: Union[str, Path]) -> "Rhasspy":

FILE: rhasspy3/event.py
  class Event (line 15) | class Event:
    method to_dict (line 20) | def to_dict(self) -> Dict[str, Any]:
    method from_dict (line 24) | def from_dict(event_dict: Dict[str, Any]) -> "Event":
  class Eventable (line 28) | class Eventable(ABC):
    method event (line 30) | def event(self) -> Event:
    method is_type (line 35) | def is_type(event_type: str) -> bool:
    method to_dict (line 38) | def to_dict(self) -> Dict[str, Any]:
  function async_read_event (line 42) | async def async_read_event(reader: asyncio.StreamReader) -> Optional[Eve...
  function async_write_event (line 64) | async def async_write_event(event: Event, writer: asyncio.StreamWriter):
  function async_write_events (line 82) | async def async_write_events(events: Iterable[Event], writer: asyncio.St...
  function read_event (line 103) | def read_event(reader: Optional[IO[bytes]] = None) -> Optional[Event]:
  function write_event (line 129) | def write_event(event: Event, writer: Optional[IO[bytes]] = None):

FILE: rhasspy3/handle.py
  class Handled (line 21) | class Handled(Eventable):
    method is_type (line 25) | def is_type(event_type: str) -> bool:
    method event (line 28) | def event(self) -> Event:
    method from_event (line 36) | def from_event(event: Event) -> "Handled":
  class NotHandled (line 42) | class NotHandled(Eventable):
    method is_type (line 46) | def is_type(event_type: str) -> bool:
    method event (line 49) | def event(self) -> Event:
    method from_event (line 57) | def from_event(event: Event) -> "NotHandled":
  function handle (line 62) | async def handle(

FILE: rhasspy3/intent.py
  class Entity (line 20) | class Entity:
  class Recognize (line 26) | class Recognize(Eventable):
    method is_type (line 30) | def is_type(event_type: str) -> bool:
    method event (line 33) | def event(self) -> Event:
    method from_event (line 38) | def from_event(event: Event) -> "Recognize":
  class Intent (line 44) | class Intent(Eventable):
    method is_type (line 49) | def is_type(event_type: str) -> bool:
    method event (line 52) | def event(self) -> Event:
    method from_dict (line 60) | def from_dict(data: Dict[str, Any]) -> "Intent":
    method from_event (line 72) | def from_event(event: Event) -> "Intent":
    method to_rhasspy (line 76) | def to_rhasspy(self) -> Dict[str, Any]:
  class NotRecognized (line 90) | class NotRecognized(Eventable):
    method is_type (line 94) | def is_type(event_type: str) -> bool:
    method event (line 97) | def event(self) -> Event:
    method from_event (line 105) | def from_event(event: Event) -> "NotRecognized":
  function recognize (line 110) | async def recognize(

FILE: rhasspy3/pipeline.py
  class PipelineResult (line 28) | class PipelineResult(DataClassJsonMixin):
    method to_event_dict (line 36) | def to_event_dict(self) -> Dict[str, Any]:
  class StopAfterDomain (line 49) | class StopAfterDomain(str, Enum):
  function run (line 57) | async def run(
  function _mic_wake (line 217) | async def _mic_wake(
  function _mic_asr (line 240) | async def _mic_asr(
  function _mic_wake_asr (line 272) | async def _mic_wake_asr(

FILE: rhasspy3/program.py
  class MissingProgramConfigError (line 17) | class MissingProgramConfigError(Exception):
  class ProcessContextManager (line 21) | class ProcessContextManager:
    method __init__ (line 24) | def __init__(self, proc: Process, name: str):
    method __aenter__ (line 28) | async def __aenter__(self):
    method __aexit__ (line 31) | async def __aexit__(self, exc_type, exc, tb):
  function create_process (line 43) | async def create_process(
  function run_command (line 167) | async def run_command(rhasspy: Rhasspy, command_config: CommandConfig) -...

FILE: rhasspy3/snd.py
  class Played (line 17) | class Played(Eventable):
    method is_type (line 19) | def is_type(event_type: str) -> bool:
    method event (line 22) | def event(self) -> Event:
    method from_event (line 26) | def from_event(event: Event) -> "Played":
  function play (line 30) | async def play(
  function play_stream (line 63) | async def play_stream(

FILE: rhasspy3/tts.py
  class Synthesize (line 17) | class Synthesize(Eventable):
    method is_type (line 24) | def is_type(event_type: str) -> bool:
    method event (line 27) | def event(self) -> Event:
    method from_event (line 31) | def from_event(event: Event) -> "Synthesize":
  function synthesize (line 36) | async def synthesize(
  function synthesize_stream (line 78) | async def synthesize_stream(

FILE: rhasspy3/util/__init__.py
  function merge_dict (line 4) | def merge_dict(base_dict, new_dict):

FILE: rhasspy3/util/dataclasses_json.py
  class DataClassJsonMixin (line 7) | class DataClassJsonMixin:
    method from_dict (line 11) | def from_dict(cls, data: Dict[str, Any]) -> Any:
    method to_dict (line 26) | def to_dict(self) -> Dict[str, Any]:
  function _decode (line 31) | def _decode(value: Any, target_type: Type) -> Any:

FILE: rhasspy3/util/jaml.py
  function safe_load (line 9) | def safe_load(fp: IO[str]) -> Dict[str, Any]:
  class LoaderState (line 17) | class LoaderState(Enum):
  class JamlLoader (line 22) | class JamlLoader:
    method __init__ (line 23) | def __init__(self) -> None:
    method process_line (line 30) | def process_line(self, line: str):
    method _add_key (line 59) | def _add_key(self, line, line_indent: int):

FILE: rhasspy3/vad.py
  class VoiceStarted (line 22) | class VoiceStarted(Eventable):
    method is_type (line 29) | def is_type(event_type: str) -> bool:
    method event (line 32) | def event(self) -> Event:
    method from_event (line 39) | def from_event(event: Event) -> "VoiceStarted":
  class VoiceStopped (line 44) | class VoiceStopped(Eventable):
    method is_type (line 51) | def is_type(event_type: str) -> bool:
    method event (line 54) | def event(self) -> Event:
    method from_event (line 61) | def from_event(event: Event) -> "VoiceStopped":
  class Segmenter (line 66) | class Segmenter:
    method __post_init__ (line 111) | def __post_init__(self):
    method reset (line 114) | def reset(self):
    method process (line 124) | def process(
  function segment (line 167) | async def segment(

FILE: rhasspy3/wake.py
  class Detection (line 21) | class Detection(Eventable):
    method is_type (line 31) | def is_type(event_type: str) -> bool:
    method event (line 34) | def event(self) -> Event:
    method from_event (line 40) | def from_event(event: Event) -> "Detection":
  class NotDetected (line 48) | class NotDetected(Eventable):
    method is_type (line 52) | def is_type(event_type: str) -> bool:
    method event (line 55) | def event(self) -> Event:
    method from_event (line 59) | def from_event(event: Event) -> "NotDetected":
  function detect (line 63) | async def detect(
  function detect_stream (line 125) | async def detect_stream(

FILE: rhasspy3_http_api/__main__.py
  function main (line 30) | def main():
  function run_servers (line 145) | def run_servers(rhasspy, servers):

FILE: rhasspy3_http_api/asr.py
  function add_asr (line 22) | def add_asr(

FILE: rhasspy3_http_api/handle.py
  function add_handle (line 19) | def add_handle(

FILE: rhasspy3_http_api/intent.py
  function add_intent (line 13) | def add_intent(

FILE: rhasspy3_http_api/js/main.js
  function q (line 1) | function q(selector) {
  function buildWaveHeader (line 6) | function buildWaveHeader(opts) {

FILE: rhasspy3_http_api/js/recorder.worklet.js
  class RecorderProcessor (line 1) | class RecorderProcessor extends AudioWorkletProcessor {
    method constructor (line 2) | constructor() {
    method process (line 6) | process(inputList, outputList, parameters) {

FILE: rhasspy3_http_api/pipeline.py
  class StartAfterDomain (line 40) | class StartAfterDomain(str, Enum):
  function add_pipeline (line 48) | def add_pipeline(

FILE: rhasspy3_http_api/snd.py
  function add_snd (line 22) | def add_snd(

FILE: rhasspy3_http_api/tts.py
  function add_tts (line 15) | def add_tts(

FILE: rhasspy3_http_api/wake.py
  function add_wake (line 25) | def add_wake(

FILE: tests/test_dataclasses_json.py
  class Class1 (line 8) | class Class1(DataClassJsonMixin):
  class Class2 (line 13) | class Class2(DataClassJsonMixin):
  function test_to_dict (line 37) | def test_to_dict():
  function test_from_dict (line 41) | def test_from_dict():

FILE: tests/test_jaml.py
  function test_safe_load (line 29) | def test_safe_load():

FILE: tools/websocket-client/bin/websocket_client.py
  function main (line 10) | async def main() -> None:
Condensed preview — 258 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (504K chars).
[
  {
    "path": ".gitignore",
    "chars": 119,
    "preview": ".DS_Store\n.idea\n*.log\ntmp/\n\n*.py[cod]\n*.egg\n/build\nhtmlcov\n\n.projectile\n.venv/\nvenv/\n.mypy_cache/\n*.egg-info/\n\n/local/\n"
  },
  {
    "path": ".gitmodules",
    "chars": 156,
    "preview": "[submodule \"programs/asr/whisper.cpp/build/whisper.cpp\"]\n\tpath = programs/asr/whisper.cpp/build/whisper.cpp\n\turl = https"
  },
  {
    "path": ".isort.cfg",
    "chars": 113,
    "preview": "[settings]\nmulti_line_output=3\ninclude_trailing_comma=True\nforce_grid_wrap=0\nuse_parentheses=True\nline_length=88\n"
  },
  {
    "path": "LICENSE.md",
    "chars": 1071,
    "preview": "MIT License\n\nCopyright (c) 2022 Michael Hansen\n\nPermission is hereby granted, free of charge, to any person obtaining a "
  },
  {
    "path": "README.md",
    "chars": 7392,
    "preview": "![Rhasspy 3](img/banner.png)\n\n**NOTE: This is a very early developer preview!**\n\nAn open source toolkit for building voi"
  },
  {
    "path": "bin/asr_adapter_raw2text.py",
    "chars": 2068,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport logging\nimport shlex\nimport subprocess\nfrom pathlib import Path\n\nfrom rhas"
  },
  {
    "path": "bin/asr_adapter_wav2text.py",
    "chars": 2109,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport logging\nimport shlex\nimport subprocess\nimport tempfile\nimport wave\nfrom pa"
  },
  {
    "path": "bin/asr_transcribe.py",
    "chars": 3070,
    "preview": "#!/usr/bin/env python3\n\"\"\"Transcribes mic audio into text.\"\"\"\nimport argparse\nimport asyncio\nimport json\nimport logging\n"
  },
  {
    "path": "bin/asr_transcribe_stream.py",
    "chars": 4526,
    "preview": "#!/usr/bin/env python3\n\"\"\"Transcribes raw audio from stdin into text.\"\"\"\nimport argparse\nimport asyncio\nimport json\nimpo"
  },
  {
    "path": "bin/asr_transcribe_wav.py",
    "chars": 4748,
    "preview": "#!/usr/bin/env python3\n\"\"\"Transcribes WAV audio into text.\"\"\"\nimport argparse\nimport asyncio\nimport io\nimport json\nimpor"
  },
  {
    "path": "bin/client_unix_socket.py",
    "chars": 1722,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport logging\nimport socket\nimport threading\n\nfrom rhasspy3.event import read_ev"
  },
  {
    "path": "bin/config_print.py",
    "chars": 906,
    "preview": "#!/usr/bin/env python3\n\"\"\"Prints configuration as JSON.\"\"\"\nimport argparse\nimport json\nimport logging\nimport sys\nfrom pa"
  },
  {
    "path": "bin/handle_adapter_json.py",
    "chars": 2667,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport json\nimport logging\nimport shlex\nimport subprocess\nfrom pathlib import Pat"
  },
  {
    "path": "bin/handle_adapter_text.py",
    "chars": 1837,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport logging\nimport shlex\nimport subprocess\nfrom pathlib import Path\n\nfrom rhas"
  },
  {
    "path": "bin/handle_intent.py",
    "chars": 2334,
    "preview": "#!/usr/bin/env python3\n\"\"\"Handle text or intent.\"\"\"\nimport argparse\nimport asyncio\nimport json\nimport logging\nimport os\n"
  },
  {
    "path": "bin/handle_text.py",
    "chars": 2279,
    "preview": "#!/usr/bin/env python3\n\"\"\"Handle text or intent.\"\"\"\nimport argparse\nimport asyncio\nimport json\nimport logging\nimport os\n"
  },
  {
    "path": "bin/intent_recognize.py",
    "chars": 2049,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport asyncio\nimport json\nimport logging\nimport os\nimport sys\nfrom pathlib impor"
  },
  {
    "path": "bin/mic_adapter_raw.py",
    "chars": 2299,
    "preview": "#!/usr/bin/env python3\n\"\"\"Reads raw audio chunks from stdin.\"\"\"\nimport argparse\nimport logging\nimport shlex\nimport subpr"
  },
  {
    "path": "bin/mic_record_sample.py",
    "chars": 8278,
    "preview": "#!/usr/bin/env python3\n\"\"\"Record a spoken audio sample to a WAV file.\"\"\"\nimport argparse\nimport asyncio\nimport logging\ni"
  },
  {
    "path": "bin/mic_test_energy.py",
    "chars": 3481,
    "preview": "#!/usr/bin/env python3\n\"\"\"Prints microphone energy level to console for testing.\"\"\"\nimport argparse\nimport asyncio\nimpor"
  },
  {
    "path": "bin/pipeline_run.py",
    "chars": 4382,
    "preview": "#!/usr/bin/env python3\n\"\"\"Run a pipeline all or part of the way.\"\"\"\nimport argparse\nimport asyncio\nimport json\nimport lo"
  },
  {
    "path": "bin/program_download.py",
    "chars": 2503,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport logging\nimport shlex\nimport string\nimport subprocess\nfrom pathlib import P"
  },
  {
    "path": "bin/program_install.py",
    "chars": 2030,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport logging\nimport shlex\nimport string\nimport subprocess\nfrom pathlib import P"
  },
  {
    "path": "bin/satellite_run.py",
    "chars": 7068,
    "preview": "#!/usr/bin/env python3\n\"\"\"Run satellite loop.\"\"\"\nimport argparse\nimport asyncio\nimport logging\nfrom collections import d"
  },
  {
    "path": "bin/server_run.py",
    "chars": 2636,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport logging\nimport os\nimport shlex\nimport string\nimport subprocess\nimport sys\n"
  },
  {
    "path": "bin/snd_adapter_raw.py",
    "chars": 2218,
    "preview": "#!/usr/bin/env python3\n\"\"\"Play audio through a command that accepts raw PCM.\"\"\"\nimport argparse\nimport logging\nimport sh"
  },
  {
    "path": "bin/snd_play.py",
    "chars": 1994,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport asyncio\nimport logging\nimport os\nimport sys\nfrom pathlib import Path\n\nfrom"
  },
  {
    "path": "bin/tts_adapter_http.py",
    "chars": 2819,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport logging\nimport wave\nfrom pathlib import Path\nfrom urllib.parse import urle"
  },
  {
    "path": "bin/tts_adapter_text2wav.py",
    "chars": 3713,
    "preview": "#!/usr/bin/env python3\n\"\"\"\nRuns a text to speech command that returns WAV audio on stdout or in a temp file.\n\"\"\"\nimport "
  },
  {
    "path": "bin/tts_speak.py",
    "chars": 2527,
    "preview": "#!/usr/bin/env python3\n\"\"\"Synthesize and speak audio.\"\"\"\nimport argparse\nimport asyncio\nimport io\nimport json\nimport log"
  },
  {
    "path": "bin/tts_synthesize.py",
    "chars": 1770,
    "preview": "#!/usr/bin/env python3\n\"\"\"Synthesize WAV audio from text.\"\"\"\nimport argparse\nimport asyncio\nimport io\nimport logging\nimp"
  },
  {
    "path": "bin/vad_adapter_raw.py",
    "chars": 5473,
    "preview": "#!/usr/bin/env python3\n\"\"\"Voice activity detection programs that accept raw PCM audio and print a speech probability for"
  },
  {
    "path": "bin/vad_segment_wav.py",
    "chars": 4897,
    "preview": "#!/usr/bin/env python3\n\"\"\"Prints voice start/stop in WAV file.\"\"\"\nimport argparse\nimport asyncio\nimport io\nimport json\ni"
  },
  {
    "path": "bin/wake_adapter_raw.py",
    "chars": 2419,
    "preview": "#!/usr/bin/env python3\n\"\"\"Wake word detection with a command that accepts raw PCM audio and prints a line for each detec"
  },
  {
    "path": "bin/wake_detect.py",
    "chars": 2508,
    "preview": "#!/usr/bin/env python3\n\"\"\"Wait for wake word to be detected.\"\"\"\nimport argparse\nimport asyncio\nimport json\nimport loggin"
  },
  {
    "path": "docs/README.md",
    "chars": 121,
    "preview": "# Rhasspy 3\n\n* [Tutorial](tutorial.md)\n* [Domains](domains.md)\n* [Wyoming Protcol](wyoming.md)\n* [Adapters](adapters.md)"
  },
  {
    "path": "docs/adapters.md",
    "chars": 859,
    "preview": "# Adapters\n\nScripts in `bin/`:\n\n* `asr_adapter_raw2text.py`\n    * Raw audio stream in, text or JSON out\n* `asr_adapter_w"
  },
  {
    "path": "docs/domains.md",
    "chars": 1332,
    "preview": "# Domains\n\nPrograms belong to a specific domain. This defines the kinds of [events](wyoming.md) they are expected to rec"
  },
  {
    "path": "docs/home_assistant.md",
    "chars": 1699,
    "preview": "# Home Assistant\n\nThis will connect Rhasspy to Home Assistant via [Assist](https://www.home-assistant.io/docs/assist).\n\n"
  },
  {
    "path": "docs/satellite.md",
    "chars": 2069,
    "preview": "# Satellite\n\nOnce you have a Rhasspy HTTP server running, you can use Rhasspy as a satellite on a separate device.\n\n**NO"
  },
  {
    "path": "docs/tutorial.md",
    "chars": 14225,
    "preview": "# Tutorial\n\nWelcome to Rhasspy 3! This is a developer preview, so many of the manual steps here will be replaced with so"
  },
  {
    "path": "docs/wyoming.md",
    "chars": 4831,
    "preview": "# The Wyoming Protocol\n\nAn interprocess event protocol over stdin/stdout for Rhasspy v3.\n\n(effectively [JSONL](https://j"
  },
  {
    "path": "examples/satellite/configuration.yaml",
    "chars": 389,
    "preview": "satellites:\n  default:\n    mic:\n      name: arecord\n      template_args:\n        device: \"default\"\n    wake:\n      name:"
  },
  {
    "path": "mypy.ini",
    "chars": 88,
    "preview": "[mypy]\nignore_missing_imports = true\n\n[mypy-setuptools.*]\nignore_missing_imports = True\n"
  },
  {
    "path": "programs/asr/coqui-stt/README.md",
    "chars": 571,
    "preview": "# Coqui STT\n\nSpeech to text service for Rhasspy based on [Coqui STT](https://stt.readthedocs.io/en/latest/).\n\nAdditional"
  },
  {
    "path": "programs/asr/coqui-stt/bin/coqui_stt_raw2text.py",
    "chars": 1939,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport logging\nimport sys\nfrom pathlib import Path\n\nimport numpy as np\nfrom stt i"
  },
  {
    "path": "programs/asr/coqui-stt/bin/coqui_stt_server.py",
    "chars": 4094,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport json\nimport logging\nimport os\nimport socket\nimport threading\nfrom pathlib "
  },
  {
    "path": "programs/asr/coqui-stt/bin/coqui_stt_wav2text.py",
    "chars": 2176,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport logging\nimport wave\nfrom pathlib import Path\n\nimport numpy as np\nfrom stt "
  },
  {
    "path": "programs/asr/coqui-stt/requirements.txt",
    "chars": 22,
    "preview": "stt>=1.4.0,<2.0\nnumpy\n"
  },
  {
    "path": "programs/asr/coqui-stt/script/download.py",
    "chars": 1666,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport itertools\nimport logging\nimport tarfile\nfrom pathlib import Path\nfrom urll"
  },
  {
    "path": "programs/asr/coqui-stt/script/raw2text",
    "chars": 359,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/asr/coqui-stt/script/server",
    "chars": 462,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/asr/coqui-stt/script/setup",
    "chars": 876,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/asr/coqui-stt/script/wav2text",
    "chars": 359,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/asr/faster-whisper/README.md",
    "chars": 614,
    "preview": "# Faster Whisper\n\nSpeech to text service for Rhasspy based on [faster-whisper](https://github.com/guillaumekln/faster-wh"
  },
  {
    "path": "programs/asr/faster-whisper/bin/faster_whisper_server.py",
    "chars": 3987,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport io\nimport logging\nimport os\nimport socket\nimport wave\nfrom pathlib import "
  },
  {
    "path": "programs/asr/faster-whisper/bin/faster_whisper_wav2text.py",
    "chars": 1994,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport logging\nimport time\nfrom pathlib import Path\n\nfrom faster_whisper import W"
  },
  {
    "path": "programs/asr/faster-whisper/script/download.py",
    "chars": 1631,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport logging\nimport tarfile\nfrom pathlib import Path\nfrom urllib.request import"
  },
  {
    "path": "programs/asr/faster-whisper/script/server",
    "chars": 472,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/asr/faster-whisper/script/setup",
    "chars": 957,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/asr/faster-whisper/script/wav2text",
    "chars": 364,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/asr/faster-whisper/src/LICENSE",
    "chars": 1072,
    "preview": "MIT License\n\nCopyright (c) 2023 Guillaume Klein\n\nPermission is hereby granted, free of charge, to any person obtaining a"
  },
  {
    "path": "programs/asr/faster-whisper/src/README.md",
    "chars": 2593,
    "preview": "# Faster Whisper transcription with CTranslate2\n\nThis repository demonstrates how to implement the Whisper transcription"
  },
  {
    "path": "programs/asr/faster-whisper/src/faster_whisper/__init__.py",
    "chars": 51,
    "preview": "from faster_whisper.transcribe import WhisperModel\n"
  },
  {
    "path": "programs/asr/faster-whisper/src/faster_whisper/audio.py",
    "chars": 968,
    "preview": "import av\nimport numpy as np\n\n\ndef decode_audio(input_file, sampling_rate=16000):\n    \"\"\"Decodes the audio.\n\n    Args:\n "
  },
  {
    "path": "programs/asr/faster-whisper/src/faster_whisper/feature_extractor.py",
    "chars": 5809,
    "preview": "import numpy as np\n\n\n# Adapted from https://github.com/huggingface/transformers/blob/main/src/transformers/models/whispe"
  },
  {
    "path": "programs/asr/faster-whisper/src/faster_whisper/transcribe.py",
    "chars": 12116,
    "preview": "import collections\nimport os\nimport zlib\n\nimport ctranslate2\nimport numpy as np\nimport tokenizers\nfrom faster_whisper.au"
  },
  {
    "path": "programs/asr/faster-whisper/src/requirements.conversion.txt",
    "chars": 26,
    "preview": "transformers[torch]>=4.23\n"
  },
  {
    "path": "programs/asr/faster-whisper/src/requirements.txt",
    "chars": 48,
    "preview": "av==10.*\nctranslate2>=3.5,<4\ntokenizers==0.13.*\n"
  },
  {
    "path": "programs/asr/faster-whisper/src/setup.py",
    "chars": 767,
    "preview": "import os\n\nfrom setuptools import find_packages, setup\n\n\ndef get_requirements(path):\n    with open(path, encoding=\"utf-8"
  },
  {
    "path": "programs/asr/pocketsphinx/README.md",
    "chars": 804,
    "preview": "# Pocketsphinx\n\nSpeech to text service for Rhasspy based on [Pocketsphinx](https://github.com/cmusphinx/pocketsphinx).\n\n"
  },
  {
    "path": "programs/asr/pocketsphinx/bin/pocketsphinx_raw2text.py",
    "chars": 1620,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport logging\nimport sys\nfrom pathlib import Path\n\nimport pocketsphinx\n\n_LOGGER "
  },
  {
    "path": "programs/asr/pocketsphinx/bin/pocketsphinx_server.py",
    "chars": 3798,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport json\nimport logging\nimport os\nimport socket\nimport threading\nfrom pathlib "
  },
  {
    "path": "programs/asr/pocketsphinx/bin/pocketsphinx_wav2text.py",
    "chars": 1890,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport logging\nimport wave\nfrom pathlib import Path\n\nimport pocketsphinx\n\n_LOGGER"
  },
  {
    "path": "programs/asr/pocketsphinx/requirements.txt",
    "chars": 117,
    "preview": "pocketsphinx @ https://github.com/synesthesiam/pocketsphinx-python/releases/download/v1.0/pocketsphinx-python.tar.gz\n"
  },
  {
    "path": "programs/asr/pocketsphinx/script/download.py",
    "chars": 1641,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport itertools\nimport logging\nimport tarfile\nfrom pathlib import Path\nfrom urll"
  },
  {
    "path": "programs/asr/pocketsphinx/script/raw2text",
    "chars": 362,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/asr/pocketsphinx/script/server",
    "chars": 468,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/asr/pocketsphinx/script/setup",
    "chars": 876,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/asr/pocketsphinx/script/wav2text",
    "chars": 362,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/asr/vosk/README.md",
    "chars": 554,
    "preview": "# Vosk\n\nSpeech to text service for Rhasspy based on [Vosk](https://alphacephei.com/vosk/).\n\nYou can download additional "
  },
  {
    "path": "programs/asr/vosk/bin/vosk_raw2text.py",
    "chars": 1243,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport json\nimport logging\nimport sys\n\nfrom vosk import KaldiRecognizer, Model, S"
  },
  {
    "path": "programs/asr/vosk/bin/vosk_server.py",
    "chars": 3401,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport json\nimport logging\nimport os\nimport socket\nimport threading\n\nfrom vosk im"
  },
  {
    "path": "programs/asr/vosk/bin/vosk_wav2text.py",
    "chars": 1712,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport json\nimport logging\nimport wave\nfrom pathlib import Path\n\nfrom vosk import"
  },
  {
    "path": "programs/asr/vosk/requirements.txt",
    "chars": 5,
    "preview": "vosk\n"
  },
  {
    "path": "programs/asr/vosk/script/download.py",
    "chars": 1676,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport itertools\nimport logging\nimport tarfile\nfrom pathlib import Path\nfrom urll"
  },
  {
    "path": "programs/asr/vosk/script/raw2text",
    "chars": 354,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/asr/vosk/script/server",
    "chars": 452,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/asr/vosk/script/setup",
    "chars": 876,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/asr/vosk/script/wav2text",
    "chars": 354,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/asr/whisper/README.md",
    "chars": 538,
    "preview": "# Whisper\n\nSpeech to text service for Rhasspy based on [Whisper](https://github.com/openai/whisper).\n\nModels are downloa"
  },
  {
    "path": "programs/asr/whisper/bin/whisper_server.py",
    "chars": 3451,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport json\nimport logging\nimport os\nimport socket\nimport threading\n\nimport numpy"
  },
  {
    "path": "programs/asr/whisper/bin/whisper_wav2text.py",
    "chars": 1197,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport logging\n\nfrom whisper import load_model, transcribe\n\n_LOGGER = logging.get"
  },
  {
    "path": "programs/asr/whisper/requirements.txt",
    "chars": 42,
    "preview": "git+https://github.com/openai/whisper.git\n"
  },
  {
    "path": "programs/asr/whisper/script/server",
    "chars": 458,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/asr/whisper/script/setup",
    "chars": 876,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/asr/whisper/script/wav2text",
    "chars": 357,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/asr/whisper-cpp/.gitignore",
    "chars": 8,
    "preview": "/build/\n"
  },
  {
    "path": "programs/asr/whisper-cpp/Dockerfile.libwhisper",
    "chars": 599,
    "preview": "FROM debian:bullseye as build\nARG TARGETARCH\nARG TARGETVARIANT\n\nENV LANG C.UTF-8\nENV DEBIAN_FRONTEND=noninteractive\n\nRUN"
  },
  {
    "path": "programs/asr/whisper-cpp/Dockerfile.libwhisper.dockerignore",
    "chars": 16,
    "preview": "*\n!lib/Makefile\n"
  },
  {
    "path": "programs/asr/whisper-cpp/README.md",
    "chars": 602,
    "preview": "# Whisper.cpp\n\nSpeech to text service for Rhasspy based on [whisper.cpp](https://github.com/ggerganov/whisper.cpp/).\n\nAd"
  },
  {
    "path": "programs/asr/whisper-cpp/bin/whisper_cpp_server.py",
    "chars": 3240,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport json\nimport logging\nimport os\nimport socket\nimport threading\n\nimport numpy"
  },
  {
    "path": "programs/asr/whisper-cpp/bin/whisper_cpp_wav2text.py",
    "chars": 1877,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport audioop\nimport logging\nimport wave\nfrom pathlib import Path\n\nimport numpy "
  },
  {
    "path": "programs/asr/whisper-cpp/lib/Makefile",
    "chars": 1072,
    "preview": "UNAME_M := $(shell uname -m)\n\nCFLAGS   = -Iwhisper.cpp -O3 -std=c11   -fPIC\nCXXFLAGS = -Iwhisper.cpp -O3 -std=c++11 -fPI"
  },
  {
    "path": "programs/asr/whisper-cpp/lib/whisper_cpp.py",
    "chars": 3766,
    "preview": "import ctypes\nfrom pathlib import Path\nfrom typing import Iterable, Union\n\nimport numpy as np\n\n\n# Must match struct in w"
  },
  {
    "path": "programs/asr/whisper-cpp/requirements.txt",
    "chars": 6,
    "preview": "numpy\n"
  },
  {
    "path": "programs/asr/whisper-cpp/script/build_libwhisper",
    "chars": 498,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/asr/whisper-cpp/script/download.py",
    "chars": 1555,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport logging\nimport tarfile\nfrom pathlib import Path\nfrom urllib.request import"
  },
  {
    "path": "programs/asr/whisper-cpp/script/server",
    "chars": 593,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/asr/whisper-cpp/script/setup",
    "chars": 908,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/asr/whisper-cpp/script/setup.py",
    "chars": 1459,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport logging\nimport platform\nimport tarfile\nfrom pathlib import Path\nfrom urlli"
  },
  {
    "path": "programs/asr/whisper-cpp/script/wav2text",
    "chars": 488,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/handle/date_time/bin/date_time.py",
    "chars": 404,
    "preview": "#!/usr/bin/env python3\nimport re\nimport sys\nfrom datetime import datetime\n\n\ndef main() -> None:\n    text = sys.stdin.rea"
  },
  {
    "path": "programs/handle/home_assistant/bin/converse.py",
    "chars": 1423,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport json\nimport logging\nimport sys\nfrom pathlib import Path\nfrom urllib.reques"
  },
  {
    "path": "programs/intent/regex/bin/regex.py",
    "chars": 1975,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport re\nfrom collections import defaultdict\nfrom typing import Dict, List, Opti"
  },
  {
    "path": "programs/mic/pyaudio/README.md",
    "chars": 114,
    "preview": "# PyAudio\n\nAudio input service for Rhasspy based on [PyAudio](https://people.csail.mit.edu/hubert/pyaudio/docs/).\n"
  },
  {
    "path": "programs/mic/pyaudio/bin/pyaudio_events.py",
    "chars": 1517,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport logging\nfrom pathlib import Path\n\nfrom pyaudio_shared import iter_chunks\n\n"
  },
  {
    "path": "programs/mic/pyaudio/bin/pyaudio_list_mics.py",
    "chars": 247,
    "preview": "#!/usr/bin/env python3\n\nimport pyaudio\n\n\ndef main() -> None:\n    audio_system = pyaudio.PyAudio()\n    for i in range(aud"
  },
  {
    "path": "programs/mic/pyaudio/bin/pyaudio_raw.py",
    "chars": 1213,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport logging\nimport sys\nfrom pathlib import Path\n\nfrom pyaudio_shared import it"
  },
  {
    "path": "programs/mic/pyaudio/bin/pyaudio_shared.py",
    "chars": 1432,
    "preview": "import logging\nfrom pathlib import Path\nfrom typing import Iterable, Optional, Union\n\nimport pyaudio\n\n_FILE = Path(__fil"
  },
  {
    "path": "programs/mic/pyaudio/requirements.txt",
    "chars": 8,
    "preview": "pyaudio\n"
  },
  {
    "path": "programs/mic/pyaudio/script/events",
    "chars": 355,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/mic/pyaudio/script/list_mics",
    "chars": 358,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/mic/pyaudio/script/raw",
    "chars": 352,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/mic/pyaudio/script/setup",
    "chars": 969,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/mic/sounddevice/README.md",
    "chars": 114,
    "preview": "# sounddevice\n\nAudio input service for Rhasspy based on [sounddevice](https://python-sounddevice.readthedocs.io).\n"
  },
  {
    "path": "programs/mic/sounddevice/bin/sounddevice_events.py",
    "chars": 1521,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport logging\nfrom pathlib import Path\n\nfrom sounddevice_shared import iter_chun"
  },
  {
    "path": "programs/mic/sounddevice/bin/sounddevice_list_mics.py",
    "chars": 170,
    "preview": "#!/usr/bin/env python3\n\nimport sounddevice\n\n\ndef main() -> None:\n    for info in sounddevice.query_devices():\n        pr"
  },
  {
    "path": "programs/mic/sounddevice/bin/sounddevice_raw.py",
    "chars": 1217,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport logging\nimport sys\nfrom pathlib import Path\n\nfrom sounddevice_shared impor"
  },
  {
    "path": "programs/mic/sounddevice/bin/sounddevice_shared.py",
    "chars": 1276,
    "preview": "import logging\nfrom pathlib import Path\nfrom typing import Iterable, Optional, Union\n\nimport sounddevice\n\n_FILE = Path(_"
  },
  {
    "path": "programs/mic/sounddevice/requirements.txt",
    "chars": 12,
    "preview": "sounddevice\n"
  },
  {
    "path": "programs/mic/sounddevice/script/events",
    "chars": 359,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/mic/sounddevice/script/list_mics",
    "chars": 329,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/mic/sounddevice/script/raw",
    "chars": 356,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/mic/sounddevice/script/setup",
    "chars": 969,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/mic/udp_raw/bin/udp_raw.py",
    "chars": 1615,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport socketserver\nfrom functools import partial\n\nfrom rhasspy3.audio import (\n "
  },
  {
    "path": "programs/remote/websocket/bin/stream2stream.py",
    "chars": 3098,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport asyncio\nimport json\nimport logging\nimport sys\nfrom pathlib import Path\nfro"
  },
  {
    "path": "programs/remote/websocket/requirements.txt",
    "chars": 11,
    "preview": "websockets\n"
  },
  {
    "path": "programs/remote/websocket/script/run",
    "chars": 354,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/remote/websocket/script/setup",
    "chars": 969,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/snd/udp_raw/bin/udp_raw.py",
    "chars": 1440,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport socket\n\nfrom rhasspy3.audio import (\n    DEFAULT_OUT_CHANNELS,\n    DEFAULT"
  },
  {
    "path": "programs/tts/coqui-tts/README.md",
    "chars": 98,
    "preview": "# Coqui-TTS\n\nText to speech service for Rhasspy based on [Coqui-TTS](https://tts.readthedocs.io).\n"
  },
  {
    "path": "programs/tts/coqui-tts/requirements.txt",
    "chars": 4,
    "preview": "tts\n"
  },
  {
    "path": "programs/tts/coqui-tts/script/list_models",
    "chars": 329,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/tts/coqui-tts/script/server",
    "chars": 365,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/tts/coqui-tts/script/setup",
    "chars": 876,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/tts/flite/script/download.py",
    "chars": 1351,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport logging\nimport tarfile\nfrom pathlib import Path\nfrom urllib.request import"
  },
  {
    "path": "programs/tts/flite/script/setup",
    "chars": 159,
    "preview": "#!/usr/bin/env bash\nsudo apt-get update\nsudo apt-get install flite\n\n# --------------------------------------------------"
  },
  {
    "path": "programs/tts/larynx/README.md",
    "chars": 100,
    "preview": "# Larynx\n\nText to speech service for Rhasspy based on [Larynx](https://github.com/rhasspy/larynx/).\n"
  },
  {
    "path": "programs/tts/larynx/bin/larynx_client.py",
    "chars": 937,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport logging\nimport shutil\nimport sys\nfrom pathlib import Path\nfrom urllib.pars"
  },
  {
    "path": "programs/tts/larynx/requirements.txt",
    "chars": 7,
    "preview": "larynx\n"
  },
  {
    "path": "programs/tts/larynx/script/list_models",
    "chars": 325,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/tts/larynx/script/server",
    "chars": 325,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/tts/larynx/script/setup",
    "chars": 1020,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/tts/marytts/bin/marytts.py",
    "chars": 937,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport logging\nimport shutil\nimport sys\nfrom pathlib import Path\nfrom urllib.pars"
  },
  {
    "path": "programs/tts/mimic3/README.md",
    "chars": 103,
    "preview": "# Mimic 3\n\nText to speech service for Rhasspy based on [Mimic 3](https://github.com/mycroftAI/mimic3).\n"
  },
  {
    "path": "programs/tts/mimic3/bin/mimic3_server.py",
    "chars": 4916,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport json\nimport logging\nimport os\nimport socket\nimport threading\nfrom pathlib "
  },
  {
    "path": "programs/tts/mimic3/requirements.txt",
    "chars": 24,
    "preview": "mycroft-mimic3-tts[all]\n"
  },
  {
    "path": "programs/tts/mimic3/script/server",
    "chars": 456,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/tts/mimic3/script/setup",
    "chars": 934,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/tts/piper/README.md",
    "chars": 542,
    "preview": "# Piper\n\nText to speech service for Rhasspy based on [Piper](https://github.com/rhasspy/piper).\n\n\n## Installation\n\n1. Co"
  },
  {
    "path": "programs/tts/piper/bin/piper_server.py",
    "chars": 4796,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport json\nimport logging\nimport os\nimport socket\nimport subprocess\nimport tempf"
  },
  {
    "path": "programs/tts/piper/script/download.py",
    "chars": 3811,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport logging\nimport tarfile\nfrom pathlib import Path\nfrom urllib.request import"
  },
  {
    "path": "programs/tts/piper/script/server",
    "chars": 454,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/tts/piper/script/setup.py",
    "chars": 1986,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport logging\nimport platform\nimport shutil\nimport tarfile\nimport tempfile\nfrom "
  },
  {
    "path": "programs/vad/energy/bin/energy_speech_prob.py",
    "chars": 1965,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport audioop\nimport logging\nimport sys\nfrom pathlib import Path\n\n_FILE = Path(_"
  },
  {
    "path": "programs/vad/silero/README.md",
    "chars": 122,
    "preview": "# Silero VAD\n\nVoice activity detection service for Rhasspy based on [silero-vad](https://github.com/snakers4/silero-vad)"
  },
  {
    "path": "programs/vad/silero/bin/silero_speech_prob.py",
    "chars": 2680,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport logging\nimport sys\nfrom dataclasses import dataclass\nfrom pathlib import P"
  },
  {
    "path": "programs/vad/silero/requirements.txt",
    "chars": 18,
    "preview": "onnxruntime\nnumpy\n"
  },
  {
    "path": "programs/vad/silero/script/setup",
    "chars": 969,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/vad/silero/script/speech_prob",
    "chars": 359,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/vad/webrtcvad/README.md",
    "chars": 117,
    "preview": "# webrtcvad\n\nVoice activity detection service for Rhasspy based on [webrtcvad](https://pypi.org/project/webrtcvad/).\n"
  },
  {
    "path": "programs/vad/webrtcvad/bin/webrtcvad_speech_prob.py",
    "chars": 1709,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport logging\nimport sys\nfrom pathlib import Path\n\nimport webrtcvad\n\n_FILE = Pat"
  },
  {
    "path": "programs/vad/webrtcvad/requirements.txt",
    "chars": 10,
    "preview": "webrtcvad\n"
  },
  {
    "path": "programs/vad/webrtcvad/script/setup",
    "chars": 969,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/vad/webrtcvad/script/speech_prob",
    "chars": 362,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/wake/porcupine1/bin/list_models.py",
    "chars": 381,
    "preview": "#!/usr/bin/env python3\nfrom pathlib import Path\n\nimport pvporcupine\n\n\ndef main() -> None:\n    \"\"\"Main method.\"\"\"\n\n    fo"
  },
  {
    "path": "programs/wake/porcupine1/bin/porcupine_raw_text.py",
    "chars": 1544,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport logging\nimport struct\nimport sys\nfrom pathlib import Path\n\nfrom porcupine_"
  },
  {
    "path": "programs/wake/porcupine1/bin/porcupine_shared.py",
    "chars": 1957,
    "preview": "import argparse\nfrom pathlib import Path\nfrom typing import List, Tuple\n\nimport pvporcupine\n\n\ndef get_arg_parser() -> ar"
  },
  {
    "path": "programs/wake/porcupine1/bin/porcupine_stream.py",
    "chars": 2032,
    "preview": "#!/usr/bin/env python3\nimport logging\nimport struct\nfrom pathlib import Path\n\nfrom porcupine_shared import get_arg_parse"
  },
  {
    "path": "programs/wake/porcupine1/requirements.txt",
    "chars": 19,
    "preview": "pvporcupine~=1.9.0\n"
  },
  {
    "path": "programs/wake/porcupine1/script/download.py",
    "chars": 1268,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport logging\nimport tarfile\nfrom pathlib import Path\nfrom urllib.request import"
  },
  {
    "path": "programs/wake/porcupine1/script/list_models",
    "chars": 352,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/wake/porcupine1/script/raw2text",
    "chars": 359,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/wake/porcupine1/script/setup",
    "chars": 876,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/wake/precise-lite/bin/precise.py",
    "chars": 12979,
    "preview": "#!/usr/bin/env python3\n# Copyright 2021 Mycroft AI Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License"
  },
  {
    "path": "programs/wake/precise-lite/requirements.txt",
    "chars": 47,
    "preview": "numpy\nsonopy~=0.1.0\ntflite_runtime>=2.5.0,<3.0\n"
  },
  {
    "path": "programs/wake/precise-lite/script/setup",
    "chars": 876,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "programs/wake/snowboy/bin/snowboy_raw_text.py",
    "chars": 2767,
    "preview": "#!/usr/bin/env python3\nimport argparse\nimport logging\nimport sys\nfrom pathlib import Path\nfrom typing import Dict\n\nfrom "
  },
  {
    "path": "programs/wake/snowboy/requirements.txt",
    "chars": 67,
    "preview": "snowboy @ https://github.com/Kitt-AI/snowboy/archive/v1.3.0.tar.gz\n"
  },
  {
    "path": "programs/wake/snowboy/script/setup",
    "chars": 876,
    "preview": "#!/usr/bin/env bash\nset -eo pipefail\n\n# Directory of *this* script\nthis_dir=\"$( cd \"$( dirname \"$0\" )\" && pwd )\"\n\n# Base"
  },
  {
    "path": "pylintrc",
    "chars": 781,
    "preview": "[MESSAGES CONTROL]\ndisable=\n  format,\n  abstract-method,\n  cyclic-import,\n  duplicate-code,\n  global-statement,\n  import"
  },
  {
    "path": "requirements_dev.txt",
    "chars": 84,
    "preview": "black==22.12.0\nflake8==6.0.0\nisort==5.11.3\nmypy==0.991\npylint==2.15.9\npytest==7.2.0\n"
  },
  {
    "path": "requirements_http_api.txt",
    "chars": 27,
    "preview": "quart\nQuart-CORS\nhypercorn\n"
  },
  {
    "path": "rhasspy3/VERSION",
    "chars": 6,
    "preview": "0.0.1\n"
  },
  {
    "path": "rhasspy3/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "rhasspy3/asr.py",
    "chars": 6220,
    "preview": "\"\"\"Speech to text.\"\"\"\nimport asyncio\nimport logging\nimport wave\nfrom dataclasses import dataclass\nfrom typing import IO,"
  },
  {
    "path": "rhasspy3/audio.py",
    "chars": 5808,
    "preview": "\"\"\"Audio input/output.\"\"\"\nimport audioop\nimport wave\nfrom dataclasses import dataclass\nfrom typing import Iterable, Opti"
  },
  {
    "path": "rhasspy3/config.py",
    "chars": 4133,
    "preview": "import argparse\nfrom dataclasses import dataclass, field\nfrom typing import Any, Dict, Optional\n\nfrom .util import merge"
  },
  {
    "path": "rhasspy3/configuration.yaml",
    "chars": 17434,
    "preview": "programs:\n\n  # -----------\n  # Audio input\n  # -----------\n  mic:\n\n    # apt-get install alsa-utils\n    arecord:\n      c"
  }
]

// ... and 58 more files (download for full content)

About this extraction

This page contains the full source code of the rhasspy/rhasspy3 GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 258 files (454.7 KB), approximately 113.5k tokens, and a symbol index with 315 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.

Extracted by GitExtract — a free GitHub repo-to-text converter for AI. Built by Nikandr Surkov.

Copied to clipboard!