Repository: livekit/egress Branch: main Commit: bafdf0955fd8 Files: 184 Total size: 881.4 KB Directory structure: gitextract_ye_8hhme/ ├── .github/ │ ├── CODEOWNERS │ ├── ISSUE_TEMPLATE/ │ │ ├── bug_report.md │ │ └── feature_request.md │ └── workflows/ │ ├── publish-chrome.yaml │ ├── publish-egress.yaml │ ├── publish-gstreamer-base.yaml │ ├── publish-gstreamer.yaml │ ├── publish-template-sdk.yaml │ ├── publish-template.yaml │ ├── slack-notifier.yaml │ ├── test-cleanup.yaml │ ├── test-integration.yaml │ └── test-template.yaml ├── .gitignore ├── .golangci.yaml ├── LICENSE ├── NOTICE ├── README.md ├── bootstrap.sh ├── build/ │ ├── chrome/ │ │ ├── Dockerfile │ │ ├── README.md │ │ ├── install-chrome │ │ └── scripts/ │ │ ├── amd64.sh │ │ ├── arm64.sh │ │ └── driver.sh │ ├── egress/ │ │ ├── Dockerfile │ │ └── entrypoint.sh │ ├── gstreamer/ │ │ ├── Dockerfile-base │ │ ├── Dockerfile-dev │ │ ├── Dockerfile-prod │ │ ├── Dockerfile-prod-rs │ │ ├── compile │ │ ├── compile-rs │ │ ├── install-dependencies │ │ └── tag.sh │ ├── template/ │ │ └── Dockerfile │ └── test/ │ ├── Dockerfile │ ├── entrypoint.sh │ └── fetch-media-samples.sh ├── chrome-sandboxing-seccomp-profile.json ├── cmd/ │ ├── server/ │ │ ├── http.go │ │ └── main.go │ └── template_version/ │ └── main.go ├── go.mod ├── go.sum ├── magefile.go ├── pkg/ │ ├── config/ │ │ ├── base.go │ │ ├── config_test.go │ │ ├── encoding.go │ │ ├── manifest.go │ │ ├── output.go │ │ ├── output_file.go │ │ ├── output_image.go │ │ ├── output_segment.go │ │ ├── output_stream.go │ │ ├── pipeline.go │ │ ├── retry_test.go │ │ ├── service.go │ │ ├── storage.go │ │ ├── test_overrides.go │ │ ├── urls.go │ │ └── urls_test.go │ ├── errors/ │ │ └── errors.go │ ├── gstreamer/ │ │ ├── bin.go │ │ ├── builder.go │ │ ├── callbacks.go │ │ ├── pads.go │ │ ├── pipeline.go │ │ ├── queue_monitor.go │ │ ├── state.go │ │ └── time_provider.go │ ├── handler/ │ │ ├── handler.go │ │ ├── handler_ipc.go │ │ └── handler_rpc.go │ ├── info/ │ │ └── io.go │ ├── ipc/ │ │ 
├── conn.go │ │ ├── ipc.pb.go │ │ ├── ipc.proto │ │ └── ipc_grpc.pb.go │ ├── logging/ │ │ ├── csv.go │ │ ├── handler.go │ │ └── s3.go │ ├── pipeline/ │ │ ├── builder/ │ │ │ ├── audio.go │ │ │ ├── file.go │ │ │ ├── image.go │ │ │ ├── muxer.go │ │ │ ├── muxer_test.go │ │ │ ├── pts_fixer.go │ │ │ ├── segment.go │ │ │ ├── stream.go │ │ │ ├── video.go │ │ │ ├── vp9_probe.go │ │ │ └── websocket.go │ │ ├── controller.go │ │ ├── debug.go │ │ ├── sink/ │ │ │ ├── file.go │ │ │ ├── image.go │ │ │ ├── m3u8/ │ │ │ │ ├── writer.go │ │ │ │ └── writer_test.go │ │ │ ├── segments.go │ │ │ ├── sink.go │ │ │ ├── stream.go │ │ │ ├── uploader/ │ │ │ │ ├── uploader.go │ │ │ │ └── uploader_test.go │ │ │ └── websocket.go │ │ ├── source/ │ │ │ ├── pulse/ │ │ │ │ └── pactl.go │ │ │ ├── sdk/ │ │ │ │ ├── appwriter.go │ │ │ │ └── translator.go │ │ │ ├── sdk.go │ │ │ ├── source.go │ │ │ ├── tracer.go │ │ │ ├── track_worker.go │ │ │ ├── track_worker_test.go │ │ │ └── web.go │ │ ├── tempo/ │ │ │ ├── controller.go │ │ │ └── controller_test.go │ │ └── watch.go │ ├── server/ │ │ ├── integration.go │ │ ├── server.go │ │ ├── server_ipc.go │ │ └── server_rpc.go │ ├── service/ │ │ ├── debug.go │ │ ├── metrics.go │ │ ├── process.go │ │ └── servicefakes/ │ │ └── fake_process_manager.go │ ├── stats/ │ │ ├── handler.go │ │ ├── monitor.go │ │ ├── monitor_memory_test.go │ │ └── monitor_prom.go │ └── types/ │ ├── types.go │ └── types_test.go ├── renovate.json ├── template-default/ │ ├── .gitignore │ ├── .prettierrc │ ├── README.md │ ├── eslint.config.js │ ├── index.html │ ├── package.json │ ├── public/ │ │ ├── manifest.json │ │ └── robots.txt │ ├── src/ │ │ ├── App.css │ │ ├── App.tsx │ │ ├── Room.tsx │ │ ├── SingleSpeakerLayout.tsx │ │ ├── SpeakerLayout.tsx │ │ ├── common.ts │ │ ├── index.css │ │ ├── index.tsx │ │ └── vite-env.d.ts │ ├── tsconfig.app.json │ ├── tsconfig.json │ ├── tsconfig.node.json │ └── vite.config.ts ├── template-sdk/ │ ├── .gitignore │ ├── .npmignore │ ├── .prettierrc │ ├── README.md │ ├── 
package.json │ ├── src/ │ │ └── index.ts │ └── tsconfig.json ├── test/ │ ├── agents/ │ │ ├── .gitignore │ │ ├── guest.py │ │ ├── host.py │ │ └── requirements.txt │ ├── agents.go │ ├── builder.go │ ├── config-sample.yaml │ ├── content_checks.go │ ├── download.go │ ├── edge.go │ ├── ffprobe.go │ ├── file.go │ ├── flags.go │ ├── images.go │ ├── integration.go │ ├── integration_test.go │ ├── ioserver.go │ ├── multi.go │ ├── publish.go │ ├── runner.go │ ├── segments.go │ ├── stream.go │ └── test_content.go └── version/ └── version.go ================================================ FILE CONTENTS ================================================ ================================================ FILE: .github/CODEOWNERS ================================================ * @livekit/cs-devs ================================================ FILE: .github/ISSUE_TEMPLATE/bug_report.md ================================================ --- name: Bug report about: Report an egress issue title: "[BUG]" labels: bug assignees: frostbyte73 --- **Describe the bug** A clear and concise description of what the bug is. **Egress Version** What version are you running? **Egress Request** Post the request here (be sure to remove any PII). **Additional context** Add any other context about the problem here. **Logs** Post any relevant logs from the egress service here. ================================================ FILE: .github/ISSUE_TEMPLATE/feature_request.md ================================================ --- name: Feature request about: Suggest an idea for this project title: "[FEATURE]" labels: enhancement, help wanted assignees: '' --- **Is your feature request related to a problem? Please describe.** A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] **Describe the solution you'd like** A clear and concise description of what you want to happen. 
**Describe alternatives you've considered** A clear and concise description of any alternative solutions or features you've considered. **Additional context** Add any other context or screenshots about the feature request here. ================================================ FILE: .github/workflows/publish-chrome.yaml ================================================ name: Publish Chrome on: workflow_dispatch: inputs: chrome_version: description: "Version of Chrome to build" required: true type: string image_tag: description: "Docker image tag (defaults to chrome_version if empty)" required: false type: string jobs: build: runs-on: ubuntu-latest steps: - name: Checkout code uses: actions/checkout@v6 - name: Install the Linode CLI uses: linode/action-linode-cli@v1 with: token: ${{ secrets.LINODE_PAT }} - name: Get firewall id id: firewall shell: bash env: LINODE_CLI_TOKEN: ${{ secrets.LINODE_PAT }} run: | set -euo pipefail firewall_id="$(linode-cli firewalls list --label chrome-builder --json | jq -r '.[0].id // empty')" if [ -z "$firewall_id" ]; then echo "Firewall with label chrome-builder not found" exit 1 fi echo "firewall_id=$firewall_id" >> "$GITHUB_OUTPUT" echo "Using firewall_id=$firewall_id" - name: Build cloud-init user-data id: userdata shell: bash run: | set -euo pipefail cat > cloud-init.yaml <<'EOF' #cloud-config package_update: true package_upgrade: false packages: - sudo - zip - unzip - curl - git - netcat-openbsd users: - name: chrome gecos: Chrome Builder shell: /bin/bash groups: [sudo] sudo: ALL=(ALL) NOPASSWD:ALL lock_passwd: true ssh_authorized_keys: - ${LINODE_SSH_PUBLIC_KEY} write_files: - path: /etc/ssh/sshd_config.d/99-github-actions.conf permissions: '0644' content: | PasswordAuthentication no ClientAliveInterval 60 ClientAliveCountMax 3 runcmd: - mkdir -p /home/chrome/.ssh - chmod 700 /home/chrome/.ssh - printf '%s\n' "${LINODE_SSH_PUBLIC_KEY}" > /home/chrome/.ssh/authorized_keys - chmod 600 /home/chrome/.ssh/authorized_keys - chown -R 
chrome:chrome /home/chrome/.ssh - systemctl restart ssh || systemctl restart sshd || true EOF sed "s|\${LINODE_SSH_PUBLIC_KEY}|${{ secrets.LINODE_SSH_PUBLIC_KEY }}|g" cloud-init.yaml > cloud-init.rendered.yaml user_data_b64="$(base64 -w 0 cloud-init.rendered.yaml)" echo "user_data_b64=$user_data_b64" >> "$GITHUB_OUTPUT" - name: Get or create builder id: builder shell: bash env: LINODE_CLI_TOKEN: ${{ secrets.LINODE_PAT }} run: | set -euo pipefail builder_json="$(linode-cli linodes list --label chrome-builder --json)" builder_id="$(echo "$builder_json" | jq -r '.[0].id // empty')" if [ -n "$builder_id" ]; then echo "Reusing existing builder: $builder_id" builder_ip="$(echo "$builder_json" | jq -r '.[0].ipv4[0] // empty')" builder_status="$(echo "$builder_json" | jq -r '.[0].status // empty')" if [ "$builder_status" = "offline" ]; then echo "Booting existing builder" linode-cli linodes boot "$builder_id" fi echo "builder_created=false" >> "$GITHUB_OUTPUT" echo "builder_id=$builder_id" >> "$GITHUB_OUTPUT" echo "builder_ip=$builder_ip" >> "$GITHUB_OUTPUT" exit 0 fi echo "No existing builder found, creating a new one" builder_info="$(linode-cli linodes create \ --backups_enabled false \ --booted true \ --image linode/ubuntu22.04 \ --label chrome-builder \ --private_ip false \ --region us-west \ --root_pass "${{ secrets.LINODE_ROOT_PASS }}" \ --type g6-dedicated-56 \ --authorized_keys "${{ secrets.LINODE_SSH_PUBLIC_KEY }}" \ --firewall_id "${{ steps.firewall.outputs.firewall_id }}" \ --metadata.user_data "${{ steps.userdata.outputs.user_data_b64 }}" \ --json)" echo "$builder_info" builder_id="$(echo "$builder_info" | jq -r '.[0].id')" builder_ip="$(echo "$builder_info" | jq -r '.[0].ipv4[0]')" echo "builder_created=true" >> "$GITHUB_OUTPUT" echo "builder_id=$builder_id" >> "$GITHUB_OUTPUT" echo "builder_ip=$builder_ip" >> "$GITHUB_OUTPUT" - name: Wait for Builder status shell: bash env: LINODE_CLI_TOKEN: ${{ secrets.LINODE_PAT }} run: | set -euo pipefail 
status="$(linode-cli linodes view "${{ steps.builder.outputs.builder_id }}" --json | jq -r '.[0].status')" while [ "$status" = "provisioning" ] || [ "$status" = "booting" ]; do echo "Builder status: $status" sleep 5 status="$(linode-cli linodes view "${{ steps.builder.outputs.builder_id }}" --json | jq -r '.[0].status')" done echo "Builder status: $status" if [ "$status" != "running" ]; then echo "Builder failed to reach running state" exit 1 fi - name: Write SSH keys shell: bash run: | set -euo pipefail mkdir -p ~/.ssh chmod 700 ~/.ssh printf '%s\n' "${{ secrets.LINODE_SSH_PRIVATE_KEY }}" > ~/.ssh/linode_ed25519 printf '%s\n' "${{ secrets.LINODE_SSH_PUBLIC_KEY }}" > ~/.ssh/linode_ed25519.pub chmod 600 ~/.ssh/linode_ed25519 ~/.ssh/linode_ed25519.pub - name: Wait for SSH and cloud-init if needed shell: bash run: | set -euo pipefail ip="${{ steps.builder.outputs.builder_ip }}" for i in $(seq 1 180); do if [ "${{ steps.builder.outputs.builder_created }}" = "true" ]; then remote_cmd='cloud-init status --wait >/dev/null 2>&1 || true; echo ready' else remote_cmd='echo ready' fi if ssh -i ~/.ssh/linode_ed25519 \ -o BatchMode=yes \ -o PasswordAuthentication=no \ -o StrictHostKeyChecking=accept-new \ -o ConnectTimeout=5 \ root@"$ip" \ "$remote_cmd" >/tmp/ssh-ready.txt 2>/tmp/ssh-ready.err; then echo "SSH is ready" break fi echo "Waiting for SSH on $ip..." 
cat /tmp/ssh-ready.err || true sleep 2 done grep -q ready /tmp/ssh-ready.txt - name: Verify chrome user shell: bash run: | set -euo pipefail ssh -i ~/.ssh/linode_ed25519 \ -o BatchMode=yes \ -o PasswordAuthentication=no \ -o StrictHostKeyChecking=yes \ root@${{ steps.builder.outputs.builder_ip }} \ 'id chrome && sudo -iu chrome whoami' - name: Amd64 shell: bash run: | set -euo pipefail ssh -i ~/.ssh/linode_ed25519 \ -o BatchMode=yes \ -o PasswordAuthentication=no \ -o StrictHostKeyChecking=yes \ -o ServerAliveInterval=60 \ root@${{ steps.builder.outputs.builder_ip }} \ "sudo -iu chrome bash -s -- '${{ inputs.chrome_version }}'" \ < ./build/chrome/scripts/amd64.sh - name: Arm64 shell: bash run: | set -euo pipefail ssh -i ~/.ssh/linode_ed25519 \ -o BatchMode=yes \ -o PasswordAuthentication=no \ -o StrictHostKeyChecking=yes \ -o ServerAliveInterval=60 \ root@${{ steps.builder.outputs.builder_ip }} \ "sudo -iu chrome bash -s -- '${{ inputs.chrome_version }}'" \ < ./build/chrome/scripts/arm64.sh - name: Drivers shell: bash run: | set -euo pipefail ssh -i ~/.ssh/linode_ed25519 \ -o BatchMode=yes \ -o PasswordAuthentication=no \ -o StrictHostKeyChecking=yes \ -o ServerAliveInterval=60 \ root@${{ steps.builder.outputs.builder_ip }} \ "sudo -iu chrome bash -s -- '${{ inputs.chrome_version }}'" \ < ./build/chrome/scripts/driver.sh - name: Prepare artifacts shell: bash run: | set -euo pipefail ssh -i ~/.ssh/linode_ed25519 \ -o BatchMode=yes \ -o PasswordAuthentication=no \ -o StrictHostKeyChecking=yes \ root@${{ steps.builder.outputs.builder_ip }} \ 'sudo -iu chrome bash -lc "cd /home/chrome && rm -f output.zip && zip -r output.zip ./output"' - name: Download artifacts shell: bash run: | set -euo pipefail rm -rf "${{ github.workspace }}/build/chrome/output" mkdir -p "${{ github.workspace }}/build/chrome" scp -i ~/.ssh/linode_ed25519 \ -o BatchMode=yes \ -o PasswordAuthentication=no \ -o StrictHostKeyChecking=yes \ root@${{ steps.builder.outputs.builder_ip 
}}:/home/chrome/output.zip \ "${{ github.workspace }}/build/chrome/output.zip" unzip -o "${{ github.workspace }}/build/chrome/output.zip" -d "${{ github.workspace }}/build/chrome" - name: Set up Docker Buildx uses: docker/setup-buildx-action@v4 - name: Login to DockerHub uses: docker/login-action@v4 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Build and push uses: docker/build-push-action@v7 with: context: ./build/chrome file: ./build/chrome/Dockerfile push: true platforms: linux/amd64,linux/arm64 tags: livekit/chrome-installer:${{ inputs.image_tag || inputs.chrome_version }} - name: Delete created builder on success if: success() shell: bash env: LINODE_CLI_TOKEN: ${{ secrets.LINODE_PAT }} run: | set -euo pipefail linode-cli linodes delete "${{ steps.builder.outputs.builder_id }}" ================================================ FILE: .github/workflows/publish-egress.yaml ================================================ # Copyright 2023 LiveKit, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. name: Publish Egress # Controls when the action will run. 
on: workflow_dispatch: push: # only publish on version tags tags: - 'v*.*.*' jobs: docker: runs-on: namespace-profile-8vcpu-cache steps: - uses: actions/checkout@v6 - uses: actions/cache@v5 with: path: | ~/go/pkg/mod ~/go/bin ~/.cache key: "${{ runner.os }}-egress-${{ hashFiles('**/go.sum') }}" restore-keys: ${{ runner.os }}-egress - name: Docker metadata id: docker-md uses: docker/metadata-action@v6 with: images: livekit/egress tags: | type=semver,pattern=v{{version}} type=semver,pattern=v{{major}}.{{minor}} - name: Set up Go uses: actions/setup-go@v6 with: go-version: 1.26.1 - name: Download Go modules run: go mod download - name: Get template image id: template-tag run: | TEMPLATE_TAG=`go run github.com/livekit/egress/cmd/template_version` echo "template_tag=$TEMPLATE_TAG" >> "$GITHUB_OUTPUT" - name: Set up QEMU uses: docker/setup-qemu-action@v4 - name: Set up Docker Buildx uses: docker/setup-buildx-action@v4 - name: Login to DockerHub uses: docker/login-action@v4 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Build and push uses: docker/build-push-action@v7 with: context: . 
file: ./build/egress/Dockerfile push: true platforms: linux/amd64,linux/arm64 tags: ${{ steps.docker-md.outputs.tags }} labels: ${{ steps.docker-md.outputs.labels }} build-args: | TEMPLATE_TAG=${{ steps.template-tag.outputs.template_tag }} ================================================ FILE: .github/workflows/publish-gstreamer-base.yaml ================================================ on: workflow_call: inputs: version: required: true type: string buildjet-runs-on: required: true type: string arch: required: true type: string secrets: DOCKERHUB_USERNAME: required: true DOCKERHUB_TOKEN: required: true env: GST_VERSION: "${{ inputs.version }}" LIBNICE_VERSION: "0.1.21" jobs: base-gstreamer-build: runs-on: ${{ inputs.buildjet-runs-on }} steps: - name: Checkout code uses: actions/checkout@v6 - name: Set up Docker Buildx uses: docker/setup-buildx-action@v4 - name: Login to Docker Hub uses: docker/login-action@v4 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Build and push base uses: docker/build-push-action@v7 with: context: ./build/gstreamer push: true build-args: | GSTREAMER_VERSION=${{ env.GST_VERSION }} LIBNICE_VERSION=${{ env.LIBNICE_VERSION }} file: ./build/gstreamer/Dockerfile-base tags: livekit/gstreamer:${{ env.GST_VERSION }}-base-${{ inputs.arch }} - name: Build and push dev uses: docker/build-push-action@v7 with: context: ./build/gstreamer push: true build-args: | GSTREAMER_VERSION=${{ env.GST_VERSION }} LIBNICE_VERSION=${{ env.LIBNICE_VERSION }} file: ./build/gstreamer/Dockerfile-dev tags: livekit/gstreamer:${{ env.GST_VERSION }}-dev-${{ inputs.arch }} - name: Build and push prod uses: docker/build-push-action@v7 with: context: ./build/gstreamer push: true build-args: | GSTREAMER_VERSION=${{ env.GST_VERSION }} LIBNICE_VERSION=${{ env.LIBNICE_VERSION }} file: ./build/gstreamer/Dockerfile-prod tags: livekit/gstreamer:${{ env.GST_VERSION }}-prod-${{ inputs.arch }} - name: Build and push prod RS uses: 
docker/build-push-action@v7 with: context: ./build/gstreamer push: true build-args: | GSTREAMER_VERSION=${{ env.GST_VERSION }} LIBNICE_VERSION=${{ env.LIBNICE_VERSION }} file: ./build/gstreamer/Dockerfile-prod-rs tags: livekit/gstreamer:${{ env.GST_VERSION }}-prod-rs-${{ inputs.arch }} ================================================ FILE: .github/workflows/publish-gstreamer.yaml ================================================ name: Publish GStreamer on: workflow_dispatch: inputs: version: description: "GStreamer version to publish (e.g. 1.24.4)" required: true type: string jobs: gstreamer-build-amd64: uses: ./.github/workflows/publish-gstreamer-base.yaml with: version: ${{ inputs.version }} buildjet-runs-on: namespace-profile-8vcpu-cache arch: amd64 secrets: DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} gstreamer-build-arm64: uses: ./.github/workflows/publish-gstreamer-base.yaml with: version: ${{ inputs.version }} buildjet-runs-on: namespace-profile-arm-16 arch: arm64 secrets: DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} tag-gstreamer-build: needs: [gstreamer-build-amd64, gstreamer-build-arm64] runs-on: ubuntu-latest steps: - name: Checkout code uses: actions/checkout@v6 - name: Set up Docker Buildx uses: docker/setup-buildx-action@v4 - name: Login to Docker Hub uses: docker/login-action@v4 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Run tag script run: ./build/gstreamer/tag.sh ${{ inputs.version }} ================================================ FILE: .github/workflows/publish-template-sdk.yaml ================================================ # Copyright 2023 LiveKit, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. name: Publish Template SDK on: push: tags: - "template*" jobs: deploy: runs-on: ubuntu-latest defaults: run: working-directory: ./template-sdk steps: - uses: actions/checkout@v6 - uses: pnpm/action-setup@v5 with: version: 10 - name: Use Node.js 24 uses: actions/setup-node@v6 with: node-version: 24 cache: "pnpm" cache-dependency-path: ./template-sdk/pnpm-lock.yaml - name: Install Dependencies run: pnpm install - name: Build run: pnpm build - name: Publish to npm run: | npm config set '//registry.npmjs.org/:_authToken' $NPM_TOKEN npm publish env: NPM_TOKEN: ${{ secrets.NPM_TOKEN }} ================================================ FILE: .github/workflows/publish-template.yaml ================================================ # Copyright 2023 LiveKit, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
name: Publish Templates on: workflow_dispatch: pull_request: branches: [main] paths: - build/template/Dockerfile - template-default/** - template-sdk/** jobs: docker: runs-on: namespace-profile-8vcpu-cache steps: - uses: actions/checkout@v6 # for pull requests, need to checkout head for EndBug/add-and-commit to work if: github.event_name == 'pull_request' with: repository: ${{ github.event.pull_request.head.repo.full_name }} ref: ${{ github.event.pull_request.head.ref }} - uses: actions/checkout@v6 if: github.event_name != 'pull_request' - name: Docker metadata id: docker-md uses: docker/metadata-action@v6 with: images: livekit/egress-templates tags: | type=sha type=raw,value=latest,enable={{is_default_branch}} - name: Set up Docker Buildx uses: docker/setup-buildx-action@v4 - name: Login to DockerHub uses: docker/login-action@v4 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Build and push uses: docker/build-push-action@v7 with: context: . 
file: ./build/template/Dockerfile push: true platforms: linux/amd64,linux/arm64 tags: ${{ steps.docker-md.outputs.tags }} labels: ${{ steps.docker-md.outputs.labels }} - name: Update template version run: | SHORT_SHA=`echo ${GITHUB_SHA} | cut -c 1-7` sed "s/TemplateVersion.*= \"[-a-z0-9]*\"/TemplateVersion = \"sha-${SHORT_SHA}\"/" < version/version.go > version.go mv -f version.go version/version.go - name: Commit version changes uses: EndBug/add-and-commit@v9 with: default_author: github_actions message: | Commit: https://github.com/${{ github.repository }}/commit/${{ github.sha }} Ref: https://github.com/${{ github.repository }}/tree/${{ github.ref_name }} By: ${{ github.actor }} push: true ================================================ FILE: .github/workflows/slack-notifier.yaml ================================================ name: PR Slack Notifier on: pull_request: types: [review_requested, reopened, closed, synchronize] pull_request_review: types: [submitted] permissions: contents: read pull-requests: write issues: write concurrency: group: pr-slack-${{ github.event.pull_request.number }}-${{ github.workflow }} cancel-in-progress: false jobs: notify-devs: runs-on: ubuntu-latest steps: - uses: livekit/slack-notifier-action@main with: config_json: ${{ secrets.SLACK_NOTIFY_CONFIG_JSON }} slack_token: ${{ secrets.SLACK_PR_NOTIFIER_TOKEN }} ================================================ FILE: .github/workflows/test-cleanup.yaml ================================================ name: Cleanup Integration Images on: schedule: - cron: '0 6 * * *' workflow_dispatch: permissions: {} jobs: cleanup: runs-on: ubuntu-latest steps: - name: Delete old integration image tags env: DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_CLEANUP_USERNAME }} DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_CLEANUP_TOKEN }} REPO: livekit/egress-integration RETENTION_DAYS: 3 run: | set -euo pipefail # Authenticate with Docker Hub TOKEN=$(curl -sf "https://hub.docker.com/v2/users/login" \ -H 
"Content-Type: application/json" \ -d "{\"username\":\"${DOCKERHUB_USERNAME}\",\"password\":\"${DOCKERHUB_TOKEN}\"}" \ | jq -r '.token') if [ -z "$TOKEN" ] || [ "$TOKEN" = "null" ]; then echo "::error::Failed to authenticate with Docker Hub" exit 1 fi CUTOFF=$(date -u -d "${RETENTION_DAYS} days ago" +%Y-%m-%dT%H:%M:%S.%NZ) echo "Deleting tags last updated before ${CUTOFF}" DELETED=0 RETAINED=0 PAGE=1 while true; do RESPONSE=$(curl -sf "https://hub.docker.com/v2/repositories/${REPO}/tags?page_size=100&page=${PAGE}" \ -H "Authorization: Bearer ${TOKEN}") TAGS=$(echo "$RESPONSE" | jq -r '.results // empty') if [ -z "$TAGS" ] || [ "$TAGS" = "[]" ]; then break fi for TAG_INFO in $(echo "$TAGS" | jq -c '.[]'); do TAG_NAME=$(echo "$TAG_INFO" | jq -r '.name') LAST_UPDATED=$(echo "$TAG_INFO" | jq -r '.last_updated') if [[ "$LAST_UPDATED" < "$CUTOFF" ]]; then STATUS=$(curl -s -o /dev/null -w "%{http_code}" -X DELETE \ "https://hub.docker.com/v2/repositories/${REPO}/tags/${TAG_NAME}" \ -H "Authorization: Bearer ${TOKEN}") if [ "$STATUS" = "204" ]; then echo "Deleted: ${TAG_NAME} (last updated: ${LAST_UPDATED})" DELETED=$((DELETED + 1)) else echo "::warning::Failed to delete ${TAG_NAME}: HTTP ${STATUS}" fi sleep 0.5 else RETAINED=$((RETAINED + 1)) fi done NEXT=$(echo "$RESPONSE" | jq -r '.next // empty') if [ -z "$NEXT" ] || [ "$NEXT" = "null" ]; then break fi PAGE=$((PAGE + 1)) done echo "" echo "Summary: deleted=${DELETED} retained=${RETAINED}" ================================================ FILE: .github/workflows/test-integration.yaml ================================================ # Copyright 2023 LiveKit, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. name: Integration Test on: workflow_dispatch: pull_request: branches: [ main ] paths: - build/chrome/** - build/egress/** - build/gstreamer/** - build/test/** - cmd/** - pkg/** - test/** - go.mod jobs: build: runs-on: namespace-profile-8vcpu-cache outputs: image: ${{ steps.docker-md.outputs.tags }} steps: - uses: actions/checkout@v6 with: lfs: true - name: Fetch media-samples (with LFS) env: GITHUB_TOKEN: ${{ github.token }} run: build/test/fetch-media-samples.sh - uses: actions/cache@v5 with: path: | ~/go/pkg/mod ~/go/bin ~/.cache key: egress-integration-${{ hashFiles('**/go.sum') }} restore-keys: egress-integration - name: Docker metadata id: docker-md uses: docker/metadata-action@v6 with: images: livekit/egress-integration tags: | type=sha - name: Set up Go uses: actions/setup-go@v6 with: go-version: 1.26.1 - name: Download Go modules run: go mod download - name: Get template image id: template-tag run: | TEMPLATE_TAG=`go run github.com/livekit/egress/cmd/template_version` echo "template_tag=$TEMPLATE_TAG" >> "$GITHUB_OUTPUT" - name: Login to DockerHub uses: docker/login-action@v4 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Set up Buildx id: buildx uses: docker/setup-buildx-action@v4 with: driver: docker-container install: true - name: Build and push uses: docker/build-push-action@v7 with: builder: ${{ steps.buildx.outputs.name }} context: . 
file: ./build/test/Dockerfile push: true platforms: linux/amd64 tags: ${{ steps.docker-md.outputs.tags }} labels: ${{ steps.docker-md.outputs.labels }} build-args: | DEADLOCK=1 TEMPLATE_TAG=${{ steps.template-tag.outputs.template_tag }} # TODO: Enable caching once registry periodic cleanup is implemented #cache-from: type=registry,ref=livekit/egress-integration:buildcache #cache-to: type=registry,ref=livekit/egress-integration:buildcache,mode=max,ignore-error=true test: needs: build strategy: fail-fast: false matrix: integration_type: [file-room, file-track, file-media, stream, segments, images, multi, edge] runs-on: namespace-profile-8vcpu-cache steps: - uses: shogo82148/actions-setup-redis@v1 with: redis-version: '6.x' auto-start: true - run: redis-cli ping - name: Run tests env: IMAGE: ${{needs.build.outputs.image}} run: | docker run --rm \ --network host \ -e GITHUB_WORKFLOW=1 \ -e EGRESS_CONFIG_STRING="$(echo ${{ secrets.EGRESS_CONFIG_STRING }} | base64 -d)" \ -e INTEGRATION_TYPE="${{ matrix.integration_type }}" \ -e S3_UPLOAD="$(echo ${{ secrets.S3_UPLOAD }} | base64 -d)" \ -e GCP_UPLOAD="$(echo ${{ secrets.GCP_UPLOAD }} | base64 -d)" \ ${{ env.IMAGE }} ================================================ FILE: .github/workflows/test-template.yaml ================================================ # Copyright 2023 LiveKit, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
name: Template Test on: workflow_dispatch: pull_request: branches: [main] paths: - build/template/Dockerfile - template-default/** - template-sdk/** defaults: run: working-directory: template-default jobs: test: runs-on: ubuntu-latest steps: - uses: actions/checkout@v6 - uses: pnpm/action-setup@v5 with: version: 10 - name: Use Node.js 24 uses: actions/setup-node@v6 with: node-version: 24 cache: "pnpm" cache-dependency-path: ./template-default/pnpm-lock.yaml - run: pnpm install - run: pnpm build ================================================ FILE: .gitignore ================================================ .idea/ .DS_Store .github/workflows/config.yaml build/plugins/ media-samples/ test/output/* test/*.yaml !test/config-sample.yaml ================================================ FILE: .golangci.yaml ================================================ version: "2" run: build-tags: - deadlock - integration tests: true linters: default: none enable: - asasalint - dupl - errname - fatcontext - forbidigo - goconst - govet - misspell - nilerr - revive - staticcheck settings: forbidigo: forbid: - pattern: sync\.Mutex - pattern: sync\.RWMutex analyze-types: true staticcheck: checks: - "all" - "-ST1000" # package comments — not useful for internal packages - "-ST1003" # naming conventions — would break exported API misspell: mode: default locale: US revive: confidence: 0.8 severity: warning rules: - name: argument-limit - name: atomic - name: blank-imports - name: context-as-argument - name: context-keys-type - name: deep-exit - name: defer - name: dot-imports - name: early-return - name: errorf - name: error-strings - name: if-return - name: increment-decrement - name: indent-error-flow - name: range - name: range-val-address - name: receiver-naming - name: superfluous-else - name: unexported-return - name: unused-parameter - name: var-declaration - name: waitgroup-by-value - name: datarace - name: identical-branches - name: identical-switch-branches - name: 
unconditional-recursion - name: unreachable-code - name: empty-block exclusions: generated: lax rules: - path: cmd/server/main.go text: 'pattern templates: no matching files found' paths: - third_party$ - builtin$ - examples$ issues: max-issues-per-linter: 0 max-same-issues: 0 ================================================ FILE: LICENSE ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: NOTICE ================================================ Copyright 2023 LiveKit, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: README.md ================================================ The LiveKit icon, the name of the repository and some sample code in the background. 
# LiveKit Egress WebRTC is fantastic for last-mile media delivery, but interoperability with other services can be challenging. An application may want to do things like store a session for future playback, relay a stream to a CDN, or process a track through a transcription service – workflows where media travels through a different system or protocol. LiveKit Egress is the solution to these interoperability challenges. It provides a consistent set of APIs that gives you universal export of your LiveKit sessions and tracks. ## Capabilities 1. **Room composite** for exporting an entire room. 2. **Web egress** for recordings that aren't attached to a single LiveKit room. 3. **Track composite** for exporting synchronized tracks of a single participant. 4. **Track egress** for exporting individual tracks. Depending on your request type, the egress service will either launch Chrome using a web template (room composite requests) or a supplied url (web requests), or it will use the Go SDK directly (track and track composite requests). Irrespective of method used, when moving between protocols, containers or encodings, LiveKit's egress service will automatically transcode streams for you using GStreamer. ## Supported Output | Egress Type | MP4 File | OGG File | WebM File | HLS (TS Segments) | RTMP(s) Stream | SRT Stream | WebSocket Stream | Thumbnails (JPEGs) | |-----------------|----------|----------|-----------|-------------------|----------------|------------------|------------------|--------------------| | Room Composite | ✅ | ✅ | | ✅ | ✅ | ✅ | | ✅ | | Web | ✅ | ✅ | | ✅ | ✅ | ✅ | | ✅ | | Track Composite | ✅ | ✅ | | ✅ | ✅ | ✅ | | ✅ | | Track | ✅ | ✅ | ✅ | | | | ✅ | | Files can be uploaded to any S3 compatible storage, Azure, or GCP. ## Documentation Full docs available [here](https://docs.livekit.io/guides/egress/) ### Config The Egress service takes a yaml config file: ```yaml # required fields api_key: livekit server api key. 
template_base: can be used to host custom templates (default http://localhost:&lt;template_port&gt;/)
max_retries: (optional, default=3) number of retries to attempt
azure: account_name: AZURE_STORAGE_ACCOUNT env can be used instead account_key: AZURE_STORAGE_KEY env can be used instead container_name: container to upload files to gcp: credentials_json: GOOGLE_APPLICATION_CREDENTIALS env can be used instead bucket: bucket to upload files to proxy_config: url: (optional) proxy url username: (optional) proxy username password: (optional) proxy password alioss: access_key: Ali OSS AccessKeyId secret: Ali OSS AccessKeySecret region: Ali OSS region endpoint: (optional) custom endpoint bucket: bucket to upload files to # dev/debugging fields insecure: can be used to connect to an insecure websocket (default false) debug: enable_profiling: create and upload pipeline dot file and pprof file on pipeline failure s3: upload config for dotfiles (see above) azure: upload config for dotfiles (see above) gcp: upload config for dotfiles (see above) alioss: upload config for dotfiles (see above) ``` The config file can be added to a mounted volume with its location passed in the EGRESS_CONFIG_FILE env var, or its body can be passed in the EGRESS_CONFIG_BODY env var. ### Filenames The below templates can also be used in filename/filepath parameters: | Egress Type | {room_id} | {room_name} | {time} | {utc} | {publisher_identity} | {track_id} | {track_type} | {track_source} | |-----------------|-----------|-------------|--------|-------|----------------------|------------|--------------|----------------| | Room Composite | ✅ | ✅ | ✅ | ✅ | | | | | | Web | | | ✅ | ✅ | | | | | | Track Composite | ✅ | ✅ | ✅ | ✅ | ✅ | | | | | Track | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | * If no filename is provided with a request, one will be generated in the form of `"{room_name}-{time}"`. * If your filename ends with a `/`, a file will be generated in that directory. 
* For 1/2/2006, 3:04:05.789 PM, {time} format would display "2006-01-02T150405", and {utc} format "20060102150405789" Examples: | Request filename | Resulting filename | |------------------------------------------|---------------------------------------------------| | "" | testroom-2022-10-04T011306.mp4 | | "livekit-recordings/" | livekit-recordings/testroom-2022-10-04T011306.mp4 | | "{room_name}/{time}" | testroom/2022-10-04T011306.mp4 | | "{room_id}-{publisher_identity}.mp4" | 10719607-f7b0-4d82-afe1-06b77e91fe12-david.mp4 | | "{track_type}-{track_source}-{track_id}" | audio-microphone-TR_SKasdXCVgHsei.ogg | ### Running locally These changes are **not** recommended for a production setup. To run against a local livekit server, you'll need to do the following: - open `/usr/local/etc/redis.conf` and comment out the line that says `bind 127.0.0.1` - change `protected-mode yes` to `protected-mode no` in the same file - find your IP as seen by docker - `ws_url` needs to be set using the IP as Docker sees it - on linux, this should be `172.17.0.1` - on mac or windows, run `docker run -it --rm alpine nslookup host.docker.internal` and you should see something like `Name: host.docker.internal Address: 192.168.65.2` These changes allow the service to connect to your local redis instance from inside the docker container. Create a directory to mount. In this example, we will use `~/egress-test`. Create a config.yaml in the above directory. - `redis` and `ws_url` should use the above IP instead of `localhost` - `insecure` should be set to true ```yaml log_level: debug api_key: your-api-key api_secret: your-api-secret ws_url: ws://192.168.65.2:7880 insecure: true redis: address: 192.168.65.2:6379 ``` Then to run the service: ```shell docker run --rm \ -e EGRESS_CONFIG_FILE=/out/config.yaml \ -v ~/egress-test:/out \ livekit/egress ``` You can then use our [cli](https://github.com/livekit/livekit-cli) to submit egress requests to your server. 
which is needed by Chrome to set up its sandbox. Chrome sandboxing within Egress can be re-enabled by setting the `enable_chrome_sandbox` option
- It's possible, but not recommended. To do so, you would need to install gstreamer along with its plugins, chrome, xvfb, and have a pulseaudio server running. ## Testing and Development To run the test against your own LiveKit rooms, a deployed LiveKit server with a secure websocket url is required. First, create `egress/test/config.yaml`: ```yaml log_level: debug api_key: your-api-key api_secret: your-api-secret ws_url: wss://your-livekit-url.com redis: address: 192.168.65.2:6379 room_only: false web_only: false track_composite_only: false track_only: false file_only: false stream_only: false segments_only: false muting: false dot_files: false short: false ``` Join a room using https://example.livekit.io or your own client, then run `mage integration test/config.yaml`. This will test recording different file types, output settings, and streams against your room.
LiveKit Ecosystem
Agents SDKsPython · Node.js
LiveKit SDKsBrowser · Swift · Android · Flutter · React Native · Rust · Node.js · Python · Unity · Unity (WebGL) · ESP32 · C++
Starter AppsPython Agent · TypeScript Agent · React App · SwiftUI App · Android App · Flutter App · React Native App · Web Embed
UI ComponentsReact · Android Compose · SwiftUI · Flutter
Server APIsNode.js · Golang · Ruby · Java/Kotlin · Python · Rust · PHP (community) · .NET (community)
ResourcesDocs · Docs MCP Server · CLI · LiveKit Cloud
LiveKit Server OSSLiveKit server · Egress · Ingress · SIP
CommunityDeveloper Community · Slack · X · YouTube
================================================ FILE: bootstrap.sh ================================================ #!/bin/bash # Copyright 2023 LiveKit, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. if ! command -v mage &> /dev/null then pushd /tmp git clone https://github.com/magefile/mage cd mage go run bootstrap.go rm -rf /tmp/mage popd fi if ! command -v mage &> /dev/null then echo "Ensure `go env GOPATH`/bin is in your \$PATH" exit 1 fi go mod download ================================================ FILE: build/chrome/Dockerfile ================================================ # Copyright 2023 LiveKit, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
FROM ubuntu:24.04 RUN mkdir /chrome-installer COPY output/arm64 /chrome-installer/arm64 COPY output/amd64 /chrome-installer/amd64 COPY install-chrome /chrome-installer/install-chrome ================================================ FILE: build/chrome/README.md ================================================ # Chrome installer This dockerfile is used to install chrome on ubuntu amd64 and arm64. There is no official or available arm64 build with H264 support, so we needed to compile it from source. ## Usage To install chrome, add the following to your dockerfile: ```dockerfile ARG TARGETPLATFORM COPY --from=livekit/chrome-installer:124.0.6367.201 /chrome-installer /chrome-installer RUN /chrome-installer/install-chrome "$TARGETPLATFORM" ENV PATH=${PATH}:/chrome ENV CHROME_DEVEL_SANDBOX=/usr/local/sbin/chrome-devel-sandbox ``` ## Compilation It must be cross compiled from an amd64 builder. This build takes multiple hours, even on fast machines. Relevant docs: * [Build instructions](https://chromium.googlesource.com/chromium/src/+/main/docs/linux/build_instructions.md) * [Cross compiling](https://chromium.googlesource.com/chromium/src/+/main/docs/linux/chromium_arm.md) ### Requirements * 64-bit Intel machine (x86_64) * Ubuntu 22.04 LTS * 64+ CPU cores * 128GB+ RAM * 100GB+ disk space ================================================ FILE: build/chrome/install-chrome ================================================ #!/bin/bash set -euxo pipefail if [ "$1" = "linux/arm64" ] then apt-get update apt-get install -y \ ca-certificates \ fonts-liberation \ libasound2t64 \ libatk-bridge2.0-0 \ libatk1.0-0 \ libc6 \ libcairo2 \ libcups2 \ libdbus-1-3 \ libexpat1 \ libfontconfig1 \ libgbm1 \ libglib2.0-0 \ libnspr4 \ libnss3 \ libpango-1.0-0 \ libpangocairo-1.0-0 \ libx11-6 \ libx11-xcb1 \ libxcb1 \ libxcomposite1 \ libxcursor1 \ libxdamage1 \ libxext6 \ libxfixes3 \ libxi6 \ libxrandr2 \ libxrender1 \ libxss1 \ libxtst6 \ xdg-utils chmod +x 
/chrome-installer/arm64/chromedriver-mac-arm64/chromedriver mv -f /chrome-installer/arm64/chromedriver-mac-arm64/chromedriver /usr/local/bin/chromedriver mv /chrome-installer/arm64/ /chrome cp /chrome/chrome_sandbox /usr/local/sbin/chrome-devel-sandbox chown root:root /usr/local/sbin/chrome-devel-sandbox chmod 4755 /usr/local/sbin/chrome-devel-sandbox else apt-get install -y /chrome-installer/amd64/google-chrome-stable_amd64.deb chmod +x /chrome-installer/amd64/chromedriver-linux64/chromedriver mv -f /chrome-installer/amd64/chromedriver-linux64/chromedriver /usr/local/bin/chromedriver fi rm -rf /chrome-installer ================================================ FILE: build/chrome/scripts/amd64.sh ================================================ #!/bin/bash set -xeuo pipefail wget https://dl.google.com/linux/chrome/deb/pool/main/g/google-chrome-stable/google-chrome-stable_"$1"-1_amd64.deb mkdir -p "$HOME/output/amd64" mv google-chrome-stable_"$1"-1_amd64.deb "$HOME/output/amd64/google-chrome-stable_amd64.deb" ================================================ FILE: build/chrome/scripts/arm64.sh ================================================ #!/bin/bash set -xeuo pipefail sudo apt-get update sudo apt-get install -y \ apt-utils \ build-essential \ curl \ git \ python3 \ sudo \ zip if [ ! -d "$HOME/depot_tools" ]; then git clone https://chromium.googlesource.com/chromium/tools/depot_tools.git "$HOME/depot_tools" fi export PATH="$PATH:$HOME/depot_tools" mkdir -p "$HOME" if [ ! -d "$HOME/chromium/.gclient" ] && [ ! 
-f "$HOME/chromium/.gclient" ]; then mkdir -p "$HOME/chromium" cd "$HOME/chromium" fetch --nohooks --no-history chromium fi cd "$HOME/chromium" cat > .gclient <<'EOF' solutions = [ { "name": "src", "url": "https://chromium.googlesource.com/chromium/src.git", "managed": False, "custom_deps": {}, "custom_vars": { "checkout_pgo_profiles": True, }, "target_cpu": "arm64", }, ] EOF cd src git fetch --no-tags --depth=1 origin "refs/tags/$1:refs/tags/$1" git checkout -B stable "tags/$1" for attempt in 1 2 3 4 5; do if gclient sync -D --with_branch_heads -j 8; then break fi if [ "$attempt" -eq 5 ]; then echo "gclient sync failed after $attempt attempts" exit 1 fi sleep_secs=$((attempt * 30)) echo "gclient sync failed, retrying in ${sleep_secs}s..." sleep "$sleep_secs" done ./build/install-build-deps.sh ./build/linux/sysroot_scripts/install-sysroot.py --arch=arm64 gclient runhooks gn gen out/default --args=' target_cpu="arm64" proprietary_codecs=true ffmpeg_branding="Chrome" is_official_build=true is_debug=false symbol_level=0 blink_symbol_level=0 v8_symbol_level=0 enable_nacl=false rtc_use_pipewire=false is_component_build=false use_jumbo_build=true dcheck_always_on=false ' export NINJA_SUMMARIZE_BUILD=1 autoninja -C out/default chrome chrome_sandbox -j "$(nproc)" cd out/default rm -rf "$HOME/output/arm64" mkdir -p "$HOME/output/arm64/locales" mv locales/en-US.pak "$HOME/output/arm64/locales/" required_files=( chrome chrome-wrapper chrome_100_percent.pak chrome_200_percent.pak chrome_crashpad_handler chrome_sandbox icudtl.dat libEGL.so libGLESv2.so resources.pak snapshot_blob.bin v8_context_snapshot.bin ) for f in "${required_files[@]}"; do if [ ! 
-e "$f" ]; then
    echo "Missing required build output: $f"
    exit 1
  fi
  mv "$f" "$HOME/output/arm64/"
done
================================================
FILE: build/chrome/scripts/driver.sh
================================================
#!/bin/bash
set -xeuo pipefail

# Fetch the chromedriver build matching the Chrome-for-Testing version
# given as $1, for both platforms this repo publishes.
wget https://storage.googleapis.com/chrome-for-testing-public/"$1"/linux64/chromedriver-linux64.zip
unzip chromedriver-linux64.zip -d "$HOME/output/amd64"
wget https://storage.googleapis.com/chrome-for-testing-public/"$1"/mac-arm64/chromedriver-mac-arm64.zip
unzip chromedriver-mac-arm64.zip -d "$HOME/output/arm64"
================================================
FILE: build/egress/Dockerfile
================================================
# Copyright 2023 LiveKit, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

ARG TEMPLATE_TAG=latest

FROM livekit/egress-templates:$TEMPLATE_TAG AS template

FROM livekit/gstreamer:1.24.12-dev

ARG TARGETPLATFORM
ARG TARGETARCH
ENV TARGETARCH=${TARGETARCH}
ENV TARGETPLATFORM=${TARGETPLATFORM}

WORKDIR /workspace

# install go
RUN wget https://go.dev/dl/go1.26.1.linux-${TARGETARCH}.tar.gz && \
    rm -rf /usr/local/go && \
    tar -C /usr/local -xzf go1.26.1.linux-${TARGETARCH}.tar.gz
ENV PATH="/usr/local/go/bin:${PATH}"

# download go modules
COPY go.mod .
COPY go.sum .
RUN go mod download

# copy source
COPY cmd/ cmd/
COPY pkg/ pkg/
COPY version/ version/

# copy templates
COPY --from=template workspace/build/ cmd/server/templates/

# delete .map files
# FIX: quote the glob so the RUN shell passes the pattern to find itself
# instead of expanding it against the build working directory
RUN find cmd/server/templates/ -name '*.map' | xargs rm

# build
RUN CGO_ENABLED=1 GOOS=linux GOARCH=${TARGETARCH} GO111MODULE=on GODEBUG=disablethp=1 go build -a -o egress ./cmd/server

# install tini
ENV TINI_VERSION v0.19.0
ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-${TARGETARCH} /tini
RUN chmod +x /tini

FROM livekit/gstreamer:1.24.12-prod

ARG TARGETPLATFORM

# install deps
# the trailing '-' removes the distro gstreamer1.0-plugins-base package
# (apt-get "pkg-" syntax) in favor of the custom gstreamer build
RUN apt-get update && \
    apt-get install -y \
    curl \
    fonts-noto \
    gnupg \
    pulseaudio \
    unzip \
    wget \
    xvfb \
    gstreamer1.0-plugins-base-

# install chrome
COPY --from=livekit/chrome-installer:146.0.7680.177-1 /chrome-installer /chrome-installer
RUN /chrome-installer/install-chrome "$TARGETPLATFORM"

# clean up
RUN rm -rf /var/lib/apt/lists/*

# create egress user
RUN useradd -ms /bin/bash -g root -G sudo,pulse,pulse-access egress
# FIX: use absolute paths; the original relative "home/egress/..." only worked
# because the implicit working directory of this stage is "/" — the chown
# below already assumes /home/egress
RUN mkdir -p /home/egress/tmp /home/egress/.cache/xdgr && \
    chown -R egress /home/egress

# copy files
COPY --from=1 /workspace/egress /bin/
COPY --from=1 /tini /tini
COPY build/egress/entrypoint.sh /

# run
USER egress
ENV PATH=${PATH}:/chrome
ENV XDG_RUNTIME_DIR=/home/egress/.cache/xdgr
ENV CHROME_DEVEL_SANDBOX=/usr/local/sbin/chrome-devel-sandbox
ENTRYPOINT ["/entrypoint.sh"]
================================================
FILE: build/egress/entrypoint.sh
================================================
#!/usr/bin/env bash
# Copyright 2023 LiveKit, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -euo pipefail

# Clean out tmp
rm -rf /home/egress/tmp/*

# Start pulseaudio
rm -rf /var/run/pulse /var/lib/pulse /home/egress/.config/pulse /home/egress/.cache/xdgr/pulse
pulseaudio -D --verbose --exit-idle-time=-1 --disallow-exit > /dev/null 2>&1

# Run egress service
exec /tini -- egress
================================================
FILE: build/gstreamer/Dockerfile-base
================================================
FROM ubuntu:24.04

ARG GSTREAMER_VERSION
ARG LIBNICE_VERSION

COPY install-dependencies /
RUN /install-dependencies

ENV PATH=/root/.cargo/bin:$PATH

# fetch and unpack each gstreamer module tarball
RUN for lib in gstreamer gst-plugins-base gst-plugins-good gst-plugins-bad gst-plugins-ugly gst-libav; \
    do \
        wget https://gstreamer.freedesktop.org/src/$lib/$lib-$GSTREAMER_VERSION.tar.xz && \
        tar -xf $lib-$GSTREAMER_VERSION.tar.xz && \
        rm $lib-$GSTREAMER_VERSION.tar.xz && \
        mv $lib-$GSTREAMER_VERSION $lib; \
    done

# rust plugins are apparently only released on gitlab
RUN wget https://gitlab.freedesktop.org/gstreamer/gst-plugins-rs/-/archive/gstreamer-$GSTREAMER_VERSION/gst-plugins-rs-gstreamer-$GSTREAMER_VERSION.tar.gz && \
    tar xfz gst-plugins-rs-gstreamer-$GSTREAMER_VERSION.tar.gz && \
    rm gst-plugins-rs-gstreamer-$GSTREAMER_VERSION.tar.gz && \
    mv gst-plugins-rs-gstreamer-$GSTREAMER_VERSION gst-plugins-rs

RUN wget https://libnice.freedesktop.org/releases/libnice-$LIBNICE_VERSION.tar.gz && \
    tar xfz libnice-$LIBNICE_VERSION.tar.gz && \
    rm libnice-$LIBNICE_VERSION.tar.gz && \
    mv libnice-$LIBNICE_VERSION libnice
================================================
FILE: build/gstreamer/Dockerfile-dev
================================================
ARG GSTREAMER_VERSION

# NOTE: TARGETARCH is a BuildKit-predefined platform ARG; it is available to
# FROM lines from the global scope without an explicit declaration here.
FROM livekit/gstreamer:${GSTREAMER_VERSION}-base-${TARGETARCH}

ENV DEBUG=true
ENV OPTIMIZATIONS=false

COPY compile /
COPY compile-rs /
RUN /compile
RUN /compile-rs

FROM ubuntu:24.04

COPY install-dependencies /
RUN /install-dependencies

COPY --from=0 /compiled-binaries /
================================================
FILE: build/gstreamer/Dockerfile-prod
================================================
ARG GSTREAMER_VERSION

FROM livekit/gstreamer:${GSTREAMER_VERSION}-base-${TARGETARCH}

ENV DEBUG=false
ENV OPTIMIZATIONS=true

COPY compile /
RUN /compile

FROM ubuntu:24.04

# runtime (non -dev) libraries for the plugins built in the compile stage
RUN apt-get update && \
    apt-get dist-upgrade -y && \
    apt-get install -y --no-install-recommends \
    bubblewrap \
    ca-certificates \
    iso-codes \
    ladspa-sdk \
    liba52-0.7.4 \
    libaa1 \
    libaom3 \
    libass9 \
    libavcodec60 \
    libavfilter9 \
    libavformat60 \
    libavutil58 \
    libbs2b0 \
    libbz2-1.0 \
    libcaca0 \
    libcap2 \
    libchromaprint1 \
    libcurl3-gnutls \
    libdca0 \
    libde265-0 \
    libdv4 \
    libdvdnav4 \
    libdvdread8 \
    libdw1 \
    libegl1 \
    libepoxy0 \
    libfaac0 \
    libfaad2 \
    libfdk-aac2 \
    libflite1 \
    libgbm1 \
    libgcrypt20 \
    libgl1 \
    libgles1 \
    libgles2 \
    libglib2.0-0 \
    libgme0 \
    libgmp10 \
    libgsl27 \
    libgsm1 \
    libgudev-1.0-0 \
    libharfbuzz-icu0 \
    libjpeg8 \
    libkate1 \
    liblcms2-2 \
    liblilv-0-0 \
    libmjpegutils-2.1-0 \
    libmodplug1 \
    libmp3lame0 \
    libmpcdec6 \
    libmpeg2-4 \
    libmpg123-0 \
    libofa0 \
    libogg0 \
    libopencore-amrnb0 \
    libopencore-amrwb0 \
    libopenexr-3-1-30 \
    libopenjp2-7 \
    libopus0 \
    liborc-0.4-0 \
    libpango-1.0-0 \
    libpng16-16 \
    librsvg2-2 \
    librtmp1 \
    libsbc1 \
    libseccomp2 \
    libshout3 \
    libsndfile1 \
    libsoundtouch1 \
    libsoup2.4-1 \
    libspandsp2 \
    libspeex1 \
    libsrt1.5-openssl \
    libsrtp2-1 \
    libssl3 \
    libtag1v5 \
    libtheora0 \
    libtwolame0 \
    libunwind8 \
    libvisual-0.4-0 \
    libvo-aacenc0 \
    libvo-amrwbenc0 \
    libvorbis0a \
    libvpx9 \
    libvulkan1 \
    libwavpack1 \
    libwebp7 \
    libwebpdemux2 \
    libwebpmux3 \
    libwebrtc-audio-processing1 \
    libwildmidi2 \
    libwoff1 \
    libx264-164 \
    libx265-199 \
    libxkbcommon0 \
    libxslt1.1 \
    libzbar0 \
    libzvbi0 \
    mjpegtools \
    xdg-dbus-proxy && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*

COPY --from=0 /compiled-binaries /
================================================
FILE: build/gstreamer/Dockerfile-prod-rs
================================================
ARG GSTREAMER_VERSION

FROM livekit/gstreamer:${GSTREAMER_VERSION}-base-${TARGETARCH}

FROM livekit/gstreamer:${GSTREAMER_VERSION}-dev-${TARGETARCH}

COPY --from=0 /gst-plugins-rs /gst-plugins-rs

ENV DEBUG=false
ENV OPTIMIZATIONS=true
ENV PATH=/root/.cargo/bin:$PATH

COPY compile-rs /
RUN /compile-rs

FROM livekit/gstreamer:${GSTREAMER_VERSION}-prod-${TARGETARCH}

COPY --from=1 /compiled-binaries /
================================================
FILE: build/gstreamer/compile
================================================
#!/bin/bash
set -euxo pipefail

# Build and install every C gstreamer module plus libnice with meson/ninja.
# DEBUG and OPTIMIZATIONS are provided by the calling Dockerfile stage.
for repo in gstreamer libnice gst-plugins-base gst-plugins-good gst-plugins-bad gst-plugins-ugly gst-libav; do
  pushd $repo

  opts="-D prefix=/usr"
  if [[ $repo != "libnice" ]]; then
    opts="$opts -D tests=disabled -D doc=disabled"
  fi
  if [[ $repo == "gstreamer" ]]; then
    opts="$opts -D examples=disabled -D introspection=disabled"
  elif [[ $repo == "gst-plugins-base" ]]; then
    opts="$opts -D examples=disabled -D introspection=disabled -D qt5=disabled"
  elif [[ $repo == "gst-plugins-good" ]]; then
    opts="$opts -D examples=disabled -D pulse=enabled -D qt5=disabled"
  elif [[ $repo == "gst-plugins-bad" ]]; then
    opts="$opts -D gpl=enabled -D examples=disabled -D introspection=disabled"
  elif [[ $repo == "gst-plugins-ugly" ]]; then
    opts="$opts -D gpl=enabled"
  fi
  if [[ $DEBUG == 'true' ]]; then
    if [[ $OPTIMIZATIONS == 'true' ]]; then
      opts="$opts -D buildtype=debugoptimized"
    else
      opts="$opts -D buildtype=debug"
    fi
  else
    opts="$opts -D buildtype=release -D b_lto=true"
  fi

  rm -rf build
  meson setup build $opts

  if [[ -z "${NINJA_JOBS:-}" ]]; then
    # Limit to 4 jobs to avoid OOM issues
    NINJA_JOBS=4
  fi

  # This is needed for other plugins to be built properly
  ninja -j "${NINJA_JOBS}" -C build install
  # This is where we'll grab build artifacts from
  DESTDIR=/compiled-binaries ninja -j "${NINJA_JOBS}" -C build install

  popd
done

gst-inspect-1.0
================================================
FILE: build/gstreamer/compile-rs
================================================
#!/bin/bash
set -euxo pipefail

: "${CARGO_BUILD_JOBS:=4}"
export CARGO_BUILD_JOBS

for repo in gst-plugins-rs; do
  pushd $repo

  # strip binaries in debug mode
  mv Cargo.toml Cargo.toml.old
  sed s,'\[profile.release\]','[profile.release]\nstrip="debuginfo"', Cargo.toml.old > Cargo.toml

  cargo update -p time

  opts="-D prefix=/usr -D tests=disabled -D doc=disabled"
  if [[ $DEBUG == 'true' ]]; then
    if [[ $OPTIMIZATIONS == 'true' ]]; then
      opts="$opts -D buildtype=debugoptimized"
    else
      opts="$opts -D buildtype=debug"
    fi
  else
    opts="$opts -D buildtype=release -D b_lto=true"
  fi

  rm -rf build
  meson setup build $opts

  if [[ -z "${NINJA_JOBS:-}" ]]; then
    # Limit to 4 jobs to avoid OOM issues
    NINJA_JOBS=4
  fi

  # This is needed for other plugins to be built properly
  ninja -j "${NINJA_JOBS}" -C build install
  # This is where we'll grab build artifacts from
  DESTDIR=/compiled-binaries ninja -j "${NINJA_JOBS}" -C build install

  popd
done

gst-inspect-1.0
================================================
FILE: build/gstreamer/install-dependencies
================================================
#!/bin/bash
set -euxo pipefail

export DEBIAN_FRONTEND=noninteractive

apt-get update
apt-get dist-upgrade -y
apt-get install -y --no-install-recommends \
  bison \
  bubblewrap \
  ca-certificates \
  cmake \
  curl \
  flex \
  flite1-dev \
  gcc \
  gettext \
  git \
  gperf \
  iso-codes \
  liba52-0.7.4-dev \
  libaa1-dev \
  libaom-dev \
  libass-dev \
  libavcodec-dev \
  libavfilter-dev \
  libavformat-dev \
  libavutil-dev \
  libbs2b-dev \
  libbz2-dev \
  libcaca-dev \
  libcap-dev \
  libchromaprint-dev \
  libcurl4-gnutls-dev \
  libdca-dev \
  libde265-dev \
  libdrm-dev \
  libdv4-dev \
  libdvdnav-dev \
libdvdread-dev \ libdw-dev \ libepoxy-dev \ libfaac-dev \ libfaad-dev \ libfdk-aac-dev \ libgbm-dev \ libgcrypt20-dev \ libgirepository1.0-dev \ libgl-dev \ libgles-dev \ libglib2.0-dev \ libgme-dev \ libgmp-dev \ libgsl-dev \ libgsm1-dev \ libgudev-1.0-dev \ libjpeg-dev \ libkate-dev \ liblcms2-dev \ liblilv-dev \ libmjpegtools-dev \ libmodplug-dev \ libmp3lame-dev \ libmpcdec-dev \ libmpeg2-4-dev \ libmpg123-dev \ libofa0-dev \ libogg-dev \ libopencore-amrnb-dev \ libopencore-amrwb-dev \ libopenexr-dev \ libopenjp2-7-dev \ libopus-dev \ liborc-0.4-dev \ libpango1.0-dev \ libpng-dev \ libpulse-dev \ librsvg2-dev \ librtmp-dev \ libsbc-dev \ libseccomp-dev \ libshout3-dev \ libsndfile1-dev \ libsoundtouch-dev \ libsoup2.4-dev \ libspandsp-dev \ libspeex-dev \ libsrt-gnutls-dev \ libsrtp2-dev \ libssl-dev \ libtag1-dev \ libtheora-dev \ libtwolame-dev \ libudev-dev \ libunwind-dev \ libvisual-0.4-dev \ libvo-aacenc-dev \ libvo-amrwbenc-dev \ libvorbis-dev \ libvpx-dev \ libvulkan-dev \ libwavpack-dev \ libwebp-dev \ libwebrtc-audio-processing-dev \ libwildmidi-dev \ libwoff-dev \ libx264-dev \ libx265-dev \ libxkbcommon-dev \ libxslt1-dev \ libzbar-dev \ libzvbi-dev \ ninja-build \ python3 \ ruby \ wget \ xdg-dbus-proxy # install meson version needed for gstreamer 1.26.7 as it's not available in ubuntu 24.04 MESON_VERSION=1.4.1 MESON_BASENAME="meson-${MESON_VERSION}" MESON_BASE_URL="https://github.com/mesonbuild/meson/releases/download/${MESON_VERSION}" pushd /tmp >/dev/null curl -fsSL "${MESON_BASE_URL}/${MESON_BASENAME}.tar.gz" -o "${MESON_BASENAME}.tar.gz" popd >/dev/null tar -xzf "/tmp/${MESON_BASENAME}.tar.gz" -C /opt rm -f "/tmp/${MESON_BASENAME}.tar.gz" cat </usr/local/bin/meson #!/bin/sh exec /usr/bin/env python3 /opt/${MESON_BASENAME}/meson.py "\$@" EOF chmod +x /usr/local/bin/meson ln -sf /usr/local/bin/meson /usr/bin/meson apt-get clean rm -rf /var/lib/apt/lists/* # install rust curl -o install-rustup.sh --proto '=https' --tlsv1.2 -sSf 
https://sh.rustup.rs sh install-rustup.sh -y source "$HOME/.cargo/env" cargo install cargo-c rm -rf install-rustup.sh ================================================ FILE: build/gstreamer/tag.sh ================================================ #!/bin/bash image_suffix=(base dev prod prod-rs) archs=(amd64 arm64) gst_version=$1 for suffix in ${image_suffix[*]} do digests=() for arch in ${archs[*]} do digest=`docker manifest inspect livekit/gstreamer:$gst_version-$suffix-$arch | jq ".manifests[] | select(.platform.architecture == \"$arch\").digest"` # remove quotes digest=${digest:1:$[${#digest}-2]} digests+=($digest) done manifests="" for digest in ${digests[*]} do manifests+=" livekit/gstreamer@$digest" done docker manifest create livekit/gstreamer:$gst_version-$suffix$manifests docker manifest push livekit/gstreamer:$gst_version-$suffix done ================================================ FILE: build/template/Dockerfile ================================================ # Copyright 2023 LiveKit, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. FROM ubuntu:24.04 WORKDIR /workspace RUN apt update RUN apt install -y curl RUN curl -sL https://deb.nodesource.com/setup_22.x | bash - RUN apt update RUN apt install -y nodejs RUN npm install -g pnpm # copy templates COPY template-default/ . # build RUN pnpm install RUN pnpm build ================================================ FILE: build/test/Dockerfile ================================================ # Copyright 2023 LiveKit, Inc. 
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# NOTE(review): '# syntax=' is a parser directive and is only honored on the
# very first line of a Dockerfile, before any other comment; here, after the
# license header, it is treated as a plain comment and the default frontend
# is used. Move it to line 1 if dockerfile:1.6 features must be pinned.
# syntax=docker/dockerfile:1.6

ARG TARGETPLATFORM
ARG TEMPLATE_TAG=latest
ARG GO_VERSION=1.26.1
ARG LINT_VERSION=v2.11.3

FROM livekit/egress-templates:$TEMPLATE_TAG AS template

FROM livekit/gstreamer:1.24.12-dev AS builder

WORKDIR /workspace

ARG TARGETPLATFORM
# Deadlock 0 = off, 1 = on
ARG DEADLOCK=0
ARG GO_VERSION

# install go
RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then GOARCH=arm64; else GOARCH=amd64; fi && \
    wget https://go.dev/dl/go${GO_VERSION}.linux-${GOARCH}.tar.gz && \
    rm -rf /usr/local/go && \
    tar -C /usr/local -xzf go${GO_VERSION}.linux-${GOARCH}.tar.gz
ENV PATH="/usr/local/go/bin:${PATH}"
ENV GOMODCACHE=/go/pkg/mod \
    GOCACHE=/go/build-cache

# download go modules
COPY go.mod .
COPY go.sum .
RUN --mount=type=cache,target=/go/pkg/mod,sharing=locked \
    --mount=type=cache,target=/go/build-cache \
    go mod download

# copy source
COPY cmd/ cmd/
COPY pkg/ pkg/
COPY test/ test/
COPY version/ version/

# copy templates
COPY --from=template workspace/build/ cmd/server/templates/
COPY --from=template workspace/build/ test/templates/

# build (service tests will need to launch the handler)
RUN --mount=type=cache,target=/go/pkg/mod,sharing=locked \
    --mount=type=cache,target=/go/build-cache \
    if [ "$TARGETPLATFORM" = "linux/arm64" ]; then GOARCH=arm64; else GOARCH=amd64; fi && \
    TAGS=""; \
    if [ "${DEADLOCK:-0}" = "1" ]; then TAGS="deadlock"; fi; \
    CGO_ENABLED=1 GOOS=linux GOARCH=${GOARCH} GODEBUG=disablethp=1 go build ${TAGS:+-tags} ${TAGS:+"$TAGS"} -o egress ./cmd/server

RUN --mount=type=cache,target=/go/pkg/mod,sharing=locked \
    --mount=type=cache,target=/go/build-cache \
    if [ "$TARGETPLATFORM" = "linux/arm64" ]; then GOARCH=arm64; else GOARCH=amd64; fi && \
    CGO_ENABLED=1 GOOS=linux GOARCH=${GOARCH} go test -c -v -race --tags=integration ./test

FROM golangci/golangci-lint:${LINT_VERSION} AS golangci

FROM builder AS lint

COPY --from=golangci /usr/bin/golangci-lint /usr/local/bin/golangci-lint
COPY .golangci.yaml .
# NOTE(review): LINT_TAGS has no ARG declaration in this stage, so it expands
# empty unless provided by other means — confirm against the CI workflow.
RUN --mount=type=cache,target=/go/pkg/mod,sharing=locked \
    --mount=type=cache,target=/go/build-cache \
    CGO_ENABLED=1 \
    golangci-lint run \
    --timeout=5m \
    --modules-download-mode=mod \
    --build-tags="${LINT_TAGS}"
RUN echo ok >/lint_ok

FROM livekit/gstreamer:1.24.12-prod

ARG TARGETPLATFORM

# install deps
RUN apt-get update && \
    apt-get install -y \
    curl \
    ffmpeg \
    fonts-noto \
    gnupg \
    pulseaudio \
    python3 \
    python3-pip \
    unzip \
    wget \
    xvfb \
    gstreamer1.0-plugins-base-

# install go
COPY --from=1 /usr/local/go /usr/local/go
ENV PATH="/usr/local/go/bin:${PATH}"

# install chrome
COPY --from=livekit/chrome-installer:146.0.7680.177-1 /chrome-installer /chrome-installer
RUN /chrome-installer/install-chrome "$TARGETPLATFORM"

# clean up
RUN rm -rf /var/lib/apt/lists/*

# install rtsp server
RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then ARCH=arm64v8; else ARCH=amd64; fi && \
    wget https://github.com/bluenviron/mediamtx/releases/download/v1.8.1/mediamtx_v1.8.1_linux_${ARCH}.tar.gz && \
    tar -zxvf mediamtx_v1.8.1_linux_${ARCH}.tar.gz && \
    rm mediamtx_v1.8.1_linux_${ARCH}.tar.gz && \
    sed -i 's_record: no_record: yes_g' mediamtx.yml && \
    sed -i 's_recordPath: ./recordings/%path/_recordPath: /out/output/stream-_g' mediamtx.yml

# create egress user
RUN useradd -ms /bin/bash -g root -G sudo,pulse,pulse-access egress
# FIX: absolute paths — the relative form relied on the stage's implicit "/"
# working directory; the chown below already uses /home/egress
RUN mkdir -p /home/egress/tmp /home/egress/.cache/xdgr && \
    chown -R egress /home/egress

# copy files
COPY test/agents/requirements.txt /agents/requirements.txt
RUN pip install --break-system-packages --no-cache-dir -r /agents/requirements.txt
COPY test/ /workspace/test/
COPY --from=1 /workspace/egress /bin/
COPY --from=1 /workspace/test.test .
COPY media-samples /media-samples
COPY --from=1 /workspace/test/agents /agents
COPY build/test/entrypoint.sh .

# Force lint stage to run successfully
COPY --from=lint /lint_ok /__lint_ok

# run tests
USER egress
ENV PATH=${PATH}:/chrome
ENV XDG_RUNTIME_DIR=/home/egress/.cache/xdgr
ENV CHROME_DEVEL_SANDBOX=/usr/local/sbin/chrome-devel-sandbox
ENTRYPOINT ["./entrypoint.sh"]
================================================
FILE: build/test/entrypoint.sh
================================================
#!/usr/bin/env bash
# Copyright 2023 LiveKit, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -eo pipefail

# Start pulseaudio
rm -rf /var/run/pulse /var/lib/pulse /home/egress/.config/pulse /home/egress/.cache/xdgr/pulse
pulseaudio -D --verbose --exit-idle-time=-1 --disallow-exit > /dev/null 2>&1

# Run RTSP server
./mediamtx > /dev/null 2>&1 &

# Run tests
if [[ -z ${GITHUB_WORKFLOW+x} ]]; then
  exec ./test.test -test.v -test.timeout 30m
else
  go install github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@latest
  exec go tool test2json -p egress ./test.test -test.v -test.timeout 30m 2>&1 | "$HOME"/go/bin/gotestfmt
fi
================================================
FILE: build/test/fetch-media-samples.sh
================================================
#!/usr/bin/env bash
set -euo pipefail

REPO="livekit/media-samples"
DEST="media-samples"
REF="${1:-main}"

export GIT_TERMINAL_PROMPT=0

if ! command -v git-lfs >/dev/null 2>&1; then
  echo "git-lfs not found. Install it (brew install git-lfs / apt-get install git-lfs)" >&2
  exit 1
fi

git lfs install --local

# run git with an Authorization header only if GITHUB_TOKEN is set
g() {
  if [[ -n "${GITHUB_TOKEN:-}" ]]; then
    local b64
    b64="$(printf 'x-access-token:%s' "$GITHUB_TOKEN" | base64)"
    git -c "http.https://github.com/.extraheader=AUTHORIZATION: basic $b64" "$@"
  else
    git "$@"
  fi
}

if [[ -d "$DEST/.git" ]]; then
  git -C "$DEST" config core.hooksPath /dev/null
  g -C "$DEST" fetch --depth=1 origin "$REF"
  git -C "$DEST" checkout -f FETCH_HEAD
else
  tmpl="$(mktemp -d)"
  g -c core.hooksPath=/dev/null \
    clone --template "$tmpl" --depth 1 --branch "$REF" \
    "https://github.com/${REPO}.git" "$DEST"
  rm -rf "$tmpl"
fi

g -C "$DEST" lfs pull
================================================
FILE: chrome-sandboxing-seccomp-profile.json
================================================
{
  "defaultAction": "SCMP_ACT_ERRNO",
  "defaultErrnoRet": 1,
  "archMap": [
    { "architecture": "SCMP_ARCH_X86_64", "subArchitectures": [ "SCMP_ARCH_X86", "SCMP_ARCH_X32" ] },
    { "architecture": "SCMP_ARCH_AARCH64", "subArchitectures": [ "SCMP_ARCH_ARM" ] },
    { "architecture": "SCMP_ARCH_MIPS64", "subArchitectures": [ "SCMP_ARCH_MIPS", "SCMP_ARCH_MIPS64N32" ] },
    { "architecture": "SCMP_ARCH_MIPS64N32", "subArchitectures": [ "SCMP_ARCH_MIPS", "SCMP_ARCH_MIPS64" ] },
    { "architecture": "SCMP_ARCH_MIPSEL64", "subArchitectures": [ "SCMP_ARCH_MIPSEL", "SCMP_ARCH_MIPSEL64N32" ] },
    { "architecture": "SCMP_ARCH_MIPSEL64N32", "subArchitectures": [ "SCMP_ARCH_MIPSEL", "SCMP_ARCH_MIPSEL64" ] },
    { "architecture": "SCMP_ARCH_S390X", "subArchitectures": [ "SCMP_ARCH_S390" ] },
    { "architecture": "SCMP_ARCH_RISCV64", "subArchitectures": null }
  ],
  "syscalls": [
    {
      "names": [
        "accept", "accept4", "access", "adjtimex", "alarm", "bind", "brk",
        "capget", "capset", "chdir", "chmod", "chown", "chown32",
        "clock_adjtime", "clock_adjtime64", "clock_getres",
        "clock_getres_time64", "clock_gettime", "clock_gettime64",
        "clock_nanosleep",
"clock_nanosleep_time64", "close", "close_range", "connect", "copy_file_range", "creat", "dup", "dup2", "dup3", "epoll_create", "epoll_create1", "epoll_ctl", "epoll_ctl_old", "epoll_pwait", "epoll_pwait2", "epoll_wait", "epoll_wait_old", "eventfd", "eventfd2", "execve", "execveat", "exit", "exit_group", "faccessat", "faccessat2", "fadvise64", "fadvise64_64", "fallocate", "fanotify_mark", "fchdir", "fchmod", "fchmodat", "fchown", "fchown32", "fchownat", "fcntl", "fcntl64", "fdatasync", "fgetxattr", "flistxattr", "flock", "fork", "fremovexattr", "fsetxattr", "fstat", "fstat64", "fstatat64", "fstatfs", "fstatfs64", "fsync", "ftruncate", "ftruncate64", "futex", "futex_time64", "futex_waitv", "futimesat", "getcpu", "getcwd", "getdents", "getdents64", "getegid", "getegid32", "geteuid", "geteuid32", "getgid", "getgid32", "getgroups", "getgroups32", "getitimer", "getpeername", "getpgid", "getpgrp", "getpid", "getppid", "getpriority", "getrandom", "getresgid", "getresgid32", "getresuid", "getresuid32", "getrlimit", "get_robust_list", "getrusage", "getsid", "getsockname", "getsockopt", "get_thread_area", "gettid", "gettimeofday", "getuid", "getuid32", "getxattr", "inotify_add_watch", "inotify_init", "inotify_init1", "inotify_rm_watch", "io_cancel", "ioctl", "io_destroy", "io_getevents", "io_pgetevents", "io_pgetevents_time64", "ioprio_get", "ioprio_set", "io_setup", "io_submit", "io_uring_enter", "io_uring_register", "io_uring_setup", "ipc", "kill", "landlock_add_rule", "landlock_create_ruleset", "landlock_restrict_self", "lchown", "lchown32", "lgetxattr", "link", "linkat", "listen", "listxattr", "llistxattr", "_llseek", "lremovexattr", "lseek", "lsetxattr", "lstat", "lstat64", "madvise", "membarrier", "memfd_create", "memfd_secret", "mincore", "mkdir", "mkdirat", "mknod", "mknodat", "mlock", "mlock2", "mlockall", "mmap", "mmap2", "mprotect", "mq_getsetattr", "mq_notify", "mq_open", "mq_timedreceive", "mq_timedreceive_time64", "mq_timedsend", "mq_timedsend_time64", 
"mq_unlink", "mremap", "msgctl", "msgget", "msgrcv", "msgsnd", "msync", "munlock", "munlockall", "munmap", "name_to_handle_at", "nanosleep", "newfstatat", "_newselect", "open", "openat", "openat2", "pause", "pidfd_open", "pidfd_send_signal", "pipe", "pipe2", "pkey_alloc", "pkey_free", "pkey_mprotect", "poll", "ppoll", "ppoll_time64", "prctl", "pread64", "preadv", "preadv2", "prlimit64", "process_mrelease", "pselect6", "pselect6_time64", "pwrite64", "pwritev", "pwritev2", "read", "readahead", "readlink", "readlinkat", "readv", "recv", "recvfrom", "recvmmsg", "recvmmsg_time64", "recvmsg", "remap_file_pages", "removexattr", "rename", "renameat", "renameat2", "restart_syscall", "rmdir", "rseq", "rt_sigaction", "rt_sigpending", "rt_sigprocmask", "rt_sigqueueinfo", "rt_sigreturn", "rt_sigsuspend", "rt_sigtimedwait", "rt_sigtimedwait_time64", "rt_tgsigqueueinfo", "sched_getaffinity", "sched_getattr", "sched_getparam", "sched_get_priority_max", "sched_get_priority_min", "sched_getscheduler", "sched_rr_get_interval", "sched_rr_get_interval_time64", "sched_setaffinity", "sched_setattr", "sched_setparam", "sched_setscheduler", "sched_yield", "seccomp", "select", "semctl", "semget", "semop", "semtimedop", "semtimedop_time64", "send", "sendfile", "sendfile64", "sendmmsg", "sendmsg", "sendto", "setfsgid", "setfsgid32", "setfsuid", "setfsuid32", "setgid", "setgid32", "setgroups", "setgroups32", "setitimer", "setpgid", "setpriority", "setregid", "setregid32", "setresgid", "setresgid32", "setresuid", "setresuid32", "setreuid", "setreuid32", "setrlimit", "set_robust_list", "setsid", "setsockopt", "set_thread_area", "set_tid_address", "setuid", "setuid32", "setxattr", "shmat", "shmctl", "shmdt", "shmget", "shutdown", "sigaltstack", "signalfd", "signalfd4", "sigprocmask", "sigreturn", "socketcall", "socketpair", "splice", "stat", "stat64", "statfs", "statfs64", "statx", "symlink", "symlinkat", "sync", "sync_file_range", "syncfs", "sysinfo", "tee", "tgkill", "time", "timer_create", 
"timer_delete", "timer_getoverrun", "timer_gettime", "timer_gettime64", "timer_settime", "timer_settime64", "timerfd_create", "timerfd_gettime", "timerfd_gettime64", "timerfd_settime", "timerfd_settime64", "times", "tkill", "truncate", "truncate64", "ugetrlimit", "umask", "uname", "unlink", "unlinkat", "utime", "utimensat", "utimensat_time64", "utimes", "vfork", "vmsplice", "wait4", "waitid", "waitpid", "write", "writev", "clone", "unshare" ], "action": "SCMP_ACT_ALLOW" }, { "names": [ "process_vm_readv", "process_vm_writev", "ptrace" ], "action": "SCMP_ACT_ALLOW", "includes": { "minKernel": "4.8" } }, { "names": [ "socket" ], "action": "SCMP_ACT_ALLOW", "args": [ { "index": 0, "value": 40, "op": "SCMP_CMP_NE" } ] }, { "names": [ "personality" ], "action": "SCMP_ACT_ALLOW", "args": [ { "index": 0, "value": 0, "op": "SCMP_CMP_EQ" } ] }, { "names": [ "personality" ], "action": "SCMP_ACT_ALLOW", "args": [ { "index": 0, "value": 8, "op": "SCMP_CMP_EQ" } ] }, { "names": [ "personality" ], "action": "SCMP_ACT_ALLOW", "args": [ { "index": 0, "value": 131072, "op": "SCMP_CMP_EQ" } ] }, { "names": [ "personality" ], "action": "SCMP_ACT_ALLOW", "args": [ { "index": 0, "value": 131080, "op": "SCMP_CMP_EQ" } ] }, { "names": [ "personality" ], "action": "SCMP_ACT_ALLOW", "args": [ { "index": 0, "value": 4294967295, "op": "SCMP_CMP_EQ" } ] }, { "names": [ "sync_file_range2", "swapcontext" ], "action": "SCMP_ACT_ALLOW", "includes": { "arches": [ "ppc64le" ] } }, { "names": [ "arm_fadvise64_64", "arm_sync_file_range", "sync_file_range2", "breakpoint", "cacheflush", "set_tls" ], "action": "SCMP_ACT_ALLOW", "includes": { "arches": [ "arm", "arm64" ] } }, { "names": [ "arch_prctl" ], "action": "SCMP_ACT_ALLOW", "includes": { "arches": [ "amd64", "x32" ] } }, { "names": [ "modify_ldt" ], "action": "SCMP_ACT_ALLOW", "includes": { "arches": [ "amd64", "x32", "x86" ] } }, { "names": [ "s390_pci_mmio_read", "s390_pci_mmio_write", "s390_runtime_instr" ], "action": "SCMP_ACT_ALLOW", 
"includes": { "arches": [ "s390", "s390x" ] } }, { "names": [ "riscv_flush_icache" ], "action": "SCMP_ACT_ALLOW", "includes": { "arches": [ "riscv64" ] } }, { "names": [ "open_by_handle_at" ], "action": "SCMP_ACT_ALLOW", "includes": { "caps": [ "CAP_DAC_READ_SEARCH" ] } }, { "names": [ "bpf", "clone3", "fanotify_init", "fsconfig", "fsmount", "fsopen", "fspick", "lookup_dcookie", "mount", "mount_setattr", "move_mount", "open_tree", "perf_event_open", "quotactl", "quotactl_fd", "setdomainname", "sethostname", "setns", "syslog", "umount", "umount2" ], "action": "SCMP_ACT_ALLOW", "includes": { "caps": [ "CAP_SYS_ADMIN" ] } }, { "names": [ "clone3" ], "action": "SCMP_ACT_ERRNO", "errnoRet": 38, "excludes": { "caps": [ "CAP_SYS_ADMIN" ] } }, { "names": [ "reboot" ], "action": "SCMP_ACT_ALLOW", "includes": { "caps": [ "CAP_SYS_BOOT" ] } }, { "names": [ "chroot" ], "action": "SCMP_ACT_ALLOW", "includes": { "caps": [ "CAP_SYS_CHROOT" ] } }, { "names": [ "delete_module", "init_module", "finit_module" ], "action": "SCMP_ACT_ALLOW", "includes": { "caps": [ "CAP_SYS_MODULE" ] } }, { "names": [ "acct" ], "action": "SCMP_ACT_ALLOW", "includes": { "caps": [ "CAP_SYS_PACCT" ] } }, { "names": [ "kcmp", "pidfd_getfd", "process_madvise", "process_vm_readv", "process_vm_writev", "ptrace" ], "action": "SCMP_ACT_ALLOW", "includes": { "caps": [ "CAP_SYS_PTRACE" ] } }, { "names": [ "iopl", "ioperm" ], "action": "SCMP_ACT_ALLOW", "includes": { "caps": [ "CAP_SYS_RAWIO" ] } }, { "names": [ "settimeofday", "stime", "clock_settime", "clock_settime64" ], "action": "SCMP_ACT_ALLOW", "includes": { "caps": [ "CAP_SYS_TIME" ] } }, { "names": [ "vhangup" ], "action": "SCMP_ACT_ALLOW", "includes": { "caps": [ "CAP_SYS_TTY_CONFIG" ] } }, { "names": [ "get_mempolicy", "mbind", "set_mempolicy" ], "action": "SCMP_ACT_ALLOW", "includes": { "caps": [ "CAP_SYS_NICE" ] } }, { "names": [ "syslog" ], "action": "SCMP_ACT_ALLOW", "includes": { "caps": [ "CAP_SYSLOG" ] } }, { "names": [ "bpf" ], "action": 
"SCMP_ACT_ALLOW", "includes": { "caps": [ "CAP_BPF" ] } }, { "names": [ "perf_event_open" ], "action": "SCMP_ACT_ALLOW", "includes": { "caps": [ "CAP_PERFMON" ] } } ] } ================================================ FILE: cmd/server/http.go ================================================ // Copyright 2023 LiveKit, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "net/http" "github.com/livekit/egress/pkg/server" "github.com/livekit/protocol/logger" ) type httpHandler struct { svc *server.Server } func (h *httpHandler) ServeHTTP(w http.ResponseWriter, _ *http.Request) { info, err := h.svc.Status() if err != nil { logger.Errorw("failed to read status", err) } w.Header().Set("Content-Type", "application/json") _, _ = w.Write(info) } ================================================ FILE: cmd/server/main.go ================================================ // Copyright 2023 LiveKit, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package main

import (
	"context"
	"embed"
	"fmt"
	"io/fs"
	"net/http"
	"os"
	"os/signal"
	"syscall"

	"github.com/urfave/cli/v3"
	"google.golang.org/protobuf/encoding/protojson"

	"github.com/livekit/egress/pkg/config"
	"github.com/livekit/egress/pkg/errors"
	"github.com/livekit/egress/pkg/handler"
	"github.com/livekit/egress/pkg/info"
	"github.com/livekit/egress/pkg/server"
	"github.com/livekit/egress/version"
	"github.com/livekit/protocol/logger"
	lkredis "github.com/livekit/protocol/redis"
	"github.com/livekit/protocol/rpc"
	_ "github.com/livekit/protocol/utils/hwstats/maxprocs"
	"github.com/livekit/psrpc"
)

var (
	// Compiled-in template assets, served by the templates server in runService.
	//go:embed templates
	templateEmbedFs embed.FS
)

// main wires up the CLI: the default action runs the long-lived egress
// service (runService); the hidden "run-handler" subcommand runs a single
// egress request in a child process (runHandler), and is how the service
// launches per-request handler processes.
func main() {
	cmd := &cli.Command{
		Name:        "egress",
		Usage:       "LiveKit Egress",
		Version:     version.Version,
		Description: "runs the recorder in standalone mode or as a service",
		Commands: []*cli.Command{
			{
				Name:        "run-handler",
				Description: "runs a request in a new process",
				Flags: []cli.Flag{
					&cli.StringFlag{
						Name: "request",
					},
					&cli.StringFlag{
						Name: "config",
					},
				},
				Action: runHandler,
				// Internal entry point; not shown in --help output.
				Hidden: true,
			},
		},
		Flags: []cli.Flag{
			&cli.StringFlag{
				Name:    "config",
				Usage:   "LiveKit Egress yaml config file",
				Sources: cli.EnvVars("EGRESS_CONFIG_FILE"),
			},
			&cli.StringFlag{
				Name:    "config-body",
				Usage:   "LiveKit Egress yaml config body",
				Sources: cli.EnvVars("EGRESS_CONFIG_BODY"),
			},
		},
		Action: runService,
	}

	if err := cmd.Run(context.Background(), os.Args); err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
}

// runService starts the egress service and blocks until it exits.
// Config comes either from --config-body (inline YAML) or --config (file
// path); an inline body takes precedence. It connects redis for the message
// bus, starts the optional HTTP health endpoint, installs signal handling
// (SIGTERM/SIGQUIT = graceful drain, SIGINT = abort recordings), serves the
// embedded templates, then runs the service loop.
func runService(_ context.Context, c *cli.Command) error {
	configFile := c.String("config")
	configBody := c.String("config-body")
	if configBody == "" {
		if configFile == "" {
			return errors.ErrNoConfig
		}
		content, err := os.ReadFile(configFile)
		if err != nil {
			return err
		}
		configBody = string(content)
	}

	conf, err := config.NewServiceConfig(configBody)
	if err != nil {
		return err
	}

	rc, err := lkredis.GetRedisClient(conf.Redis)
	if err != nil {
		return err
	}

	bus := psrpc.NewRedisMessageBus(rc)
	ioClient, err := info.NewSessionReporter(&conf.BaseConfig, bus)
	if err != nil {
		return err
	}

	svc, err := server.NewServer(conf, bus, ioClient)
	if err != nil {
		return err
	}

	if conf.HealthPort != 0 {
		// Best-effort health endpoint; ListenAndServe errors are ignored
		// because the endpoint is optional and the service keeps running.
		go func() {
			_ = http.ListenAndServe(fmt.Sprintf(":%d", conf.HealthPort), &httpHandler{svc: svc})
		}()
	}

	// SIGTERM/SIGQUIT: let in-flight recordings finish before shutting down.
	stopChan := make(chan os.Signal, 1)
	signal.Notify(stopChan, syscall.SIGTERM, syscall.SIGQUIT)

	// SIGINT: stop recordings immediately and shut down.
	killChan := make(chan os.Signal, 1)
	signal.Notify(killChan, syscall.SIGINT)

	go func() {
		select {
		case sig := <-stopChan:
			logger.Infow("exit requested, finishing recording then shutting down", "signal", sig)
			svc.Shutdown(true, false)
		case sig := <-killChan:
			logger.Infow("exit requested, stopping recording and shutting down", "signal", sig)
			svc.Shutdown(true, true)
		}
	}()

	// Serve the embedded template assets rooted at "templates/".
	rfs, err := fs.Sub(templateEmbedFs, "templates")
	if err != nil {
		return err
	}
	err = svc.StartTemplatesServer(rfs)
	if err != nil {
		return err
	}

	return svc.Run()
}

// runHandler executes a single egress request in this process (launched by
// the service via the hidden "run-handler" subcommand). The request arrives
// as protojson in --request and the config as YAML in --config. It sets up a
// per-request temp dir (removed on exit), connects redis for the bus, maps
// SIGINT to handler.Kill, and blocks in h.Run until the request completes.
func runHandler(_ context.Context, c *cli.Command) error {
	configBody := c.String("config")
	if configBody == "" {
		return errors.ErrNoConfig
	}

	req := &rpc.StartEgressRequest{}
	reqString := c.String("request")
	err := protojson.Unmarshal([]byte(reqString), req)
	if err != nil {
		return err
	}

	conf, err := config.NewPipelineConfig(configBody, req)
	if err != nil {
		return err
	}
	logger.Debugw("handler launched")

	err = os.MkdirAll(conf.TmpDir, 0755)
	if err != nil {
		return err
	}
	defer os.RemoveAll(conf.TmpDir)
	// Point TMPDIR at the per-request dir so scratch files are cleaned up
	// with it; the Setenv error is intentionally ignored (best effort).
	_ = os.Setenv("TMPDIR", conf.TmpDir)

	rc, err := lkredis.GetRedisClient(conf.Redis)
	if err != nil {
		return err
	}

	killChan := make(chan os.Signal, 1)
	signal.Notify(killChan, syscall.SIGINT)

	bus := psrpc.NewRedisMessageBus(rc)
	h, err := handler.NewHandler(conf, bus)
	if err != nil {
		// service will send info update and shut down
		logger.Errorw("failed to create handler", err)
		return err
	}

	go func() {
		sig := <-killChan
		logger.Infow("exit requested, stopping recording and shutting down", "signal", sig)
		h.Kill()
	}()

	h.Run()
	return nil
}

================================================
FILE: cmd/template_version/main.go
================================================
// Copyright 2025 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
	"fmt"

	"github.com/livekit/egress/version"
)

// main prints the compiled-in template version to stdout.
// NOTE(review): presumably consumed by build/release tooling to tag the
// template image — confirm against the publish-template workflow.
func main() {
	fmt.Println(version.TemplateVersion)
}

================================================
FILE: go.mod
================================================
module github.com/livekit/egress

replace github.com/go-gst/go-gst => github.com/livekit/gst-go v0.0.0-20250701011214-e7f61abd14cb

go 1.26.1

tool github.com/maxbrunsfeld/counterfeiter/v6

require (
	cloud.google.com/go/storage v1.55.0
	github.com/Azure/azure-storage-blob-go v0.15.0
	github.com/aws/aws-sdk-go-v2 v1.41.5
	github.com/aws/aws-sdk-go-v2/config v1.29.17
	github.com/aws/aws-sdk-go-v2/credentials v1.17.70
	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.81
	github.com/aws/aws-sdk-go-v2/service/s3 v1.97.3
	github.com/aws/smithy-go v1.24.2
	github.com/chromedp/cdproto v0.0.0-20260405000525-47a8ff65b46a
	github.com/chromedp/chromedp v0.15.1
	github.com/frostbyte73/core v0.1.1
	github.com/go-gst/go-glib v1.4.1-0.20241209142714-f53cebf18559
	github.com/go-gst/go-gst v1.4.0
	github.com/go-jose/go-jose/v4 v4.1.4
	github.com/googleapis/gax-go/v2 v2.14.2
	github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674
	github.com/linkdata/deadlock v0.5.5
	github.com/livekit/livekit-server v1.9.12
	github.com/livekit/mageutil v0.0.0-20250511045019-0f1ff63f7731
	github.com/livekit/media-sdk v0.0.0-20260422170315-2c3eed337496
github.com/livekit/protocol v1.45.6 github.com/livekit/psrpc v0.7.1 github.com/livekit/server-sdk-go/v2 v2.16.2-0.20260401161108-50e969e2961f github.com/livekit/storage v0.0.0-20251113154014-aa1f4d0ce057 github.com/llehouerou/go-mp3 v1.2.0 github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 github.com/pion/rtp v1.10.1 github.com/pion/webrtc/v4 v4.2.7 github.com/prometheus/client_golang v1.23.0 github.com/prometheus/client_model v0.6.2 github.com/prometheus/common v0.67.5 github.com/stretchr/testify v1.11.1 github.com/urfave/cli/v3 v3.3.9 go.opentelemetry.io/otel v1.40.0 go.uber.org/atomic v1.11.0 go.uber.org/zap v1.27.1 golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a google.golang.org/api v0.238.0 google.golang.org/grpc v1.79.3 google.golang.org/protobuf v1.36.11 gopkg.in/natefinch/lumberjack.v2 v2.2.1 gopkg.in/yaml.v3 v3.0.1 ) require ( buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.11-20260209202127-80ab13bee0bf.1 // indirect buf.build/go/protovalidate v1.1.2 // indirect buf.build/go/protoyaml v0.6.0 // indirect cel.dev/expr v0.25.1 // indirect cloud.google.com/go v0.121.1 // indirect cloud.google.com/go/auth v0.16.2 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect cloud.google.com/go/compute/metadata v0.9.0 // indirect cloud.google.com/go/iam v1.5.2 // indirect cloud.google.com/go/monitoring v1.24.2 // indirect github.com/Azure/azure-pipeline-go v0.2.3 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 // indirect github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible // indirect github.com/antlr4-go/antlr/v4 v4.13.1 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.8 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32 // indirect 
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.21 // indirect github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.21 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.22 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.7 // indirect github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.13 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.21 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.21 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.25.5 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.34.0 // indirect github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bep/debounce v1.2.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/chromedp/sysutil v1.1.0 // indirect github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dennwc/iters v1.2.2 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/elliotchance/orderedmap/v2 v2.7.0 // indirect github.com/envoyproxy/go-control-plane/envoy v1.36.0 // indirect github.com/envoyproxy/protoc-gen-validate v1.3.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.9.0 // indirect github.com/gammazero/deque v1.2.1 // indirect github.com/go-gst/go-pointer v0.0.0-20241127163939-ba766f075b4c // indirect github.com/go-jose/go-jose/v3 v3.0.5 // indirect github.com/go-json-experiment/json v0.0.0-20260214004413-d219187c3433 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/gobwas/httphead v0.1.0 // indirect github.com/gobwas/pool v0.2.1 // indirect github.com/gobwas/ws v1.4.0 
// indirect github.com/google/cel-go v0.27.0 // indirect github.com/google/s2a-go v0.1.9 // indirect github.com/google/uuid v1.6.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-retryablehttp v0.7.7 // indirect github.com/jellydator/ttlcache/v3 v3.4.0 // indirect github.com/jxskiss/base62 v1.1.0 // indirect github.com/klauspost/compress v1.18.4 // indirect github.com/klauspost/cpuid/v2 v2.3.0 // indirect github.com/lithammer/shortuuid/v4 v4.2.0 // indirect github.com/livekit/mediatransportutil v0.0.0-20260113174415-2e8ba344fca3 // indirect github.com/mackerelio/go-osstat v0.2.6 // indirect github.com/magefile/mage v1.15.0 // indirect github.com/mattn/go-ieproxy v0.0.12 // indirect github.com/maxbrunsfeld/counterfeiter/v6 v6.12.1 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/nats-io/nats.go v1.48.0 // indirect github.com/nats-io/nkeys v0.4.15 // indirect github.com/nats-io/nuid v1.0.1 // indirect github.com/petermattis/goid v0.0.0-20250813065127-a731cc31b4fe // indirect github.com/pion/datachannel v1.6.0 // indirect github.com/pion/dtls/v3 v3.1.2 // indirect github.com/pion/ice/v4 v4.2.0 // indirect github.com/pion/interceptor v0.1.44 // indirect github.com/pion/logging v0.2.4 // indirect github.com/pion/mdns/v2 v2.1.0 // indirect github.com/pion/randutil v0.1.0 // indirect github.com/pion/rtcp v1.2.16 // indirect github.com/pion/sctp v1.9.2 // indirect github.com/pion/sdp/v3 v3.0.18 // indirect github.com/pion/srtp/v3 v3.0.10 // indirect github.com/pion/stun/v3 v3.1.1 // indirect github.com/pion/transport/v4 v4.0.1 // indirect github.com/pion/turn/v4 v4.1.4 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/procfs 
v0.19.2 // indirect github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect github.com/redis/go-redis/v9 v9.17.3 // indirect github.com/spiffe/go-spiffe/v2 v2.6.0 // indirect github.com/twitchtv/twirp v8.1.3+incompatible // indirect github.com/wlynxg/anet v0.0.5 // indirect github.com/zeebo/xxh3 v1.1.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/contrib/detectors/gcp v1.39.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect go.opentelemetry.io/otel/metric v1.40.0 // indirect go.opentelemetry.io/otel/sdk v1.40.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.40.0 // indirect go.opentelemetry.io/otel/trace v1.40.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap/exp v0.3.0 // indirect go.yaml.in/yaml/v2 v2.4.3 // indirect golang.org/x/crypto v0.48.0 // indirect golang.org/x/mod v0.33.0 // indirect golang.org/x/net v0.50.0 // indirect golang.org/x/oauth2 v0.34.0 // indirect golang.org/x/sync v0.19.0 // indirect golang.org/x/sys v0.42.0 // indirect golang.org/x/text v0.34.0 // indirect golang.org/x/time v0.14.0 // indirect golang.org/x/tools v0.42.0 // indirect google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57 // indirect ) ================================================ FILE: go.sum ================================================ buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.11-20260209202127-80ab13bee0bf.1 h1:PMmTMyvHScV9Mn8wc6ASge9uRcHy0jtqPd+fM35LmsQ= buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.11-20260209202127-80ab13bee0bf.1/go.mod h1:tvtbpgaVXZX4g6Pn+AnzFycuRK3MOz5HJfEGeEllXYM= buf.build/go/protovalidate v1.1.2 
h1:83vYHoY8f34hB8MeitGaYE3CGVPFxwdEUuskh5qQpA0= buf.build/go/protovalidate v1.1.2/go.mod h1:Ez3z+w4c+wG+EpW8ovgZaZPnPl2XVF6kaxgcv1NG/QE= buf.build/go/protoyaml v0.6.0 h1:Nzz1lvcXF8YgNZXk+voPPwdU8FjDPTUV4ndNTXN0n2w= buf.build/go/protoyaml v0.6.0/go.mod h1:RgUOsBu/GYKLDSIRgQXniXbNgFlGEZnQpRAUdLAFV2Q= cel.dev/expr v0.25.1 h1:1KrZg61W6TWSxuNZ37Xy49ps13NUovb66QLprthtwi4= cel.dev/expr v0.25.1/go.mod h1:hrXvqGP6G6gyx8UAHSHJ5RGk//1Oj5nXQ2NI02Nrsg4= cloud.google.com/go v0.121.1 h1:S3kTQSydxmu1JfLRLpKtxRPA7rSrYPRPEUmL/PavVUw= cloud.google.com/go v0.121.1/go.mod h1:nRFlrHq39MNVWu+zESP2PosMWA0ryJw8KUBZ2iZpxbw= cloud.google.com/go/auth v0.16.2 h1:QvBAGFPLrDeoiNjyfVunhQ10HKNYuOwZ5noee0M5df4= cloud.google.com/go/auth v0.16.2/go.mod h1:sRBas2Y1fB1vZTdurouM0AzuYQBMZinrUYL8EufhtEA= cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs= cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10= cloud.google.com/go/iam v1.5.2 h1:qgFRAGEmd8z6dJ/qyEchAuL9jpswyODjA2lS+w234g8= cloud.google.com/go/iam v1.5.2/go.mod h1:SE1vg0N81zQqLzQEwxL2WI6yhetBdbNQuTvIKCSkUHE= cloud.google.com/go/logging v1.13.0 h1:7j0HgAp0B94o1YRDqiqm26w4q1rDMH7XNRU34lJXHYc= cloud.google.com/go/logging v1.13.0/go.mod h1:36CoKh6KA/M0PbhPKMq6/qety2DCAErbhXT62TuXALA= cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE= cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY= cloud.google.com/go/monitoring v1.24.2 h1:5OTsoJ1dXYIiMiuL+sYscLc9BumrL3CarVLL7dd7lHM= cloud.google.com/go/monitoring v1.24.2/go.mod h1:x7yzPWcgDRnPEv3sI+jJGBkwl5qINf+6qY4eq0I9B4U= cloud.google.com/go/storage v1.55.0 h1:NESjdAToN9u1tmhVqhXCaCwYBuvEhZLLv0gBr+2znf0= cloud.google.com/go/storage v1.55.0/go.mod 
h1:ztSmTTwzsdXe5syLVS0YsbFxXuvEmEyZj7v7zChEmuY= cloud.google.com/go/trace v1.11.6 h1:2O2zjPzqPYAHrn3OKl029qlqG6W8ZdYaOWRyr8NgMT4= cloud.google.com/go/trace v1.11.6/go.mod h1:GA855OeDEBiBMzcckLPE2kDunIpC72N+Pq8WFieFjnI= dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-storage-blob-go v0.15.0 h1:rXtgp8tN1p29GvpGgfJetavIG0V7OgcSXPpwp3tx6qk= github.com/Azure/azure-storage-blob-go v0.15.0/go.mod h1:vbjsVbX0dlxnRc4FFMPsS9BsJWPcne7GB7onqlPvz58= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest/adal v0.9.13 h1:Mp5hbtOePIzM8pJVRa3YLrWWmZtoxRXqUEzCfJt3+/Q= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= 
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 h1:sBEjpZlNHzK1voKq9695PJSX2o5NEXl7/OL3coiIY0c= github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0/go.mod h1:P4WPRUkOhJC13W//jWpyfJNDAIpvRbAUIYLX/4jtlE0= github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 h1:fYE9p3esPxA/C0rQ0AHhP0drtPXDRhaWiwg1DPqO7IU= github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0/go.mod h1:BnBReJLvVYx2CS/UHOgVz2BXKXD9wsQPxZug20nZhd0= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.51.0 h1:OqVGm6Ei3x5+yZmSJG1Mh2NwHvpVmZ08CB5qJhT9Nuk= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.51.0/go.mod h1:SZiPHWGOOk3bl8tkevxkoiwPgsIl6CwrWcbwjfHZpdM= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 h1:6/0iUd0xrnX7qt+mLNRwg5c0PGv8wpE8K90ryANQwMI= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible h1:8psS8a+wKfiLt1iVDX79F7Y6wUM49Lcha2FMXt4UM8g= github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ= github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw= 
github.com/aws/aws-sdk-go-v2 v1.41.5 h1:dj5kopbwUsVUVFgO4Fi5BIT3t4WyqIDjGKCangnV/yY= github.com/aws/aws-sdk-go-v2 v1.41.5/go.mod h1:mwsPRE8ceUUpiTgF7QmQIJ7lgsKUPQOUl3o72QBrE1o= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.8 h1:eBMB84YGghSocM7PsjmmPffTa+1FBUeNvGvFou6V/4o= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.8/go.mod h1:lyw7GFp3qENLh7kwzf7iMzAxDn+NzjXEAGjKS2UOKqI= github.com/aws/aws-sdk-go-v2/config v1.29.17 h1:jSuiQ5jEe4SAMH6lLRMY9OVC+TqJLP5655pBGjmnjr0= github.com/aws/aws-sdk-go-v2/config v1.29.17/go.mod h1:9P4wwACpbeXs9Pm9w1QTh6BwWwJjwYvJ1iCt5QbCXh8= github.com/aws/aws-sdk-go-v2/credentials v1.17.70 h1:ONnH5CM16RTXRkS8Z1qg7/s2eDOhHhaXVd72mmyv4/0= github.com/aws/aws-sdk-go-v2/credentials v1.17.70/go.mod h1:M+lWhhmomVGgtuPOhO85u4pEa3SmssPTdcYpP/5J/xc= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32 h1:KAXP9JSHO1vKGCr5f4O6WmlVKLFFXgWYAGoJosorxzU= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32/go.mod h1:h4Sg6FQdexC1yYG9RDnOvLbW1a/P986++/Y/a+GyEM8= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.81 h1:E5ff1vZlAudg24j5lF6F6/gBpln2LjWxGdQDBSLfVe4= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.81/go.mod h1:hHBLCuhHI4Aokvs5vdVoCDBzmFy86yxs5J7LEPQwQEM= github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.21 h1:Rgg6wvjjtX8bNHcvi9OnXWwcE0a2vGpbwmtICOsvcf4= github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.21/go.mod h1:A/kJFst/nm//cyqonihbdpQZwiUhhzpqTsdbhDdRF9c= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.21 h1:PEgGVtPoB6NTpPrBgqSE5hE/o47Ij9qk/SEZFbUOe9A= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.21/go.mod h1:p+hz+PRAYlY3zcpJhPwXlLC4C+kqn70WIHwnzAfs6ps= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo= github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.22 h1:rWyie/PxDRIdhNf4DzRk0lvjVOqFJuNnO8WwaIRVxzQ= 
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.22/go.mod h1:zd/JsJ4P7oGfUhXn1VyLqaRZwPmZwg44Jf2dS84Dm3Y= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.7 h1:5EniKhLZe4xzL7a+fU3C2tfUN4nWIqlLesfrjkuPFTY= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.7/go.mod h1:x0nZssQ3qZSnIcePWLvcoFisRXJzcTVvYpAAdYX8+GI= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.13 h1:JRaIgADQS/U6uXDqlPiefP32yXTda7Kqfx+LgspooZM= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.13/go.mod h1:CEuVn5WqOMilYl+tbccq8+N2ieCy0gVn3OtRb0vBNNM= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.21 h1:c31//R3xgIJMSC8S6hEVq+38DcvUlgFY0FM6mSI5oto= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.21/go.mod h1:r6+pf23ouCB718FUxaqzZdbpYFyDtehyZcmP5KL9FkA= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.21 h1:ZlvrNcHSFFWURB8avufQq9gFsheUgjVD9536obIknfM= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.21/go.mod h1:cv3TNhVrssKR0O/xxLJVRfd2oazSnZnkUeTf6ctUwfQ= github.com/aws/aws-sdk-go-v2/service/s3 v1.97.3 h1:HwxWTbTrIHm5qY+CAEur0s/figc3qwvLWsNkF4RPToo= github.com/aws/aws-sdk-go-v2/service/s3 v1.97.3/go.mod h1:uoA43SdFwacedBfSgfFSjjCvYe8aYBS7EnU5GZ/YKMM= github.com/aws/aws-sdk-go-v2/service/sso v1.25.5 h1:AIRJ3lfb2w/1/8wOOSqYb9fUKGwQbtysJ2H1MofRUPg= github.com/aws/aws-sdk-go-v2/service/sso v1.25.5/go.mod h1:b7SiVprpU+iGazDUqvRSLf5XmCdn+JtT1on7uNL6Ipc= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3 h1:BpOxT3yhLwSJ77qIY3DoHAQjZsc4HEGfMCE4NGy3uFg= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3/go.mod h1:vq/GQR1gOFLquZMSrxUK/cpvKCNVYibNyJ1m7JrU88E= github.com/aws/aws-sdk-go-v2/service/sts v1.34.0 h1:NFOJ/NXEGV4Rq//71Hs1jC/NvPs1ezajK+yQmkwnPV0= github.com/aws/aws-sdk-go-v2/service/sts v1.34.0/go.mod h1:7ph2tGpfQvwzgistp2+zga9f+bCjlQJPkPUmMgDSD7w= github.com/aws/smithy-go v1.24.2 h1:FzA3bu/nt/vDvmnkg+R8Xl46gmzEDam6mZ1hzmwXFng= github.com/aws/smithy-go v1.24.2/go.mod 
h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bep/debounce v1.2.1 h1:v67fRdBA9UQu2NhLFXrSg0Brw7CexQekrBwDMM8bzeY= github.com/bep/debounce v1.2.1/go.mod h1:H8yggRPQKLUhUoqrJC1bO2xNya7vanpDl7xR3ISbCJ0= github.com/brianvoe/gofakeit/v6 v6.28.0 h1:Xib46XXuQfmlLS2EXRuJpqcw8St6qSZz75OUo0tgAW4= github.com/brianvoe/gofakeit/v6 v6.28.0/go.mod h1:Xj58BMSnFqcn/fAQeSK+/PLtC5kSb7FJIq4JyGa8vEs= github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chromedp/cdproto v0.0.0-20260405000525-47a8ff65b46a h1:Kk4P1W58eAf+OUGtx51cM7CcJokJuBEmOxxwPdHFH4Q= github.com/chromedp/cdproto v0.0.0-20260405000525-47a8ff65b46a/go.mod h1:cbyjALe67vDvlvdiG9369P8w5U2w6IshwtyD2f2Tvag= github.com/chromedp/chromedp v0.15.1 h1:EJWiPm7BNqDqjYy6U0lTSL5wNH+iNt9GjC3a4gfjNyQ= github.com/chromedp/chromedp v0.15.1/go.mod h1:CdTHtUqD/dqaFw/cvFWtTydoEQS44wLBuwbMR9EkOY4= github.com/chromedp/sysutil v1.1.0 h1:PUFNv5EcprjqXZD9nJb9b/c9ibAbxiYo4exNWZyipwM= github.com/chromedp/sysutil v1.1.0/go.mod 
h1:WiThHUdltqCNKGc4gaU50XgYjwjYIhKWoHGPTUfWTJ8= github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5 h1:6xNmx7iTtyBRev0+D/Tv1FZd4SCg8axKApyNyRsAt/w= github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5/go.mod h1:KdCmV+x/BuvyMxRnYBlmVaq4OLiKW6iRQfvC62cvdkI= github.com/containerd/continuity v0.4.5 h1:ZRoN1sXq9u7V6QoHMcVWGhOwDFqZ4B9i5H6un1Wh0x4= github.com/containerd/continuity v0.4.5/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE= github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dennwc/iters v1.2.2 h1:XH2/Etihiy9ZvPOVCR+icQXeYlhbvS7k0qro4x/2qQo= github.com/dennwc/iters v1.2.2/go.mod h1:M9KuuMBeyEXYTmB7EnI9SCyALFCmPWOIxn5W1L0CjGg= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/docker/cli v29.0.0+incompatible h1:KgsN2RUFMNM8wChxryicn4p46BdQWpXOA1XLGBGPGAw= github.com/docker/cli v29.0.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94= 
github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/elliotchance/orderedmap/v2 v2.7.0 h1:WHuf0DRo63uLnldCPp9ojm3gskYwEdIIfAUVG5KhoOc= github.com/elliotchance/orderedmap/v2 v2.7.0/go.mod h1:85lZyVbpGaGvHvnKa7Qhx7zncAdBIBq6u56Hb1PRU5Q= github.com/envoyproxy/go-control-plane v0.14.0 h1:hbG2kr4RuFj222B6+7T83thSPqLjwBIfQawTkC++2HA= github.com/envoyproxy/go-control-plane v0.14.0/go.mod h1:NcS5X47pLl/hfqxU70yPwL9ZMkUlwlKxtAohpi2wBEU= github.com/envoyproxy/go-control-plane/envoy v1.36.0 h1:yg/JjO5E7ubRyKX3m07GF3reDNEnfOboJ0QySbH736g= github.com/envoyproxy/go-control-plane/envoy v1.36.0/go.mod h1:ty89S1YCCVruQAm9OtKeEkQLTb+Lkz0k8v9W0Oxsv98= github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI= github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4= github.com/envoyproxy/protoc-gen-validate v1.3.0 h1:TvGH1wof4H33rezVKWSpqKz5NXWg5VPuZ0uONDT6eb4= github.com/envoyproxy/protoc-gen-validate v1.3.0/go.mod h1:HvYl7zwPa5mffgyeTUHA9zHIH36nmrm7oCbo4YKoSWA= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/frostbyte73/core v0.1.1 h1:ChhJOR7bAKOCPbA+lqDLE2cGKlCG5JXsDvvQr4YaJIA= github.com/frostbyte73/core v0.1.1/go.mod 
h1:mhfOtR+xWAvwXiwor7jnqPMnu4fxbv1F2MwZ0BEpzZo= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/gammazero/deque v1.2.1 h1:9fnQVFCCZ9/NOc7ccTNqzoKd1tCWOqeI05/lPqFPMGQ= github.com/gammazero/deque v1.2.1/go.mod h1:5nSFkzVm+afG9+gy0VIowlqVAW4N8zNcMne+CMQVD2g= github.com/go-gst/go-glib v1.4.1-0.20241209142714-f53cebf18559 h1:AK60n6W3FLZTp9H1KU5VOa8XefNO0w0R3pfszphwX14= github.com/go-gst/go-glib v1.4.1-0.20241209142714-f53cebf18559/go.mod h1:ZWT4LXOO2PH8lSNu/dR5O2yoNQJKEgmijNa2d7nByK8= github.com/go-gst/go-pointer v0.0.0-20241127163939-ba766f075b4c h1:x8kKRVDmz5BRlolmDZGcsuZ1l+js6TRL3QWBJjGVctM= github.com/go-gst/go-pointer v0.0.0-20241127163939-ba766f075b4c/go.mod h1:qKw5ZZ0U58W6PU/7F/Lopv+14nKYmdXlOd7VnAZ17Mk= github.com/go-jose/go-jose/v3 v3.0.5 h1:BLLJWbC4nMZOfuPVxoZIxeYsn6Nl2r1fITaJ78UQlVQ= github.com/go-jose/go-jose/v3 v3.0.5/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ= github.com/go-jose/go-jose/v4 v4.1.4 h1:moDMcTHmvE6Groj34emNPLs/qtYXRVcd6S7NHbHz3kA= github.com/go-jose/go-jose/v4 v4.1.4/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08= github.com/go-json-experiment/json v0.0.0-20260214004413-d219187c3433 h1:vymEbVwYFP/L05h5TKQxvkXoKxNvTpjxYKdF1Nlwuao= github.com/go-json-experiment/json v0.0.0-20260214004413-d219187c3433/go.mod h1:tphK2c80bpPhMOI4v6bIc2xWywPfbqi1Z06+RcrMkDg= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= github.com/go-viper/mapstructure/v2 
v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU= github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM= github.com/gobwas/pool v0.2.1 h1:xfeeEhW7pwmX8nuLVlqbzVc7udMDrwetjEv+TZIz1og= github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= github.com/gobwas/ws v1.4.0 h1:CTaoG1tojrh4ucGPcoJFiAQUAsEWekEWvLy7GsVNqGs= github.com/gobwas/ws v1.4.0/go.mod h1:G3gNqMNtPppf5XUz7O4shetPpcZ1VJ7zt18dlUeakrc= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/cel-go v0.27.0 h1:e7ih85+4qVrBuqQWTW4FKSqZYokVuc3HnhH5keboFTo= github.com/google/cel-go v0.27.0/go.mod h1:tTJ11FWqnhw5KKpnWpvW9CJC3Y9GK4EIS0WXnBbebzw= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 
github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4= github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= github.com/googleapis/gax-go/v2 v2.14.2 h1:eBLnkZ9635krYIPD+ag1USrOAI0Nr0QYF3+/3GqO0k0= github.com/googleapis/gax-go/v2 v2.14.2/go.mod h1:ON64QhlJkhVtSqp4v1uaK92VyZ2gmvDQsweuyLV+8+w= github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= github.com/jellydator/ttlcache/v3 v3.4.0 h1:YS4P125qQS0tNhtL6aeYkheEaB/m8HCqdMMP4mnWdTY= github.com/jellydator/ttlcache/v3 v3.4.0/go.mod h1:Hw9EgjymziQD3yGsQdf1FqFdpp7YjFMd4Srg5EJlgD4= github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= github.com/jxskiss/base62 v1.1.0 h1:A5zbF8v8WXx2xixnAKD2w+abC+sIzYJX+nxmhA6HWFw= github.com/jxskiss/base62 v1.1.0/go.mod h1:HhWAlUXvxKThfOlZbcuFzsqwtF5TcqS9ru3y5GfjWAc= github.com/klauspost/compress v1.18.4 h1:RPhnKRAQ4Fh8zU2FY/6ZFDwTVTxgJ/EMydqSTzE9a2c= github.com/klauspost/compress v1.18.4/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= 
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80 h1:6Yzfa6GP0rIo/kULo2bwGEkFvCePZ3qHDDTC3/J9Swo= github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs= github.com/linkdata/deadlock v0.5.5 h1:d6O+rzEqasSfamGDA8u7bjtaq7hOX8Ha4Zn36Wxrkvo= github.com/linkdata/deadlock v0.5.5/go.mod h1:tXb28stzAD3trzEEK0UJWC+rZKuobCoPktPYzebb1u0= github.com/lithammer/shortuuid/v4 v4.2.0 h1:LMFOzVB3996a7b8aBuEXxqOBflbfPQAiVzkIcHO0h8c= github.com/lithammer/shortuuid/v4 v4.2.0/go.mod h1:D5noHZ2oFw/YaKCfGy0YxyE7M0wMbezmMjPdhyEFe6Y= github.com/livekit/gst-go v0.0.0-20250701011214-e7f61abd14cb h1:1Vjk6NaXJZQiCvXGlKv38ossk4mNKHy5ob+eZygewdw= github.com/livekit/gst-go v0.0.0-20250701011214-e7f61abd14cb/go.mod h1:pyCgY9XFSG0CAnJzoJ84R5XWn8rEj849EYJOwnAdB8k= github.com/livekit/livekit-server v1.9.12 h1:VsPJAL2EbiBKt5SIhZWazNUSkEUZi/8P8kttGXUE1zw= github.com/livekit/livekit-server v1.9.12/go.mod h1:Xh2ocHdH+z/D2u00GulDVVIDdgAlck1miHT0Ab2Skvg= github.com/livekit/mageutil v0.0.0-20250511045019-0f1ff63f7731 h1:9x+U2HGLrSw5ATTo469PQPkqzdoU7be46ryiCDO3boc= github.com/livekit/mageutil 
v0.0.0-20250511045019-0f1ff63f7731/go.mod h1:Rs3MhFwutWhGwmY1VQsygw28z5bWcnEYmS1OG9OxjOQ= github.com/livekit/media-sdk v0.0.0-20260422170315-2c3eed337496 h1:yIEbXERsObyjGGoTnv7Bf37pQfAHrxRmPAN//tgzwJU= github.com/livekit/media-sdk v0.0.0-20260422170315-2c3eed337496/go.mod h1:7ssWiG+U4xnbvLih9WiZbhQP6zIKMjgXdUtIE1bm/E8= github.com/livekit/mediatransportutil v0.0.0-20260113174415-2e8ba344fca3 h1:v1Xc/q/547TjLX7Nw5y2vXNnmV0XYFAbhTJrtErQeDA= github.com/livekit/mediatransportutil v0.0.0-20260113174415-2e8ba344fca3/go.mod h1:QBx/KHV6Vv00ggibg/WrOlqrkTciEA2Hc9DGWYr3Q9U= github.com/livekit/protocol v1.45.6 h1:E+wKxs8ckKNYYTNyHm5nR1ShGLJ5DmA+WCEb5AJG11A= github.com/livekit/protocol v1.45.6/go.mod h1:e6QdWDkfot+M2nRh0eitJUS0ZLuwvKCsfiz2pWWSG3s= github.com/livekit/psrpc v0.7.1 h1:ms37az0QTD3UXIWuUC5D/SkmKOlRMVRsI261eBWu/Vw= github.com/livekit/psrpc v0.7.1/go.mod h1:bZ4iHFQptTkbPnB0LasvRNu/OBYXEu1NA6O5BMFo9kk= github.com/livekit/server-sdk-go/v2 v2.16.2-0.20260401161108-50e969e2961f h1:xSUtbUe3wBIFG/Ki3KEIsmjkOcfbpSOYJh2xxwJEllg= github.com/livekit/server-sdk-go/v2 v2.16.2-0.20260401161108-50e969e2961f/go.mod h1:oQbYijcbPzfjBAOzoq7tz9Ktqur8JNRCd923VP8xOQQ= github.com/livekit/storage v0.0.0-20251113154014-aa1f4d0ce057 h1:6XTEL0cSGkDPWYl1nAS/3cNOK1QoIo11C/O4pc4vPMg= github.com/livekit/storage v0.0.0-20251113154014-aa1f4d0ce057/go.mod h1:m+EDdiNremMNJbggvfj5mY8w7nbzVGtZka5Jhj4pg0g= github.com/llehouerou/go-mp3 v1.2.0 h1:2WN/bjCGhfPZAQbSSF35DxNKS/+HPnJ76TakA7Kyscs= github.com/llehouerou/go-mp3 v1.2.0/go.mod h1:/Rl7E/VQpWTQDTJgr69iYVSkS1BZEh4X/ABV1XvIpHA= github.com/mackerelio/go-osstat v0.2.6 h1:gs4U8BZeS1tjrL08tt5VUliVvSWP26Ai2Ob8Lr7f2i0= github.com/mackerelio/go-osstat v0.2.6/go.mod h1:lRy8V9ZuHpuRVZh+vyTkODeDPl3/d5MgXHtLSaqG8bA= github.com/magefile/mage v1.15.0 h1:BvGheCMAsG3bWUDbZ8AyXXpCNwU9u5CB6sM+HNb9HYg= github.com/magefile/mage v1.15.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/mattn/go-colorable v0.1.13 
h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= github.com/mattn/go-ieproxy v0.0.12 h1:OZkUFJC3ESNZPQ+6LzC3VJIFSnreeFLQyqvBWtvfL2M= github.com/mattn/go-ieproxy v0.0.12/go.mod h1:Vn+N61199DAnVeTgaF8eoB9PvLO8P3OBnG95ENh7B7c= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/maxbrunsfeld/counterfeiter/v6 v6.12.1 h1:D4O2wLxB384TS3ohBJMfolnxb4qGmoZ1PnWNtit8LYo= github.com/maxbrunsfeld/counterfeiter/v6 v6.12.1/go.mod h1:RuJdxo0oI6dClIaMzdl3hewq3a065RH65dofJP03h8I= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/moby/api v1.52.0 h1:00BtlJY4MXkkt84WhUZPRqt5TvPbgig2FZvTbe3igYg= github.com/moby/moby/api v1.52.0/go.mod h1:8mb+ReTlisw4pS6BRzCMts5M49W5M7bKt1cJy/YbAqc= github.com/moby/moby/client v0.1.0 h1:nt+hn6O9cyJQqq5UWnFGqsZRTS/JirUqzPjEl0Bdc/8= github.com/moby/moby/client v0.1.0/go.mod h1:O+/tw5d4a1Ha/ZA/tPxIZJapJRUS6LNZ1wiVRxYHyUE= github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo= github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/nats-io/nats.go v1.48.0 h1:pSFyXApG+yWU/TgbKCjmm5K4wrHu86231/w84qRVR+U= github.com/nats-io/nats.go v1.48.0/go.mod 
h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g= github.com/nats-io/nkeys v0.4.15 h1:JACV5jRVO9V856KOapQ7x+EY8Jo3qw1vJt/9Jpwzkk4= github.com/nats-io/nkeys v0.4.15/go.mod h1:CpMchTXC9fxA5zrMo4KpySxNjiDVvr8ANOSZdiNfUrs= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= github.com/opencontainers/runc v1.3.3 h1:qlmBbbhu+yY0QM7jqfuat7M1H3/iXjju3VkP9lkFQr4= github.com/opencontainers/runc v1.3.3/go.mod h1:D7rL72gfWxVs9cJ2/AayxB0Hlvn9g0gaF1R7uunumSI= github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde h1:x0TT0RDC7UhAVbbWWBzr41ElhJx5tXPWkIHA2HWPRuw= github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0= github.com/ory/dockertest/v3 v3.12.0 h1:3oV9d0sDzlSQfHtIaB5k6ghUCVMVLpAY8hwrqoCyRCw= github.com/ory/dockertest/v3 v3.12.0/go.mod h1:aKNDTva3cp8dwOWwb9cWuX84aH5akkxXRvO7KCwWVjE= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= github.com/petermattis/goid v0.0.0-20250813065127-a731cc31b4fe h1:vHpqOnPlnkba8iSxU4j/CvDSS9J4+F4473esQsYLGoE= github.com/petermattis/goid v0.0.0-20250813065127-a731cc31b4fe/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= github.com/pion/datachannel 
v1.6.0 h1:XecBlj+cvsxhAMZWFfFcPyUaDZtd7IJvrXqlXD/53i0= github.com/pion/datachannel v1.6.0/go.mod h1:ur+wzYF8mWdC+Mkis5Thosk+u/VOL287apDNEbFpsIk= github.com/pion/dtls/v3 v3.1.2 h1:gqEdOUXLtCGW+afsBLO0LtDD8GnuBBjEy6HRtyofZTc= github.com/pion/dtls/v3 v3.1.2/go.mod h1:Hw/igcX4pdY69z1Hgv5x7wJFrUkdgHwAn/Q/uo7YHRo= github.com/pion/ice/v4 v4.2.0 h1:jJC8S+CvXCCvIQUgx+oNZnoUpt6zwc34FhjWwCU4nlw= github.com/pion/ice/v4 v4.2.0/go.mod h1:EgjBGxDgmd8xB0OkYEVFlzQuEI7kWSCFu+mULqaisy4= github.com/pion/interceptor v0.1.44 h1:sNlZwM8dWXU9JQAkJh8xrarC0Etn8Oolcniukmuy0/I= github.com/pion/interceptor v0.1.44/go.mod h1:4atVlBkcgXuUP+ykQF0qOCGU2j7pQzX2ofvPRFsY5RY= github.com/pion/logging v0.2.4 h1:tTew+7cmQ+Mc1pTBLKH2puKsOvhm32dROumOZ655zB8= github.com/pion/logging v0.2.4/go.mod h1:DffhXTKYdNZU+KtJ5pyQDjvOAh/GsNSyv1lbkFbe3so= github.com/pion/mdns/v2 v2.1.0 h1:3IJ9+Xio6tWYjhN6WwuY142P/1jA0D5ERaIqawg/fOY= github.com/pion/mdns/v2 v2.1.0/go.mod h1:pcez23GdynwcfRU1977qKU0mDxSeucttSHbCSfFOd9A= github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA= github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8= github.com/pion/rtcp v1.2.16 h1:fk1B1dNW4hsI78XUCljZJlC4kZOPk67mNRuQ0fcEkSo= github.com/pion/rtcp v1.2.16/go.mod h1:/as7VKfYbs5NIb4h6muQ35kQF/J0ZVNz2Z3xKoCBYOo= github.com/pion/rtp v1.10.1 h1:xP1prZcCTUuhO2c83XtxyOHJteISg6o8iPsE2acaMtA= github.com/pion/rtp v1.10.1/go.mod h1:rF5nS1GqbR7H/TCpKwylzeq6yDM+MM6k+On5EgeThEM= github.com/pion/sctp v1.9.2 h1:HxsOzEV9pWoeggv7T5kewVkstFNcGvhMPx0GvUOUQXo= github.com/pion/sctp v1.9.2/go.mod h1:OTOlsQ5EDQ6mQ0z4MUGXt2CgQmKyafBEXhUVqLRB6G8= github.com/pion/sdp/v3 v3.0.18 h1:l0bAXazKHpepazVdp+tPYnrsy9dfh7ZbT8DxesH5ZnI= github.com/pion/sdp/v3 v3.0.18/go.mod h1:ZREGo6A9ZygQ9XkqAj5xYCQtQpif0i6Pa81HOiAdqQ8= github.com/pion/srtp/v3 v3.0.10 h1:tFirkpBb3XccP5VEXLi50GqXhv5SKPxqrdlhDCJlZrQ= github.com/pion/srtp/v3 v3.0.10/go.mod h1:3mOTIB0cq9qlbn59V4ozvv9ClW/BSEbRp4cY0VtaR7M= github.com/pion/stun/v3 v3.1.1 
h1:CkQxveJ4xGQjulGSROXbXq94TAWu8gIX2dT+ePhUkqw= github.com/pion/stun/v3 v3.1.1/go.mod h1:qC1DfmcCTQjl9PBaMa5wSn3x9IPmKxSdcCsxBcDBndM= github.com/pion/transport/v3 v3.1.1 h1:Tr684+fnnKlhPceU+ICdrw6KKkTms+5qHMgw6bIkYOM= github.com/pion/transport/v3 v3.1.1/go.mod h1:+c2eewC5WJQHiAA46fkMMzoYZSuGzA/7E2FPrOYHctQ= github.com/pion/transport/v4 v4.0.1 h1:sdROELU6BZ63Ab7FrOLn13M6YdJLY20wldXW2Cu2k8o= github.com/pion/transport/v4 v4.0.1/go.mod h1:nEuEA4AD5lPdcIegQDpVLgNoDGreqM/YqmEx3ovP4jM= github.com/pion/turn/v4 v4.1.4 h1:EU11yMXKIsK43FhcUnjLlrhE4nboHZq+TXBIi3QpcxQ= github.com/pion/turn/v4 v4.1.4/go.mod h1:ES1DXVFKnOhuDkqn9hn5VJlSWmZPaRJLyBXoOeO/BmQ= github.com/pion/webrtc/v4 v4.2.7 h1:NAdsMXzQk/2yN1uV06SGxXqqVrkpDmNe09st/u16rrY= github.com/pion/webrtc/v4 v4.2.7/go.mod h1:IzslI8Dkj2FFIre/Ua4TU86aXi+oC8g/nP1CW6Yuw34= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc= github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.67.5 
h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4= github.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw= github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws= github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw= github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg= github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA= github.com/redis/go-redis/v9 v9.17.3 h1:fN29NdNrE17KttK5Ndf20buqfDZwGNgoUr9qjl1DQx4= github.com/redis/go-redis/v9 v9.17.3/go.mod h1:u410H11HMLoB+TP67dz8rL9s6QW2j76l0//kSOd3370= github.com/rodaine/protogofakeit v0.1.1 h1:ZKouljuRM3A+TArppfBqnH8tGZHOwM/pjvtXe9DaXH8= github.com/rodaine/protogofakeit v0.1.1/go.mod h1:pXn/AstBYMaSfc1/RqH3N82pBuxtWgejz1AlYpY1mI0= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/sclevine/spec v1.4.0 h1:z/Q9idDcay5m5irkZ28M7PtQM4aOISzOpj4bUPkDee8= github.com/sclevine/spec v1.4.0/go.mod h1:LvpgJaFyvQzRvc1kaDs0bulYwzC70PbiYjC4QnFHkOM= github.com/shoenig/test v1.7.0 h1:eWcHtTXa6QLnBvm0jgEabMRN/uJ4DMV3M8xUGgRkZmk= github.com/shoenig/test v1.7.0/go.mod h1:UxJ6u/x2v/TNs/LoLxBNJRV9DiwBBKYxXSyczsBHFoI= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo= github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= 
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/twitchtv/twirp v8.1.3+incompatible h1:+F4TdErPgSUbMZMwp13Q/KgDVuI7HJXP61mNV3/7iuU= github.com/twitchtv/twirp v8.1.3+incompatible/go.mod h1:RRJoFSAmTEh2weEqWtpPE3vFK5YBhA6bqp2l1kfCC5A= github.com/urfave/cli/v3 v3.3.9 h1:54roEDJcTWuucl6MSQ3B+pQqt1ePh/xOQokhEYl5Gfs= github.com/urfave/cli/v3 v3.3.9/go.mod h1:FJSKtM/9AiiTOJL4fJ6TbMUkxBXn7GO9guZqoZtpYpo= github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU= github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ= github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= github.com/zeebo/xxh3 v1.1.0 h1:s7DLGDK45Dyfg7++yxI0khrfwq9661w9EN78eP/UZVs= github.com/zeebo/xxh3 v1.1.0/go.mod h1:IisAie1LELR4xhVinxWS5+zf1lA4p0MW4T+w+W07F5s= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= go.opentelemetry.io/contrib/detectors/gcp v1.39.0 h1:kWRNZMsfBHZ+uHjiH4y7Etn2FK26LAGkNFw7RHv1DhE= 
go.opentelemetry.io/contrib/detectors/gcp v1.39.0/go.mod h1:t/OGqzHBa5v6RHZwrDBJ2OirWc+4q/w2fTbLZwAKjTk= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 h1:YH4g8lQroajqUwWbq/tr2QX1JFmEXaDLgG+ew9bLMWo= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 h1:rixTyDGXFxRy1xzhKrotaHy3/KXdPhlWARrCgK+eqUY= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0/go.mod h1:dowW6UsM9MKbJq5JTz2AMVp3/5iW5I/TStsk8S+CfHw= go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak 
v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= go.uber.org/zap/exp v0.3.0 h1:6JYzdifzYkGmTdRR59oYH+Ng7k49H9qVpWwNSsGJj3U= go.uber.org/zap/exp v0.3.0/go.mod h1:5I384qq7XGxYyByIhHm6jg5CHkGY0nsTfbDLgDDlgJQ= go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts= golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos= golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a h1:ovFr6Z0MNmU7nH8VaX5xqw+05ST2uO1exVfZPVqRC5o= golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a/go.mod h1:K79w1Vqn7PoiZn+TkNpx3BUWUQksGO3JcVX6qIjytmA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= 
golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.50.0 h1:ucWh9eiCGyDR3vtzso0WMQinm2Dnt8cFMuQa9K33J60= golang.org/x/net v0.50.0/go.mod h1:UgoSli3F/pBgdJBHCTc+tp3gmrU4XswgGRgtnwWTfyM= golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw= golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo= golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod 
h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk= golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA= golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.42.0 h1:uNgphsn75Tdz5Ji2q36v/nsFSfR/9BRFvqhGBaJGd5k= golang.org/x/tools v0.42.0/go.mod h1:Ma6lCIwGZvHK6XtgbswSoWroEkhugApmsXyrUmBhfr0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/api v0.238.0 h1:+EldkglWIg/pWjkq97sd+XxH7PxakNYoe/rkSTbnvOs= google.golang.org/api v0.238.0/go.mod h1:cOVEm2TpdAGHL2z+UwyS+kmlGr3bVWQQ6sYEqkKje50= google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 h1:1tXaIXCracvtsRxSBsYDiSBN0cuJvM7QYW+MrpIRY78= google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2/go.mod h1:49MsLSx0oWMOZqcpB3uL8ZOkAh1+TndpJ8ONoCBWiZk= google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57 h1:JLQynH/LBHfCTSbDWl+py8C+Rg/k1OVH3xfcaiANuF0= google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57/go.mod h1:kSJwQxqmFXeo79zOmbrALdflXQeAYcUbgS7PbpMknCY= google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57 
h1:mWPCjDEyshlQYzBpMNHaEof6UX1PmHcaUODUywQ0uac= google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= google.golang.org/grpc v1.79.3 h1:sybAEdRIEtvcD68Gx7dmnwjZKlyfuc61Dyo9pGXXkKE= google.golang.org/grpc v1.79.3/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= ================================================ FILE: magefile.go ================================================ // Copyright 2023 LiveKit, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
//go:build mage package main import ( "context" "encoding/json" "fmt" "os" "path" "runtime" "strings" "github.com/livekit/egress/version" "github.com/livekit/mageutil" ) const ( gstVersion = "1.24.12" libniceVersion = "0.1.21" chromiumVersion = "146.0.7680.177-1" dockerBuild = "docker build" dockerBuildX = "docker buildx build --push --platform linux/amd64,linux/arm64" ) type packageInfo struct { Dir string } func Proto() error { ctx := context.Background() fmt.Println("generating protobuf") // parse go mod output pkgOut, err := mageutil.Out(ctx, "go list -json -m github.com/livekit/protocol") if err != nil { return err } pi := packageInfo{} if err = json.Unmarshal(pkgOut, &pi); err != nil { return err } psrpcOut, err := mageutil.Out(ctx, "go list -json -m github.com/livekit/psrpc") if err != nil { return err } psrpcInfo := packageInfo{} if err = json.Unmarshal(psrpcOut, &psrpcInfo); err != nil { return err } _, err = mageutil.GetToolPath("protoc") if err != nil { return err } protocGoPath, err := mageutil.GetToolPath("protoc-gen-go") if err != nil { return err } protocGrpcGoPath, err := mageutil.GetToolPath("protoc-gen-go-grpc") if err != nil { return err } // generate grpc-related protos return mageutil.RunDir(ctx, "pkg/ipc", fmt.Sprintf( "protoc"+ " --go_out ."+ " --go-grpc_out ."+ " --go_opt=paths=source_relative"+ " --go-grpc_opt=paths=source_relative"+ " --plugin=go=%s"+ " --plugin=go-grpc=%s"+ " -I%s -I%s -I=. 
ipc.proto", protocGoPath, protocGrpcGoPath, pi.Dir+"/protobufs", psrpcInfo.Dir+"/protoc-gen-psrpc/options", )) } func EnsureMediaSamples() error { ctx := context.Background() const script = "build/test/fetch-media-samples.sh" if _, err := os.Stat(script); err != nil { return fmt.Errorf("missing %s: %w", script, err) } if err := mageutil.Run(ctx, script); err != nil { return err } if entries, _ := os.ReadDir("media-samples"); len(entries) == 0 { return fmt.Errorf("media-samples is empty after %s", script) } return nil } func Integration(configFile string) error { if err := EnsureMediaSamples(); err != nil { return err } ctx := context.Background() os.Setenv("DOCKER_BUILDKIT", "1") defer os.Unsetenv("DOCKER_BUILDKIT") if err := mageutil.Run(ctx, fmt.Sprintf("docker build --build-arg TEMPLATE_TAG=%s --build-arg DEADLOCK=1 -t egress-test -f build/test/Dockerfile .", version.TemplateVersion), ); err != nil { return err } return Retest(configFile) } func Retest(configFile string) error { if configFile != "" { if strings.HasPrefix(configFile, "test/") { configFile = configFile[5:] } else { oldLocation := configFile idx := strings.LastIndex(configFile, "/") if idx != -1 { configFile = configFile[idx+1:] } if err := os.Rename(oldLocation, "test/"+configFile); err != nil { return err } } configFile = "/out/" + configFile } defer Dotfiles() defer func() { // for some reason, these can't be deleted from within the docker container files, _ := os.ReadDir("test/output") for _, file := range files { if file.IsDir() { d, _ := os.ReadDir(path.Join("test/output", file.Name())) if len(d) == 0 { _ = os.RemoveAll(path.Join("test/output", file.Name())) } } } }() dir, err := os.Getwd() if err != nil { return err } return mageutil.Run(context.Background(), fmt.Sprintf("docker run --rm -e EGRESS_CONFIG_FILE=%s -v %s/test:/out egress-test", configFile, dir), ) } func Build() error { return mageutil.Run(context.Background(), fmt.Sprintf("docker pull livekit/chrome-installer:%s", 
chromiumVersion), fmt.Sprintf("docker pull livekit/gstreamer:%s-dev", gstVersion), fmt.Sprintf("docker build -t livekit/egress:latest --build-arg TEMPLATE_TAG=%s -f build/egress/Dockerfile .", version.TemplateVersion), ) } func BuildTemplate() error { return mageutil.Run(context.Background(), "docker pull ubuntu:24.04", "docker build -t livekit/egress-templates -f ./build/template/Dockerfile .", ) } func BuildGStreamer() error { return buildGstreamer(dockerBuild) } func buildGstreamer(cmd string) error { commands := []string{"docker pull ubuntu:23.10"} for _, build := range []string{"base", "dev", "prod", "prod-rs"} { commands = append(commands, fmt.Sprintf("%s"+ " --build-arg GSTREAMER_VERSION=%s"+ " --build-arg LIBNICE_VERSION=%s"+ " -t livekit/gstreamer:%s-%s"+ " -t livekit/gstreamer:%s-%s-%s"+ " -f build/gstreamer/Dockerfile-%s"+ " ./build/gstreamer", cmd, gstVersion, libniceVersion, gstVersion, build, gstVersion, build, runtime.GOARCH, build, )) } return mageutil.Run(context.Background(), commands...) } func Dotfiles() error { files, err := os.ReadDir("test/output") if err != nil { return err } dots := make(map[string]bool) pngs := make(map[string]bool) for _, file := range files { name := file.Name() if strings.HasSuffix(name, ".dot") { dots[name[:len(name)-4]] = true } else if strings.HasSuffix(file.Name(), ".png") { pngs[name[:len(name)-4]] = true } } for name := range dots { if !pngs[name] { if err := mageutil.Run(context.Background(), fmt.Sprintf( "dot -Tpng test/output/%s.dot -o test/output/%s.png", name, name, )); err != nil { return err } } } return nil } ================================================ FILE: pkg/config/base.go ================================================ // Copyright 2023 LiveKit, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package config import ( "os" "strings" "time" "github.com/livekit/protocol/logger" "github.com/livekit/protocol/logger/medialogutils" "github.com/livekit/protocol/redis" lksdk "github.com/livekit/server-sdk-go/v2" "github.com/livekit/egress/pkg/types" ) const TmpDir = "/home/egress/tmp" type BaseConfig struct { NodeID string // do not supply - will be overwritten // required Redis *redis.RedisConfig `yaml:"redis"` // redis config ApiKey string `yaml:"api_key"` // (env LIVEKIT_API_KEY) ApiSecret string `yaml:"api_secret"` // (env LIVEKIT_API_SECRET) WsUrl string `yaml:"ws_url"` // (env LIVEKIT_WS_URL) // optional Logging *logger.Config `yaml:"logging"` // logging config TemplateBase string `yaml:"template_base"` // custom template base url ClusterID string `yaml:"cluster_id"` // cluster this instance belongs to EnableChromeSandbox bool `yaml:"enable_chrome_sandbox"` // enable Chrome sandbox, requires extra docker configuration MaxUploadQueue int `yaml:"max_upload_queue"` // maximum upload queue size, in minutes DisallowLocalStorage bool `yaml:"disallow_local_storage"` // require an upload config for all requests IOCreateTimeout time.Duration `yaml:"io_create_timeout"` // timeout for CreateEgress calls IOUpdateTimeout time.Duration `yaml:"io_update_timeout"` // timeout for UpdateEgress calls IOSelectionTimeout time.Duration `yaml:"io_selection_timeout"` // timeout for affinity stage of IO RPC IOWorkers int `yaml:"io_workers"` // number of IO update workers SessionLimits `yaml:"session_limits"` // session duration limits StorageConfig *StorageConfig 
`yaml:"storage,omitempty"` // storage config BackupConfig *StorageConfig `yaml:"backup,omitempty"` // backup config, for storage failures S3AssumeRoleKey string `yaml:"s3_assume_role_key"` // if set, this key is used for S3 uploads to assume the role defined in the assume_role_arn field of the S3 config S3AssumeRoleSecret string `yaml:"s3_assume_role_secret"` // if set, this secret is used for S3 uploads to assume the role defined in the assume_role_arn field of the S3 config S3AssumeRoleArn string `yaml:"s3_assume_role_arn"` // if set, this arn is used by default for S3 uploads S3AssumeRoleExternalID string `yaml:"s3_assume_role_external_id"` // if set, this external ID is used by default for S3 uploads // advanced Insecure bool `yaml:"insecure"` // allow chrome to connect to an insecure websocket, bypasses chrome LNA checks Debug DebugConfig `yaml:"debug"` // create dot file on internal error ChromeFlags map[string]interface{} `yaml:"chrome_flags"` // additional flags to pass to Chrome Latency LatencyConfig `yaml:"latency"` // gstreamer latencies, modifying these may break the service LatencyOverrides map[types.RequestType]LatencyConfig `yaml:"latency_overrides"` // latency overrides for different request types, experimental only, will be removed EnableOneShotSenderReportSync bool `yaml:"enable_one_shot_sender_report_sync"` // temporary rollout flag enabling one-shot sender report correction for room composite / track requests that previously used audio PTS adjustment disabling AudioTempoController AudioTempoController `yaml:"audio_tempo_controller"` // audio tempo controller TestOverrides TestOverrides `yaml:"test_overrides"` // set of config overrides for testing purposes } type SessionLimits struct { FileOutputMaxDuration time.Duration `yaml:"file_output_max_duration"` FileOutputMaxSize int64 `yaml:"file_output_max_size"` // max on-disk size in bytes before stopping; 0 to disable StreamOutputMaxDuration time.Duration `yaml:"stream_output_max_duration"` 
SegmentOutputMaxDuration time.Duration `yaml:"segment_output_max_duration"` ImageOutputMaxDuration time.Duration `yaml:"image_output_max_duration"` } type DebugConfig struct { EnableProfiling bool `yaml:"enable_profiling"` // create dot file and pprof on internal error EnableTrackLogging bool `yaml:"enable_track_logging"` // log packets and keyframes for each track EnableStreamLogging bool `yaml:"enable_stream_logging"` // log bytes and keyframes for each stream EnableChromeLogging bool `yaml:"enable_chrome_logging"` // log all chrome console events StorageConfig `yaml:",inline"` // upload config (S3, Azure, GCP, or AliOSS) } type LatencyConfig struct { JitterBufferLatency time.Duration `yaml:"jitter_buffer_latency"` // jitter buffer max latency for sdk egress AudioMixerLatency time.Duration `yaml:"audio_mixer_latency"` // audio mixer latency, must be greater than jitter buffer latency PipelineLatency time.Duration `yaml:"pipeline_latency"` // max latency for the entire pipeline RTPMaxAllowedTsDiff time.Duration `ymal:"rtp_max_allowed_ts_diff"` // max allowed PTS discont. 
for a RTP stream, before applying PTS alignment RTPMaxDriftAdjustment time.Duration `ymal:"rtp_max_drift_adjustment,omitempty"` // max allowed drift adjustment for a RTP stream RTPDriftAdjustmentWindowPercent float64 `ymal:"rtp_drift_adjustment_window_percent,omitempty"` // how much to throttle drift adjustment, 0.0 disables it OldPacketThreshold time.Duration `yaml:"old_packet_threshold,omitempty"` // syncrhonizer drops packets older than this, 0 to disable packet drops } type AudioTempoController struct { Enabled bool `yaml:"enabled"` // enable audio tempo adjustments for compensating PTS drift AdjustmentRate float64 `yaml:"adjustment_rate"` // rate at which to adjust the tempo to compensate for PTS drift } func (c *BaseConfig) initLogger(values ...interface{}) error { _, exists := os.LookupEnv("GST_DEBUG") // If GST_DEBUG is not set, use pre-defined values based on logging level if !exists { var gstDebug []string switch c.Logging.Level { case "debug": gstDebug = []string{"3"} case "info", "warn": gstDebug = []string{"2"} case "error": gstDebug = []string{"1"} } gstDebug = append(gstDebug, "rtmpclient:4", "srtlib:1", ) if err := os.Setenv("GST_DEBUG", strings.Join(gstDebug, ",")); err != nil { return err } } zl, err := logger.NewZapLogger(c.Logging) if err != nil { return err } l := zl.WithValues(values...) logger.SetLogger(l, "egress") lksdk.SetLogger(medialogutils.NewOverrideLogger(l.WithComponent("lksdk"))) return nil } func (c *BaseConfig) getLatencyConfig(requestType types.RequestType) LatencyConfig { if override, ok := c.LatencyOverrides[requestType]; ok { return override } return c.Latency } ================================================ FILE: pkg/config/config_test.go ================================================ // Copyright 2023 LiveKit, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package config import ( "os" "testing" "github.com/stretchr/testify/require" "github.com/livekit/egress/pkg/types" "github.com/livekit/protocol/livekit" ) func TestSegmentNaming(t *testing.T) { t.Cleanup(func() { _ = os.RemoveAll("conf_test/") }) for _, test := range []struct { filenamePrefix string playlistName string livePlaylistName string expectedStorageDir string expectedPlaylistFilename string expectedLivePlaylistFilename string expectedSegmentPrefix string }{ { filenamePrefix: "", playlistName: "playlist", livePlaylistName: "", expectedStorageDir: "", expectedPlaylistFilename: "playlist.m3u8", expectedLivePlaylistFilename: "", expectedSegmentPrefix: "playlist", }, { filenamePrefix: "", playlistName: "conf_test/playlist", livePlaylistName: "conf_test/live_playlist", expectedStorageDir: "conf_test/", expectedPlaylistFilename: "playlist.m3u8", expectedLivePlaylistFilename: "live_playlist.m3u8", expectedSegmentPrefix: "playlist", }, { filenamePrefix: "filename", playlistName: "", livePlaylistName: "live_playlist2.m3u8", expectedStorageDir: "", expectedPlaylistFilename: "filename.m3u8", expectedLivePlaylistFilename: "live_playlist2.m3u8", expectedSegmentPrefix: "filename", }, { filenamePrefix: "filename", playlistName: "playlist", livePlaylistName: "", expectedStorageDir: "", expectedPlaylistFilename: "playlist.m3u8", expectedLivePlaylistFilename: "", expectedSegmentPrefix: "filename", }, { filenamePrefix: "filename", playlistName: "conf_test/", livePlaylistName: "", expectedStorageDir: "conf_test/", expectedPlaylistFilename: "filename.m3u8", 
expectedLivePlaylistFilename: "", expectedSegmentPrefix: "filename", }, { filenamePrefix: "filename", playlistName: "conf_test/playlist", livePlaylistName: "", expectedStorageDir: "conf_test/", expectedPlaylistFilename: "playlist.m3u8", expectedLivePlaylistFilename: "", expectedSegmentPrefix: "filename", }, { filenamePrefix: "conf_test/", playlistName: "playlist", livePlaylistName: "", expectedStorageDir: "conf_test/", expectedPlaylistFilename: "playlist.m3u8", expectedLivePlaylistFilename: "", expectedSegmentPrefix: "playlist", }, { filenamePrefix: "conf_test/filename", playlistName: "playlist", livePlaylistName: "", expectedStorageDir: "conf_test/", expectedPlaylistFilename: "playlist.m3u8", expectedLivePlaylistFilename: "", expectedSegmentPrefix: "filename", }, { filenamePrefix: "conf_test/filename", playlistName: "conf_test/playlist", livePlaylistName: "", expectedStorageDir: "conf_test/", expectedPlaylistFilename: "playlist.m3u8", expectedLivePlaylistFilename: "", expectedSegmentPrefix: "filename", }, { filenamePrefix: "conf_test_2/filename", playlistName: "conf_test/playlist", livePlaylistName: "", expectedStorageDir: "conf_test/", expectedPlaylistFilename: "playlist.m3u8", expectedLivePlaylistFilename: "", expectedSegmentPrefix: "conf_test_2/filename", }, } { p := &PipelineConfig{Info: &livekit.EgressInfo{EgressId: "egress_ID"}} seg := &livekit.SegmentedFileOutput{ FilenamePrefix: test.filenamePrefix, PlaylistName: test.playlistName, LivePlaylistName: test.livePlaylistName, } o, err := p.getSegmentConfig(seg, seg) require.NoError(t, err) require.Equal(t, test.expectedStorageDir, o.StorageDir) require.Equal(t, test.expectedPlaylistFilename, o.PlaylistFilename) require.Equal(t, test.expectedLivePlaylistFilename, o.LivePlaylistFilename) require.Equal(t, test.expectedSegmentPrefix, o.SegmentPrefix) } } func TestValidateAndUpdateOutputParamsRejectsHLSMP3(t *testing.T) { p := &PipelineConfig{ Outputs: map[types.EgressType][]OutputConfig{ types.EgressTypeSegments: 
{ &SegmentConfig{outputConfig: outputConfig{OutputType: types.OutputTypeHLS}}, }, }, } p.AudioEnabled = true p.VideoEnabled = false p.AudioOutCodec = types.MimeTypeMP3 p.Info = &livekit.EgressInfo{} err := p.validateAndUpdateOutputParams() require.Error(t, err) require.ErrorContains(t, err, "format application/x-mpegurl incompatible with codec audio/mpeg") } func TestValidateAndUpdateOutputParamsRejectsVideoFileMP3(t *testing.T) { p := &PipelineConfig{ Outputs: map[types.EgressType][]OutputConfig{ types.EgressTypeFile: { &FileConfig{outputConfig: outputConfig{OutputType: types.OutputTypeMP3}}, }, }, } p.AudioEnabled = true p.VideoEnabled = true p.AudioOutCodec = types.MimeTypeMP3 p.VideoOutCodec = types.MimeTypeH264 p.Info = &livekit.EgressInfo{} err := p.validateAndUpdateOutputParams() require.Error(t, err) require.ErrorContains(t, err, "format audio/mpeg incompatible with codec video/h264") } ================================================ FILE: pkg/config/encoding.go ================================================ // Copyright 2023 LiveKit, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package config import ( "github.com/livekit/egress/pkg/errors" "github.com/livekit/egress/pkg/types" "github.com/livekit/protocol/livekit" ) func (p *PipelineConfig) applyPreset(preset livekit.EncodingOptionsPreset) { switch preset { case livekit.EncodingOptionsPreset_H264_720P_30: p.Width = 1280 p.Height = 720 p.Framerate = 30 p.VideoBitrate = 3000 case livekit.EncodingOptionsPreset_H264_720P_60: p.Width = 1280 p.Height = 720 p.Framerate = 60 p.VideoBitrate = 4500 case livekit.EncodingOptionsPreset_H264_1080P_30: p.Width = 1920 p.Height = 1080 p.Framerate = 30 p.VideoBitrate = 4500 case livekit.EncodingOptionsPreset_H264_1080P_60: p.Width = 1920 p.Height = 1080 p.Framerate = 60 p.VideoBitrate = 6000 case livekit.EncodingOptionsPreset_PORTRAIT_H264_720P_30: p.Width = 720 p.Height = 1280 p.Framerate = 30 p.VideoBitrate = 3000 case livekit.EncodingOptionsPreset_PORTRAIT_H264_720P_60: p.Width = 720 p.Height = 1280 p.Framerate = 60 p.VideoBitrate = 4500 case livekit.EncodingOptionsPreset_PORTRAIT_H264_1080P_30: p.Width = 1080 p.Height = 1920 p.Framerate = 30 p.VideoBitrate = 4500 case livekit.EncodingOptionsPreset_PORTRAIT_H264_1080P_60: p.Width = 1080 p.Height = 1920 p.Framerate = 60 p.VideoBitrate = 6000 } } func (p *PipelineConfig) applyAdvanced(advanced *livekit.EncodingOptions) error { // audio switch advanced.AudioCodec { case livekit.AudioCodec_OPUS: p.AudioOutCodec = types.MimeTypeOpus case livekit.AudioCodec_AAC: p.AudioOutCodec = types.MimeTypeAAC case livekit.AudioCodec_AC_MP3: p.AudioOutCodec = types.MimeTypeMP3 } if advanced.AudioBitrate != 0 { p.AudioBitrate = advanced.AudioBitrate } if advanced.AudioFrequency != 0 { p.AudioFrequency = advanced.AudioFrequency } // video switch advanced.VideoCodec { case livekit.VideoCodec_H264_BASELINE: p.VideoOutCodec = types.MimeTypeH264 p.VideoProfile = types.ProfileBaseline case livekit.VideoCodec_H264_MAIN: p.VideoOutCodec = types.MimeTypeH264 case livekit.VideoCodec_H264_HIGH: p.VideoOutCodec = types.MimeTypeH264 
p.VideoProfile = types.ProfileHigh } if advanced.Width > 0 { if advanced.Width < 16 || advanced.Width%2 == 1 { return errors.ErrInvalidInput("width") } p.Width = advanced.Width } if advanced.Height > 0 { if advanced.Height < 16 || advanced.Height%2 == 1 { return errors.ErrInvalidInput("height") } p.Height = advanced.Height } switch advanced.Depth { case 0: case 8, 16, 24: p.Depth = advanced.Depth default: return errors.ErrInvalidInput("depth") } if advanced.Framerate != 0 { p.Framerate = advanced.Framerate } if advanced.VideoBitrate != 0 { p.VideoBitrate = advanced.VideoBitrate } if advanced.KeyFrameInterval != 0 { p.KeyFrameInterval = advanced.KeyFrameInterval } return nil } ================================================ FILE: pkg/config/manifest.go ================================================ // Copyright 2023 LiveKit, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package config import ( "bytes" "encoding/json" "time" "github.com/linkdata/deadlock" ) type Manifest struct { EgressID string `json:"egress_id,omitempty"` RoomID string `json:"room_id,omitempty"` RoomName string `json:"room_name,omitempty"` Url string `json:"url,omitempty"` StartedAt int64 `json:"started_at,omitempty"` EndedAt int64 `json:"ended_at,omitempty"` PublisherIdentity string `json:"publisher_identity,omitempty"` TrackID string `json:"track_id,omitempty"` TrackKind string `json:"track_kind,omitempty"` TrackSource string `json:"track_source,omitempty"` AudioTrackID string `json:"audio_track_id,omitempty"` VideoTrackID string `json:"video_track_id,omitempty"` mu deadlock.Mutex Files []*File `json:"files,omitempty"` Playlists []*Playlist `json:"playlists,omitempty"` Images []*Image `json:"images,omitempty"` } type File struct { Filename string `json:"filename,omitempty"` Location string `json:"location,omitempty"` } type Playlist struct { mu deadlock.Mutex Location string `json:"location,omitempty"` Segments []*Segment `json:"segments,omitempty"` } type Segment struct { Filename string `json:"filename,omitempty"` Location string `json:"location,omitempty"` } type Image struct { Filename string `json:"filename,omitempty"` Timestamp time.Time `json:"timestamp,omitempty"` Location string `json:"location,omitempty"` } func (p *PipelineConfig) initManifest() { if p.shouldCreateManifest() { p.Manifest = &Manifest{ EgressID: p.Info.EgressId, RoomID: p.Info.RoomId, RoomName: p.Info.RoomName, Url: p.WebUrl, StartedAt: p.Info.StartedAt, PublisherIdentity: p.Identity, TrackID: p.TrackID, TrackKind: p.TrackKind, TrackSource: p.TrackSource, AudioTrackID: p.AudioTrackID, VideoTrackID: p.VideoTrackID, } } } func (p *PipelineConfig) shouldCreateManifest() bool { if p.BackupConfig != nil { return true } if fc := p.GetFileConfig(); fc != nil && !fc.DisableManifest { return true } if sc := p.GetSegmentConfig(); sc != nil && !sc.DisableManifest { return true } for _, ic := 
range p.GetImageConfigs() { if !ic.DisableManifest { return true } } return false } func (m *Manifest) AddFile(filename, location string) { m.mu.Lock() m.Files = append(m.Files, &File{ Filename: filename, Location: location, }) m.mu.Unlock() } func (m *Manifest) AddPlaylist() *Playlist { p := &Playlist{} m.mu.Lock() m.Playlists = append(m.Playlists, p) m.mu.Unlock() return p } func (p *Playlist) UpdateLocation(location string) { p.mu.Lock() p.Location = location p.mu.Unlock() } func (p *Playlist) AddSegment(filename, location string) { p.mu.Lock() p.Segments = append(p.Segments, &Segment{ Filename: filename, Location: location, }) p.mu.Unlock() } func (m *Manifest) AddImage(filename string, ts time.Time, location string) { m.mu.Lock() m.Images = append(m.Images, &Image{ Filename: filename, Timestamp: ts, Location: location, }) m.mu.Unlock() } func (m *Manifest) Close(endedAt int64) ([]byte, error) { m.EndedAt = endedAt buf := bytes.NewBuffer(nil) enc := json.NewEncoder(buf) enc.SetEscapeHTML(false) if err := enc.Encode(m); err != nil { return nil, err } return buf.Bytes(), nil } ================================================ FILE: pkg/config/output.go ================================================ // Copyright 2023 LiveKit, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package config import ( "net/url" "github.com/livekit/egress/pkg/errors" "github.com/livekit/egress/pkg/types" "github.com/livekit/protocol/egress" "github.com/livekit/protocol/livekit" ) const StreamKeyframeInterval = 4.0 type OutputConfig interface { GetOutputType() types.OutputType } type outputConfig struct { types.OutputType } func (o outputConfig) GetOutputType() types.OutputType { return o.OutputType } func (p *PipelineConfig) updateEncodedOutputs(req egress.EncodedOutput) error { files := req.GetFileOutputs() streams := req.GetStreamOutputs() segments := req.GetSegmentOutputs() images := req.GetImageOutputs() // file output var file *livekit.EncodedFileOutput switch len(files) { case 0: if r, ok := req.(egress.EncodedOutputDeprecated); ok { file = r.GetFile() } case 1: file = files[0] default: return errors.ErrInvalidInput("multiple file outputs") } if file != nil { conf, err := p.getEncodedFileConfig(file) if err != nil { return err } p.Outputs[types.EgressTypeFile] = []OutputConfig{conf} p.OutputCount.Inc() p.FinalizationRequired = true if p.VideoEnabled { p.VideoEncoding = true } p.Info.FileResults = []*livekit.FileInfo{conf.FileInfo} if len(streams)+len(segments)+len(images) == 0 { p.Info.Result = &livekit.EgressInfo_File{File: conf.FileInfo} } } // stream output var stream *livekit.StreamOutput switch len(streams) { case 0: if r, ok := req.(egress.EncodedOutputDeprecated); ok { stream = r.GetStream() } case 1: stream = streams[0] default: return errors.ErrInvalidInput("multiple stream outputs") } if stream != nil { var outputType types.OutputType switch stream.Protocol { case livekit.StreamProtocol_DEFAULT_PROTOCOL: if len(stream.Urls) == 0 { return errors.ErrInvalidInput("stream protocol") } parsed, err := url.Parse(stream.Urls[0]) if err != nil { return errors.ErrInvalidUrl(stream.Urls[0], err.Error()) } var ok bool outputType, ok = types.StreamOutputTypes[parsed.Scheme] if !ok { return errors.ErrInvalidUrl(stream.Urls[0], "invalid protocol") } case 
livekit.StreamProtocol_RTMP: outputType = types.OutputTypeRTMP case livekit.StreamProtocol_SRT: outputType = types.OutputTypeSRT } conf, err := p.getStreamConfig(outputType, stream.Urls) if err != nil { return err } p.Outputs[types.EgressTypeStream] = []OutputConfig{conf} p.OutputCount.Add(int32(len(stream.Urls))) if p.VideoEnabled { p.VideoEncoding = true } streamInfoList := make([]*livekit.StreamInfo, 0, len(stream.Urls)) conf.Streams.Range(func(_, stream any) bool { streamInfoList = append(streamInfoList, stream.(*Stream).StreamInfo) return true }) p.Info.StreamResults = streamInfoList if len(files)+len(segments)+len(images) == 0 { // empty stream output only valid in combination with other outputs if len(stream.Urls) == 0 { return errors.ErrInvalidInput("stream url") } p.Info.Result = &livekit.EgressInfo_Stream{Stream: &livekit.StreamInfoList{Info: streamInfoList}} //nolint:staticcheck // keep deprecated field for older clients } } // segment output var segment *livekit.SegmentedFileOutput switch len(segments) { case 0: if r, ok := req.(egress.EncodedOutputDeprecated); ok { segment = r.GetSegments() } case 1: segment = segments[0] default: return errors.ErrInvalidInput("multiple segmented file outputs") } if segment != nil { conf, err := p.getSegmentConfig(segment, segment) if err != nil { return err } p.Outputs[types.EgressTypeSegments] = []OutputConfig{conf} p.OutputCount.Inc() p.FinalizationRequired = true if p.VideoEnabled { p.VideoEncoding = true } p.Info.SegmentResults = []*livekit.SegmentsInfo{conf.SegmentsInfo} if len(streams)+len(files)+len(images) == 0 { p.Info.Result = &livekit.EgressInfo_Segments{Segments: conf.SegmentsInfo} } } if segmentConf := p.Outputs[types.EgressTypeSegments]; segmentConf != nil { if stream != nil && p.KeyFrameInterval > 0 { // segment duration must match keyframe interval - use the lower of the two conf := segmentConf[0].(*SegmentConfig) conf.SegmentDuration = min(int(p.KeyFrameInterval), conf.SegmentDuration) } 
p.KeyFrameInterval = 0 } else if p.KeyFrameInterval == 0 && p.Outputs[types.EgressTypeStream] != nil { // default 4s for streams p.KeyFrameInterval = StreamKeyframeInterval } // image output if len(images) > 0 { if !p.VideoEnabled { return errors.ErrInvalidInput("audio_only images") } if len(p.Outputs) == 0 { // enforce video only p.AudioEnabled = false p.AudioTrackID = "" p.AudioTranscoding = false } for _, img := range images { conf, err := p.getImageConfig(img, img) if err != nil { return err } p.Outputs[types.EgressTypeImages] = append(p.Outputs[types.EgressTypeImages], conf) p.OutputCount.Inc() p.FinalizationRequired = true p.Info.ImageResults = append(p.Info.ImageResults, conf.ImagesInfo) } } if p.OutputCount.Load() == 0 { return errors.ErrInvalidInput("output") } return nil } func (p *PipelineConfig) updateOutputs(req *livekit.ExportReplayRequest) error { if len(req.Outputs) == 0 { return errors.ErrInvalidInput("output") } // Non-live pipelines produce data faster than realtime. Stream outputs // (RTMP, WebSocket) cannot ingest faster than 1x playback speed. 
if !p.Live { for _, output := range req.Outputs { if _, ok := output.Config.(*livekit.Output_Stream); ok { return errors.ErrNotSupported("stream output for non-live pipeline") } } } var hasFile, hasStream, hasSegments bool var fileCount, streamCount, segmentCount int for _, output := range req.Outputs { storage := resolveStorageConfig(output.Storage, req.Storage) switch o := output.Config.(type) { case *livekit.Output_File: fileCount++ if fileCount > 1 { return errors.ErrInvalidInput("multiple file outputs") } hasFile = true conf, err := p.getFileConfig(fileTypeToOutputType(o.File.FileType), o.File.GetFilepath(), o.File.GetDisableManifest(), storage) if err != nil { return err } p.Outputs[types.EgressTypeFile] = []OutputConfig{conf} p.OutputCount.Inc() p.FinalizationRequired = true if p.VideoEnabled { p.VideoEncoding = true } p.Info.FileResults = []*livekit.FileInfo{conf.FileInfo} case *livekit.Output_Stream: stream := o.Stream streamCount++ if streamCount > 1 { return errors.ErrInvalidInput("multiple stream outputs") } hasStream = true var outputType types.OutputType var egressType types.EgressType switch stream.Protocol { case livekit.StreamProtocol_DEFAULT_PROTOCOL: if len(stream.Urls) == 0 { return errors.ErrInvalidInput("stream protocol") } parsed, err := url.Parse(stream.Urls[0]) if err != nil { return errors.ErrInvalidUrl(stream.Urls[0], err.Error()) } var ok bool outputType, ok = types.StreamOutputTypes[parsed.Scheme] if !ok { return errors.ErrInvalidUrl(stream.Urls[0], "invalid protocol") } if parsed.Scheme == "ws" || parsed.Scheme == "wss" { egressType = types.EgressTypeWebsocket } else { egressType = types.EgressTypeStream } case livekit.StreamProtocol_RTMP: outputType = types.OutputTypeRTMP egressType = types.EgressTypeStream case livekit.StreamProtocol_SRT: outputType = types.OutputTypeSRT egressType = types.EgressTypeStream case livekit.StreamProtocol_WEBSOCKET: outputType = types.OutputTypeRaw egressType = types.EgressTypeWebsocket } // websocket is 
audio-only if egressType == types.EgressTypeWebsocket && p.VideoEnabled && p.AudioEnabled { p.VideoEnabled = false p.VideoDecoding = false } conf, err := p.getStreamConfig(outputType, stream.Urls) if err != nil { return err } p.Outputs[egressType] = []OutputConfig{conf} p.OutputCount.Add(int32(len(stream.Urls))) if p.VideoEnabled { p.VideoEncoding = true } streamInfoList := make([]*livekit.StreamInfo, 0, len(stream.Urls)) conf.Streams.Range(func(_, s any) bool { streamInfoList = append(streamInfoList, s.(*Stream).StreamInfo) return true }) p.Info.StreamResults = streamInfoList case *livekit.Output_Segments: segmentCount++ if segmentCount > 1 { return errors.ErrInvalidInput("multiple segmented file outputs") } hasSegments = true conf, err := p.getSegmentConfig(o.Segments, storage) if err != nil { return err } p.Outputs[types.EgressTypeSegments] = []OutputConfig{conf} p.OutputCount.Inc() p.FinalizationRequired = true if p.VideoEnabled { p.VideoEncoding = true } p.Info.SegmentResults = []*livekit.SegmentsInfo{conf.SegmentsInfo} case *livekit.Output_Images: if !p.VideoEnabled { return errors.ErrInvalidInput("audio_only images") } conf, err := p.getImageConfig(o.Images, storage) if err != nil { return err } p.Outputs[types.EgressTypeImages] = append(p.Outputs[types.EgressTypeImages], conf) p.OutputCount.Inc() p.FinalizationRequired = true p.Info.ImageResults = append(p.Info.ImageResults, conf.ImagesInfo) default: return errors.ErrInvalidInput("output config") } } if p.OutputCount.Load() == 0 { return errors.ErrInvalidInput("output") } // image-only: enforce video only if !hasFile && !hasStream && !hasSegments && len(p.Outputs[types.EgressTypeImages]) > 0 { p.AudioEnabled = false p.AudioTranscoding = false } // populate deprecated single-result field for older clients if hasFile && !hasStream && !hasSegments && len(p.Outputs[types.EgressTypeImages]) == 0 { if fc := p.GetFileConfig(); fc != nil { p.Info.Result = &livekit.EgressInfo_File{File: fc.FileInfo} } } else if 
// updateDirectOutput configures the single output of a track egress request,
// which records a track directly (no transcoding) to either a file or a
// websocket stream. Exactly one output variant must be set on the request.
func (p *PipelineConfig) updateDirectOutput(req *livekit.TrackEgressRequest) error {
	switch o := req.Output.(type) {
	case *livekit.TrackEgressRequest_File:
		conf, err := p.getDirectFileConfig(o.File)
		if err != nil {
			return err
		}
		// Register both the result list and the deprecated single-result field.
		p.Info.FileResults = []*livekit.FileInfo{conf.FileInfo}
		p.Info.Result = &livekit.EgressInfo_File{File: conf.FileInfo}
		p.Outputs[types.EgressTypeFile] = []OutputConfig{conf}
		p.OutputCount.Inc()
		// File outputs need finalization (close/upload) when the egress ends.
		p.FinalizationRequired = true
	case *livekit.TrackEgressRequest_WebsocketUrl:
		// Websocket output sends raw media to the single provided url.
		conf, err := p.getStreamConfig(types.OutputTypeRaw, []string{o.WebsocketUrl})
		if err != nil {
			return err
		}
		// Collect the StreamInfo created for the url (one expected).
		streamInfoList := make([]*livekit.StreamInfo, 0, 1)
		conf.Streams.Range(func(_, stream any) bool {
			streamInfoList = append(streamInfoList, stream.(*Stream).StreamInfo)
			return true
		})
		p.Info.StreamResults = streamInfoList
		p.Info.Result = &livekit.EgressInfo_Stream{Stream: &livekit.StreamInfoList{Info: streamInfoList}} //nolint:staticcheck // keep deprecated field for older clients
		p.Outputs[types.EgressTypeWebsocket] = []OutputConfig{conf}
		p.OutputCount.Inc()
	default:
		// No recognized output variant on the request.
		return errors.ErrInvalidInput("output")
	}
	return nil
}
		return types.OutputTypeUnknownFile
	}
}

// getFileConfig builds the FileConfig shared by encoded and direct file
// outputs: it resolves the storage destination, enforces retry-safe paths,
// and expands filename template placeholders.
func (p *PipelineConfig) getFileConfig(outputType types.OutputType, filepath string, disableManifest bool, upload egress.UploadRequest) (*FileConfig, error) {
	sc, err := p.getStorageConfig(upload)
	if err != nil {
		return nil, err
	}

	filepath = clean(filepath)
	// On retry, explicit paths must include {retry} to avoid overwriting previous attempt.
	// Empty or directory-only paths are auto-generated with retry count appended.
	if p.Info.RetryCount > 0 && filepath != "" && !strings.HasSuffix(filepath, "/") && !strings.Contains(filepath, "{retry}") {
		return nil, errors.ErrNonRetryableOutput
	}

	conf := &FileConfig{
		outputConfig:    outputConfig{OutputType: outputType},
		FileInfo:        &livekit.FileInfo{},
		StorageFilepath: filepath,
		DisableManifest: disableManifest,
		StorageConfig:   sc,
	}

	// filename
	identifier, replacements := p.getFilenameInfo()
	if conf.OutputType != types.OutputTypeUnknownFile {
		// known output type: extension and auto-generated names can be applied now
		err := conf.updateFilepath(p, identifier, replacements)
		if err != nil {
			return nil, err
		}
	} else {
		// unknown type (direct track egress): the extension is resolved later,
		// so only expand template placeholders here
		conf.StorageFilepath = stringReplace(conf.StorageFilepath, replacements)
	}

	return conf, nil
}

// getFilenameInfo returns the identifier used for auto-generated filenames
// (the room name, or "web" when no room is set) together with the template
// replacements ({retry}, {room_name}, {room_id}, {time}, {utc}) applied to
// user-supplied paths.
func (p *PipelineConfig) getFilenameInfo() (string, map[string]string) {
	now := time.Now()
	// {utc}: second-resolution timestamp with milliseconds appended
	utc := fmt.Sprintf("%s%03d", now.Format("20060102150405"), now.UnixMilli()%1000)
	replacements := make(map[string]string)
	if p.Info.RetryCount > 0 {
		replacements["{retry}"] = fmt.Sprintf("%d", p.Info.RetryCount)
	}
	if p.Info.RoomName != "" {
		replacements["{room_name}"] = p.Info.RoomName
		replacements["{room_id}"] = p.Info.RoomId
		replacements["{time}"] = now.Format("2006-01-02T150405")
		replacements["{utc}"] = utc
		return p.Info.RoomName, replacements
	}

	replacements["{time}"] = now.Format("2006-01-02T150405")
	replacements["{utc}"] = utc
	return "web", replacements
}

// updateFilepath expands template placeholders in the storage path, appends
// the correct file extension for the output type, and derives the local
// (tmp dir) filepath the pipeline writes to before upload.
func (o *FileConfig) updateFilepath(p *PipelineConfig, identifier string, replacements map[string]string) error {
	o.StorageFilepath = stringReplace(o.StorageFilepath, replacements)

	// get file extension
	ext :=
types.FileExtensionForOutputType[o.OutputType]
	if o.StorageFilepath == "" || strings.HasSuffix(o.StorageFilepath, "/") {
		// generate filepath
		baseName := fmt.Sprintf("%s-%s", identifier, time.Now().Format("2006-01-02T150405"))
		if p.Info.RetryCount > 0 {
			baseName = fmt.Sprintf("%s-%d", baseName, p.Info.RetryCount)
		}
		o.StorageFilepath = fmt.Sprintf("%s%s%s", o.StorageFilepath, baseName, ext)
	} else if !strings.HasSuffix(o.StorageFilepath, string(ext)) {
		// check for existing (incorrect) extension
		if extIdx := strings.LastIndex(o.StorageFilepath, "."); extIdx > -1 {
			existingExt := types.FileExtension(o.StorageFilepath[extIdx:])
			if _, ok := types.FileExtensions[existingExt]; ok {
				// a recognized but wrong extension: drop it before appending the right one
				o.StorageFilepath = o.StorageFilepath[:extIdx]
			}
		}
		// add file extension
		o.StorageFilepath = o.StorageFilepath + string(ext)
	}

	// update filename
	o.FileInfo.Filename = o.StorageFilepath

	// get local filepath
	_, filename := path.Split(o.StorageFilepath)

	// write to tmp dir
	o.LocalFilepath = path.Join(p.TmpDir, filename)

	return nil
}

// clean normalizes a user-supplied storage path: it resolves "." and ".."
// segments via path.Clean, strips any leading "../" so paths cannot escape
// upward, and preserves a trailing slash so directory-only paths stay
// distinguishable from file paths.
func clean(filepath string) string {
	hasEndingSlash := strings.HasSuffix(filepath, "/")
	filepath = path.Clean(filepath)
	for strings.HasPrefix(filepath, "../") {
		filepath = filepath[3:]
	}
	if filepath == "" || filepath == "." || filepath == ".." {
		return ""
	}
	if hasEndingSlash {
		return filepath + "/"
	}
	return filepath
}

================================================
FILE: pkg/config/output_image.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package config

import (
	"fmt"
	"os"
	"path"
	"time"

	"github.com/livekit/egress/pkg/errors"
	"github.com/livekit/egress/pkg/types"
	"github.com/livekit/protocol/egress"
	"github.com/livekit/protocol/livekit"
	"github.com/livekit/protocol/utils"
)

// ImageConfig holds the resolved settings for one image (snapshot) output.
type ImageConfig struct {
	outputConfig

	Id string // Used internally to map a gst Bin/element back to a sink and as part of the path

	ImagesInfo      *livekit.ImagesInfo
	LocalDir        string
	StorageDir      string
	ImagePrefix     string
	ImageSuffix     livekit.ImageFileSuffix
	ImageExtension  types.FileExtension
	DisableManifest bool
	StorageConfig   *StorageConfig
	CaptureInterval uint32
	Width           int32
	Height          int32
	ImageOutCodec   types.MimeType
}

// GetImageConfigs returns every configured image output.
func (p *PipelineConfig) GetImageConfigs() []*ImageConfig {
	o := p.Outputs[types.EgressTypeImages]

	var configs []*ImageConfig
	for _, c := range o {
		configs = append(configs, c.(*ImageConfig))
	}
	return configs
}

// getImageConfig builds an ImageConfig from an ImageOutput request: it
// resolves codec/output type, storage, capture interval defaults, and
// default dimensions where the source provides them.
func (p *PipelineConfig) getImageConfig(images *livekit.ImageOutput, upload egress.UploadRequest) (*ImageConfig, error) {
	outCodec, outputType, err := getMimeTypes(images.ImageCodec)
	if err != nil {
		return nil, err
	}

	sc, err := p.getStorageConfig(upload)
	if err != nil {
		return nil, err
	}

	filenamePrefix := clean(images.FilenamePrefix)
	conf := &ImageConfig{
		outputConfig: outputConfig{
			OutputType: outputType,
		},
		Id: utils.NewGuid(""),
		ImagesInfo: &livekit.ImagesInfo{
			FilenamePrefix: filenamePrefix,
		},
		ImagePrefix:     filenamePrefix,
		ImageSuffix:     images.FilenameSuffix,
		DisableManifest: images.DisableManifest,
		StorageConfig:   sc,
		CaptureInterval: images.CaptureInterval,
		Width:           images.Width,
		Height:          images.Height,
		ImageOutCodec:   outCodec,
	}
	if conf.CaptureInterval == 0 {
		// 10s by default
		conf.CaptureInterval = 10
	}

	// Set default dimensions for RoomComposite and Web. For all SDKs input, default will be
	// set from the track dimensions
	switch req := p.Info.Request.(type) {
	case *livekit.EgressInfo_RoomComposite, *livekit.EgressInfo_Web:
		if conf.Width == 0 {
			conf.Width = p.Width
		}
		if conf.Height == 0 {
			conf.Height = p.Height
		}
	case *livekit.EgressInfo_Replay:
		// replay sources backed by a rendered page also default to the
		// pipeline dimensions
		switch req.Replay.Source.(type) {
		case *livekit.ExportReplayRequest_Template, *livekit.ExportReplayRequest_Web:
			if conf.Width == 0 {
				conf.Width = p.Width
			}
			if conf.Height == 0 {
				conf.Height = p.Height
			}
		}
	}

	// filename
	err = conf.updatePrefix(p)
	if err != nil {
		return nil, err
	}

	return conf, nil
}

// updatePrefix expands template placeholders in the image prefix, splits out
// the storage directory, generates a default prefix when none was given,
// and creates the per-output local working directory.
func (o *ImageConfig) updatePrefix(p *PipelineConfig) error {
	identifier, replacements := p.getFilenameInfo()

	o.ImagePrefix = stringReplace(o.ImagePrefix, replacements)
	o.ImagesInfo.FilenamePrefix = stringReplace(o.ImagesInfo.FilenamePrefix, replacements)

	o.ImageExtension = types.FileExtensionForOutputType[o.OutputType]

	imagesDir, imagesPrefix := path.Split(o.ImagePrefix)
	o.StorageDir = imagesDir

	// ensure playlistName
	if imagesPrefix == "" {
		imagesPrefix = fmt.Sprintf("%s-%s", identifier, time.Now().Format("2006-01-02T150405"))
		if p.Info.RetryCount > 0 {
			imagesPrefix = fmt.Sprintf("%s-%d", imagesPrefix, p.Info.RetryCount)
		}
	}

	// update config
	o.ImagePrefix = imagesPrefix

	// Prepend the configuration base directory and the egress Id, and slug to prevent conflict if
	// there is more than one image output
	// os.ModeDir creates a directory with mode 000 when mapping the directory outside the container
	o.LocalDir = path.Join(p.TmpDir, o.Id)

	return os.MkdirAll(o.LocalDir, 0755)
}

// getMimeTypes maps the requested image codec to internal mime/output types;
// only JPEG (and the default, which resolves to JPEG) is supported.
func getMimeTypes(imageCodec livekit.ImageCodec) (types.MimeType, types.OutputType, error) {
	switch imageCodec {
	case livekit.ImageCodec_IC_DEFAULT, livekit.ImageCodec_IC_JPEG:
		return types.MimeTypeJPEG, types.OutputTypeJPEG, nil
	default:
		return "", "", errors.ErrNoCompatibleCodec
	}
}

================================================
FILE: pkg/config/output_segment.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package config

import (
	"fmt"
	"os"
	"path"
	"strings"
	"time"

	"github.com/livekit/egress/pkg/errors"
	"github.com/livekit/egress/pkg/types"
	"github.com/livekit/protocol/egress"
	"github.com/livekit/protocol/livekit"
)

// SegmentConfig holds the resolved settings for segmented (HLS) output.
type SegmentConfig struct {
	outputConfig

	SegmentsInfo         *livekit.SegmentsInfo
	LocalDir             string
	StorageDir           string
	PlaylistFilename     string
	LivePlaylistFilename string
	SegmentPrefix        string
	SegmentSuffix        livekit.SegmentedFileSuffix
	SegmentDuration      int
	DisableManifest      bool
	StorageConfig        *StorageConfig
}

// GetSegmentConfig returns the first segment output config, or nil when no
// segment output is configured.
func (p *PipelineConfig) GetSegmentConfig() *SegmentConfig {
	o, ok := p.Outputs[types.EgressTypeSegments]
	if !ok || len(o) == 0 {
		return nil
	}
	return o[0].(*SegmentConfig)
}

// segments should always be added last, so we can check keyframe interval from file/stream
func (p *PipelineConfig) getSegmentConfig(segments *livekit.SegmentedFileOutput, upload egress.UploadRequest) (*SegmentConfig, error) {
	sc, err := p.getStorageConfig(upload)
	if err != nil {
		return nil, err
	}

	prefix := clean(segments.FilenamePrefix)
	playlist := clean(segments.PlaylistName)
	// On retry, segment filenames are "{prefix}_{index}.ts" so prefix must contain {retry}
	// to avoid overwriting. When prefix is empty it derives from playlist name, so playlist
	// must contain {retry}. When both are empty, names are auto-generated with retry count.
if p.Info.RetryCount > 0 { if prefix != "" && !strings.Contains(prefix, "{retry}") { return nil, errors.ErrNonRetryableOutput } if prefix == "" && playlist != "" && !strings.Contains(playlist, "{retry}") { return nil, errors.ErrNonRetryableOutput } } conf := &SegmentConfig{ SegmentsInfo: &livekit.SegmentsInfo{}, SegmentPrefix: prefix, SegmentSuffix: segments.FilenameSuffix, PlaylistFilename: playlist, LivePlaylistFilename: clean(segments.LivePlaylistName), SegmentDuration: int(segments.SegmentDuration), DisableManifest: segments.DisableManifest, StorageConfig: sc, } if conf.SegmentDuration == 0 { conf.SegmentDuration = 4 } switch segments.Protocol { case livekit.SegmentedFileProtocol_DEFAULT_SEGMENTED_FILE_PROTOCOL, livekit.SegmentedFileProtocol_HLS_PROTOCOL: conf.OutputType = types.OutputTypeHLS } // filename if err = conf.updatePrefixAndPlaylist(p); err != nil { return nil, err } return conf, nil } func removeKnownExtension(filename string) string { if extIdx := strings.LastIndex(filename, "."); extIdx > -1 { existingExt := types.FileExtension(filename[extIdx:]) if _, ok := types.FileExtensions[existingExt]; ok { filename = filename[:extIdx] } filename = filename[:extIdx] } return filename } func (o *SegmentConfig) updatePrefixAndPlaylist(p *PipelineConfig) error { identifier, replacements := p.getFilenameInfo() o.SegmentPrefix = stringReplace(o.SegmentPrefix, replacements) o.PlaylistFilename = stringReplace(o.PlaylistFilename, replacements) o.LivePlaylistFilename = stringReplace(o.LivePlaylistFilename, replacements) ext := types.FileExtensionForOutputType[o.OutputType] playlistDir, playlistName := path.Split(o.PlaylistFilename) livePlaylistDir, livePlaylistName := path.Split(o.LivePlaylistFilename) segmentDir, segmentPrefix := path.Split(o.SegmentPrefix) // force live playlist to be in the same directory as the main playlist if livePlaylistDir != "" && livePlaylistDir != playlistDir { return errors.ErrInvalidInput("live_playlist_name must be in same directory as 
playlist_name") } // remove extension from playlist name playlistName = removeKnownExtension(playlistName) livePlaylistName = removeKnownExtension(livePlaylistName) // only keep segmentDir if it is a subdirectory of playlistDir if segmentDir != "" { switch playlistDir { case segmentDir: segmentDir = "" case "": playlistDir = segmentDir segmentDir = "" } } o.StorageDir = playlistDir // ensure playlistName if playlistName == "" { if segmentPrefix != "" { playlistName = segmentPrefix } else { playlistName = fmt.Sprintf("%s-%s", identifier, time.Now().Format("2006-01-02T150405")) if p.Info.RetryCount > 0 { playlistName = fmt.Sprintf("%s-%d", playlistName, p.Info.RetryCount) } } } // live playlist disabled by default // ensure filePrefix if segmentPrefix == "" { segmentPrefix = playlistName } // update config o.StorageDir = playlistDir o.PlaylistFilename = fmt.Sprintf("%s%s", playlistName, ext) if livePlaylistName != "" { o.LivePlaylistFilename = fmt.Sprintf("%s%s", livePlaylistName, ext) } o.SegmentPrefix = fmt.Sprintf("%s%s", segmentDir, segmentPrefix) if o.PlaylistFilename == o.LivePlaylistFilename { return errors.ErrInvalidInput("live_playlist_name cannot be identical to playlist_name") } // Prepend the configuration base directory and the egress Id // os.ModeDir creates a directory with mode 000 when mapping the directory outside the container o.LocalDir = p.TmpDir if segmentDir != "" { if err := os.MkdirAll(path.Join(o.LocalDir, segmentDir), 0755); err != nil { return err } } o.SegmentsInfo.PlaylistName = path.Join(o.StorageDir, o.PlaylistFilename) if o.LivePlaylistFilename != "" { o.SegmentsInfo.LivePlaylistName = path.Join(o.StorageDir, o.LivePlaylistFilename) } return nil } ================================================ FILE: pkg/config/output_stream.go ================================================ // Copyright 2023 LiveKit, Inc. 
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package config

import (
	"sync"
	"sync/atomic"
	"time"

	"github.com/livekit/egress/pkg/types"
	"github.com/livekit/protocol/livekit"
	"github.com/livekit/protocol/logger"
)

// StreamConfig holds all destinations for a single stream output.
type StreamConfig struct {
	outputConfig

	// url -> Stream
	Streams sync.Map

	twitchTemplate string
}

// Stream tracks a single stream destination and its reported state.
type Stream struct {
	Name        string // gstreamer stream ID
	ParsedUrl   string // parsed/validated url
	RedactedUrl string // url with stream key removed
	StreamID    string // stream ID used by rtmpconnection
	StreamInfo  *livekit.StreamInfo

	// unix nanos of the last retry update sent for this stream
	lastRetryUpdate atomic.Int64
}

// GetStreamConfig returns the stream output config, or nil when none is set.
func (p *PipelineConfig) GetStreamConfig() *StreamConfig {
	o, ok := p.Outputs[types.EgressTypeStream]
	if !ok || len(o) == 0 {
		return nil
	}
	return o[0].(*StreamConfig)
}

// GetWebsocketConfig returns the websocket output config, or nil when none is set.
func (p *PipelineConfig) GetWebsocketConfig() *StreamConfig {
	o, ok := p.Outputs[types.EgressTypeWebsocket]
	if !ok || len(o) == 0 {
		return nil
	}
	return o[0].(*StreamConfig)
}

// getStreamConfig builds a StreamConfig for the given urls and sets the
// output codecs implied by the stream protocol.
func (p *PipelineConfig) getStreamConfig(outputType types.OutputType, urls []string) (*StreamConfig, error) {
	conf := &StreamConfig{
		outputConfig: outputConfig{OutputType: outputType},
	}
	for _, rawUrl := range urls {
		_, err := conf.AddStream(rawUrl, outputType)
		if err != nil {
			return nil, err
		}
	}

	switch outputType {
	case types.OutputTypeRTMP, types.OutputTypeSRT:
		p.AudioOutCodec = types.MimeTypeAAC
		p.VideoOutCodec = types.MimeTypeH264
	case types.OutputTypeRaw:
		p.AudioOutCodec = types.MimeTypeRawAudio
	}

	return conf, nil
}

// UpdateEndTime records the stream end time and derives its duration.
func (s *Stream) UpdateEndTime(endedAt int64) {
s.StreamInfo.EndedAt = endedAt if s.StreamInfo.StartedAt == 0 { if s.StreamInfo.Status != livekit.StreamInfo_FAILED { logger.Warnw("stream missing start time", nil, "url", s.RedactedUrl) } s.StreamInfo.StartedAt = endedAt } else { s.StreamInfo.Duration = endedAt - s.StreamInfo.StartedAt } } func (s *Stream) ShouldSendRetryUpdate(now time.Time, minInterval time.Duration) bool { last := s.lastRetryUpdate.Load() if last == 0 || now.UnixNano()-last >= int64(minInterval) { s.lastRetryUpdate.Store(now.UnixNano()) return true } return false } ================================================ FILE: pkg/config/pipeline.go ================================================ // Copyright 2023 LiveKit, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package config import ( "context" "fmt" "net/url" "path" "strings" "time" "github.com/go-gst/go-gst/gst/app" "github.com/pion/webrtc/v4" "go.opentelemetry.io/otel" "go.uber.org/atomic" "google.golang.org/protobuf/proto" "gopkg.in/yaml.v3" "github.com/livekit/protocol/egress" "github.com/livekit/protocol/livekit" "github.com/livekit/protocol/logger" "github.com/livekit/protocol/rpc" lksdk "github.com/livekit/server-sdk-go/v2" "github.com/livekit/egress/pkg/errors" "github.com/livekit/egress/pkg/pipeline/tempo" "github.com/livekit/egress/pkg/types" ) type PipelineConfig struct { BaseConfig `yaml:",inline"` HandlerID string `yaml:"handler_id"` TmpDir string `yaml:"tmp_dir"` types.RequestType `yaml:"-"` SourceConfig `yaml:"-"` AudioConfig `yaml:"-"` VideoConfig `yaml:"-"` Outputs map[types.EgressType][]OutputConfig `yaml:"-"` OutputCount atomic.Int32 `yaml:"-"` FinalizationRequired bool `yaml:"-"` Info *livekit.EgressInfo `yaml:"-"` Manifest *Manifest `yaml:"-"` Live bool `yaml:"-"` StorageObserver StorageObserver `yaml:"-"` } // IsReplay returns true when this is a replay/export pipeline. Use this for // replay-specific integration points (IPC calls, storage access). For generic // pipeline behavior (is-live, leaky queues, backpressure) use the Live field. 
func (p *PipelineConfig) IsReplay() bool { return !p.Live } type StorageObserver interface { OnStorageEvent(egressID, operation, path string, size, lifetimeDays int64) } var ( tracer = otel.Tracer("github.com/livekit/egress/pkg/config") ) type SourceConfig struct { SourceType types.SourceType WebSourceParams SDKSourceParams } type WebSourceParams struct { AwaitStartSignal bool Display string Layout string Token string BaseUrl string WebUrl string } type SDKSourceParams struct { TrackID string AudioTrackID string VideoTrackID string Identity string TrackSource string TrackKind string ScreenShare bool VideoInCodec types.MimeType AudioTracks []*TrackSource VideoTrack *TrackSource AudioRoutes []AudioRouteConfig } type AudioRouteConfig struct { Match AudioRouteMatch Channel livekit.AudioChannel } type AudioRouteMatch struct { TrackID string ParticipantIdentity string ParticipantKind *lksdk.ParticipantKind } type TrackSource struct { TrackID string TrackKind lksdk.TrackKind ParticipantKind lksdk.ParticipantKind AudioChannel *livekit.AudioChannel AppSrc *app.Source MimeType types.MimeType PayloadType webrtc.PayloadType ClockRate uint32 TempoController *tempo.Controller OnKeyframeRequired func() } type AudioConfig struct { AudioEnabled bool AudioTranscoding bool AudioOutCodec types.MimeType AudioBitrate int32 AudioFrequency int32 AudioMixing livekit.AudioMixing } type VideoConfig struct { VideoEnabled bool VideoDecoding bool VideoEncoding bool VideoOutCodec types.MimeType VideoProfile types.Profile Width int32 Height int32 Depth int32 Framerate int32 VideoBitrate int32 KeyFrameInterval float64 } func NewPipelineConfig(confString string, req *rpc.StartEgressRequest) (*PipelineConfig, error) { p := &PipelineConfig{ BaseConfig: BaseConfig{ Logging: &logger.Config{ Level: "info", }, }, Outputs: make(map[types.EgressType][]OutputConfig), Live: true, } if err := yaml.Unmarshal([]byte(confString), p); err != nil { return nil, errors.ErrCouldNotParseConfig(err) } if err := 
p.initLogger( "nodeID", p.NodeID, "handlerID", p.HandlerID, "clusterID", p.ClusterID, "egressID", req.EgressId, ); err != nil { return nil, err } return p, p.Update(req) } func GetValidatedPipelineConfig(conf *ServiceConfig, req *rpc.StartEgressRequest) (*PipelineConfig, error) { _, span := tracer.Start(context.Background(), "config.GetValidatedPipelineConfig") defer span.End() p := &PipelineConfig{ BaseConfig: conf.BaseConfig, TmpDir: path.Join(TmpDir, req.EgressId), Outputs: make(map[types.EgressType][]OutputConfig), Live: true, } return p, p.Update(req) } func (p *PipelineConfig) Update(request *rpc.StartEgressRequest) error { if request.EgressId == "" { return errors.ErrInvalidInput("egressID") } // start with defaults now := time.Now().UnixNano() p.Info = &livekit.EgressInfo{ EgressId: request.EgressId, RoomId: request.RoomId, RoomName: request.RoomName, Status: livekit.EgressStatus_EGRESS_STARTING, StartedAt: now, UpdatedAt: now, RetryCount: request.RetryCount, } p.AudioConfig = AudioConfig{ AudioBitrate: 128, AudioFrequency: 44100, } p.VideoConfig = VideoConfig{ VideoProfile: types.ProfileMain, Width: 1280, Height: 720, Depth: 24, Framerate: 30, VideoBitrate: 3000, } connectionInfoRequired := true switch req := request.Request.(type) { case *rpc.StartEgressRequest_RoomComposite: p.RequestType = types.RequestTypeRoomComposite clone := proto.Clone(req.RoomComposite).(*livekit.RoomCompositeEgressRequest) p.Info.Request = &livekit.EgressInfo_RoomComposite{ RoomComposite: clone, } egress.RedactEncodedOutputs(clone) if ShouldUseSDKSource(req.RoomComposite) { p.AudioMixing = req.RoomComposite.AudioMixing p.SourceType = types.SourceTypeSDK } else { p.SourceType = types.SourceTypeWeb } p.AwaitStartSignal = true p.Info.RoomName = req.RoomComposite.RoomName p.Layout = req.RoomComposite.Layout if req.RoomComposite.CustomBaseUrl != "" { p.BaseUrl = req.RoomComposite.CustomBaseUrl } else { p.BaseUrl = p.TemplateBase } baseUrl, err := url.Parse(p.BaseUrl) if err != nil || 
!isHttp(baseUrl) { return errors.ErrInvalidInput("template base url") } if !req.RoomComposite.VideoOnly { p.AudioEnabled = true p.AudioTranscoding = true } if !req.RoomComposite.AudioOnly { p.VideoEnabled = true p.VideoInCodec = types.MimeTypeRawVideo p.VideoDecoding = true } if !p.AudioEnabled && !p.VideoEnabled { return errors.ErrInvalidInput("audio_only and video_only") } // encoding options switch opts := req.RoomComposite.Options.(type) { case *livekit.RoomCompositeEgressRequest_Preset: p.applyPreset(opts.Preset) case *livekit.RoomCompositeEgressRequest_Advanced: if err = p.applyAdvanced(opts.Advanced); err != nil { return err } } // output params if err = p.updateEncodedOutputs(req.RoomComposite); err != nil { return err } case *rpc.StartEgressRequest_Web: p.RequestType = types.RequestTypeWeb clone := proto.Clone(req.Web).(*livekit.WebEgressRequest) p.Info.Request = &livekit.EgressInfo_Web{ Web: clone, } egress.RedactEncodedOutputs(clone) connectionInfoRequired = false p.SourceType = types.SourceTypeWeb p.AwaitStartSignal = req.Web.AwaitStartSignal p.WebUrl = req.Web.Url webUrl, err := url.Parse(p.WebUrl) if err != nil || !isHttp(webUrl) { return errors.ErrInvalidInput("web url") } if !req.Web.VideoOnly { p.AudioEnabled = true p.AudioTranscoding = true } if !req.Web.AudioOnly { p.VideoEnabled = true p.VideoInCodec = types.MimeTypeRawVideo p.VideoDecoding = true } if !p.AudioEnabled && !p.VideoEnabled { return errors.ErrInvalidInput("audio_only and video_only") } // encoding options switch opts := req.Web.Options.(type) { case *livekit.WebEgressRequest_Preset: p.applyPreset(opts.Preset) case *livekit.WebEgressRequest_Advanced: if err = p.applyAdvanced(opts.Advanced); err != nil { return err } } // output params if err = p.updateEncodedOutputs(req.Web); err != nil { return err } case *rpc.StartEgressRequest_Participant: p.RequestType = types.RequestTypeParticipant clone := proto.Clone(req.Participant).(*livekit.ParticipantEgressRequest) p.Info.Request = 
&livekit.EgressInfo_Participant{ Participant: clone, } egress.RedactEncodedOutputs(clone) p.SourceType = types.SourceTypeSDK p.Info.RoomName = req.Participant.RoomName p.AudioEnabled = true p.AudioTranscoding = true p.VideoEnabled = true p.VideoDecoding = true p.Identity = req.Participant.Identity p.ScreenShare = req.Participant.ScreenShare if p.Identity == "" { return errors.ErrInvalidInput("identity") } // encoding options switch opts := req.Participant.Options.(type) { case *livekit.ParticipantEgressRequest_Preset: p.applyPreset(opts.Preset) case *livekit.ParticipantEgressRequest_Advanced: if err := p.applyAdvanced(opts.Advanced); err != nil { return err } } // output params if err := p.updateEncodedOutputs(req.Participant); err != nil { return err } case *rpc.StartEgressRequest_TrackComposite: p.RequestType = types.RequestTypeTrackComposite clone := proto.Clone(req.TrackComposite).(*livekit.TrackCompositeEgressRequest) p.Info.Request = &livekit.EgressInfo_TrackComposite{ TrackComposite: clone, } egress.RedactEncodedOutputs(clone) p.SourceType = types.SourceTypeSDK p.Info.RoomName = req.TrackComposite.RoomName if audioTrackID := req.TrackComposite.AudioTrackId; audioTrackID != "" { p.AudioEnabled = true p.AudioTrackID = audioTrackID p.AudioTranscoding = true } if videoTrackID := req.TrackComposite.VideoTrackId; videoTrackID != "" { p.VideoEnabled = true p.VideoTrackID = videoTrackID p.VideoDecoding = true } if !p.AudioEnabled && !p.VideoEnabled { return errors.ErrInvalidInput("audio_track_id or video_track_id") } // encoding options switch opts := req.TrackComposite.Options.(type) { case *livekit.TrackCompositeEgressRequest_Preset: p.applyPreset(opts.Preset) case *livekit.TrackCompositeEgressRequest_Advanced: if err := p.applyAdvanced(opts.Advanced); err != nil { return err } } // output params if err := p.updateEncodedOutputs(req.TrackComposite); err != nil { return err } case *rpc.StartEgressRequest_Track: p.RequestType = types.RequestTypeTrack clone := 
proto.Clone(req.Track).(*livekit.TrackEgressRequest) p.Info.Request = &livekit.EgressInfo_Track{ Track: clone, } egress.RedactDirectOutputs(clone) p.SourceType = types.SourceTypeSDK p.Info.RoomName = req.Track.RoomName p.TrackID = req.Track.TrackId if p.TrackID == "" { return errors.ErrInvalidInput("track_id") } if err := p.updateDirectOutput(req.Track); err != nil { return err } case *rpc.StartEgressRequest_Replay: replayReq := req.Replay clone := proto.Clone(replayReq).(*livekit.ExportReplayRequest) p.Info.Request = &livekit.EgressInfo_Replay{ Replay: clone, } egress.RedactStartEgressRequest(clone) switch source := replayReq.Source.(type) { case *livekit.ExportReplayRequest_Template: tmpl := source.Template p.RequestType = types.RequestTypeTemplate if ShouldUseSDKSource(tmpl) { p.SourceType = types.SourceTypeSDK } else { p.SourceType = types.SourceTypeWeb } p.AwaitStartSignal = true p.Layout = tmpl.Layout if tmpl.CustomBaseUrl != "" { p.BaseUrl = tmpl.CustomBaseUrl } else { p.BaseUrl = p.TemplateBase } baseUrl, err := url.Parse(p.BaseUrl) if err != nil || !isHttp(baseUrl) { return errors.ErrInvalidInput("template base url") } if !tmpl.VideoOnly { p.AudioEnabled = true p.AudioTranscoding = true } if !tmpl.AudioOnly { p.VideoEnabled = true p.VideoInCodec = types.MimeTypeRawVideo p.VideoDecoding = true } if !p.AudioEnabled && !p.VideoEnabled { return errors.ErrInvalidInput("audio_only and video_only") } case *livekit.ExportReplayRequest_Web: web := source.Web p.RequestType = types.RequestTypeWeb connectionInfoRequired = false p.SourceType = types.SourceTypeWeb p.AwaitStartSignal = web.AwaitStartSignal p.WebUrl = web.Url webUrl, err := url.Parse(p.WebUrl) if err != nil || !isHttp(webUrl) { return errors.ErrInvalidInput("web url") } if !web.VideoOnly { p.AudioEnabled = true p.AudioTranscoding = true } if !web.AudioOnly { p.VideoEnabled = true p.VideoInCodec = types.MimeTypeRawVideo p.VideoDecoding = true } if !p.AudioEnabled && !p.VideoEnabled { return 
errors.ErrInvalidInput("audio_only and video_only") } case *livekit.ExportReplayRequest_Media: media := source.Media p.RequestType = types.RequestTypeMedia p.SourceType = types.SourceTypeSDK // data config not yet supported if media.Data != nil { return errors.ErrFeatureDisabled("data track egress") } // video switch v := media.Video.(type) { case *livekit.MediaSource_VideoTrackId: p.VideoEnabled = true p.VideoDecoding = true p.VideoTrackID = v.VideoTrackId case *livekit.MediaSource_ParticipantVideo: p.VideoEnabled = true p.VideoDecoding = true p.Identity = v.ParticipantVideo.Identity p.ScreenShare = v.ParticipantVideo.PreferScreenShare } // audio if media.Audio != nil { p.AudioEnabled = true p.AudioTranscoding = true for _, route := range media.Audio.Routes { arc := AudioRouteConfig{ Channel: route.Channel, } switch m := route.Match.(type) { case *livekit.AudioRoute_TrackId: arc.Match.TrackID = m.TrackId case *livekit.AudioRoute_ParticipantIdentity: arc.Match.ParticipantIdentity = m.ParticipantIdentity case *livekit.AudioRoute_ParticipantKind: kind := lksdk.ParticipantKind(m.ParticipantKind) arc.Match.ParticipantKind = &kind } p.AudioRoutes = append(p.AudioRoutes, arc) } } if !p.AudioEnabled && !p.VideoEnabled { return errors.ErrInvalidInput("audio or video") } default: return errors.ErrInvalidInput("source") } // encoding options switch opts := replayReq.Encoding.(type) { case *livekit.ExportReplayRequest_Preset: p.applyPreset(opts.Preset) case *livekit.ExportReplayRequest_Advanced: if err := p.applyAdvanced(opts.Advanced); err != nil { return err } } // output params if err := p.updateOutputs(replayReq); err != nil { return err } default: return errors.ErrInvalidInput("request") } switch p.SourceType { case types.SourceTypeWeb: p.Info.SourceType = livekit.EgressSourceType_EGRESS_SOURCE_TYPE_WEB case types.SourceTypeSDK: p.Info.SourceType = livekit.EgressSourceType_EGRESS_SOURCE_TYPE_SDK } // connection info if connectionInfoRequired { // token if request.Token 
!= "" { p.Token = request.Token } else if p.ApiKey != "" && p.ApiSecret != "" && p.Info.RoomName != "" { token, err := egress.BuildEgressToken(p.Info.EgressId, p.ApiKey, p.ApiSecret, p.Info.RoomName) if err != nil { return err } p.Token = token } else { return errors.ErrInvalidInput("token or api key/secret") } // url if request.WsUrl != "" { p.WsUrl = request.WsUrl } else if p.WsUrl == "" { return errors.ErrInvalidInput("ws_url") } } p.Latency = p.getLatencyConfig(p.RequestType) applyLatencyDefaults(&p.Latency) if p.RequestType != types.RequestTypeTrack { err := p.validateAndUpdateOutputParams() if err != nil { return err } } p.initManifest() return nil } func ShouldUseSDKSource(req interface { GetLayout() string GetAudioOnly() bool GetCustomBaseUrl() string }) bool { return req.GetAudioOnly() && req.GetLayout() == "" && req.GetCustomBaseUrl() == "" } func (p *PipelineConfig) validateAndUpdateOutputParams() error { compatibleAudioCodecs, compatibleVideoCodecs, err := p.validateAndUpdateOutputCodecs() if err != nil { return err } // Find a compatible file format if not set err = p.updateOutputType(compatibleAudioCodecs, compatibleVideoCodecs) if err != nil { return err } // Select a codec compatible with all outputs if p.AudioEnabled { for _, o := range p.GetEncodedOutputs() { if compatibleAudioCodecs[types.DefaultAudioCodecs[o.GetOutputType()]] { p.AudioOutCodec = types.DefaultAudioCodecs[o.GetOutputType()] break } } if p.AudioOutCodec == "" { // No default codec found. Pick a random compatible one for k := range compatibleAudioCodecs { p.AudioOutCodec = k } } } if p.VideoEnabled { for _, o := range p.GetEncodedOutputs() { if compatibleVideoCodecs[types.DefaultVideoCodecs[o.GetOutputType()]] { p.VideoOutCodec = types.DefaultVideoCodecs[o.GetOutputType()] break } } if p.VideoOutCodec == "" { // No default codec found. 
Pick a random compatible one
			for k := range compatibleVideoCodecs {
				p.VideoOutCodec = k
			}
		}
	}

	return nil
}

// validateAndUpdateOutputCodecs intersects the requested (or all supported)
// audio/video codecs with the codec compatibility of every encoded output,
// returning the surviving sets or an error when an output supports none.
func (p *PipelineConfig) validateAndUpdateOutputCodecs() (compatibleAudioCodecs map[types.MimeType]bool, compatibleVideoCodecs map[types.MimeType]bool, err error) {
	compatibleAudioCodecs = make(map[types.MimeType]bool)
	compatibleVideoCodecs = make(map[types.MimeType]bool)

	// Find video and audio codecs compatible with all outputs
	if p.AudioEnabled {
		if p.AudioOutCodec == "" {
			compatibleAudioCodecs = types.AllOutputAudioCodecs
		} else {
			compatibleAudioCodecs[p.AudioOutCodec] = true
		}
		for _, o := range p.GetEncodedOutputs() {
			compatibleAudioCodecs = types.GetMapIntersection(compatibleAudioCodecs, types.CodecCompatibility[o.GetOutputType()])
			if len(compatibleAudioCodecs) == 0 {
				if p.AudioOutCodec == "" {
					return nil, nil, errors.ErrNoCompatibleCodec
				}
				// Return a more specific error if a codec was provided
				return nil, nil, errors.ErrIncompatible(o.GetOutputType(), p.AudioOutCodec)
			}
		}
	}

	if p.VideoEnabled {
		if p.VideoOutCodec == "" {
			compatibleVideoCodecs = types.AllOutputVideoCodecs
		} else {
			compatibleVideoCodecs[p.VideoOutCodec] = true
		}
		for _, o := range p.GetEncodedOutputs() {
			compatibleVideoCodecs = types.GetMapIntersection(compatibleVideoCodecs, types.CodecCompatibility[o.GetOutputType()])
			if len(compatibleVideoCodecs) == 0 {
				// Fix: this branch previously checked/reported AudioOutCodec,
				// misreporting video incompatibilities; it must mirror the
				// audio branch using the VIDEO codec.
				if p.VideoOutCodec == "" {
					return nil, nil, errors.ErrNoCompatibleCodec
				}
				// Return a more specific error if a codec was provided
				return nil, nil, errors.ErrIncompatible(o.GetOutputType(), p.VideoOutCodec)
			}
		}
	}

	return compatibleAudioCodecs, compatibleVideoCodecs, nil
}

// updateOutputType resolves an unknown file output type to a concrete one
// compatible with the surviving codec sets, then finalizes the filepath.
func (p *PipelineConfig) updateOutputType(compatibleAudioCodecs map[types.MimeType]bool, compatibleVideoCodecs map[types.MimeType]bool) error {
	o := p.GetFileConfig()
	if o == nil || o.GetOutputType() != types.OutputTypeUnknownFile {
		return nil
	}

	if !p.VideoEnabled {
		ot := types.GetOutputTypeCompatibleWithCodecs(types.AudioOnlyFileOutputTypes, compatibleAudioCodecs, nil)
		if
ot == types.OutputTypeUnknownFile { return errors.ErrNoCompatibleFileOutputType } o.OutputType = ot } else if !p.AudioEnabled { ot := types.GetOutputTypeCompatibleWithCodecs(types.VideoOnlyFileOutputTypes, nil, compatibleVideoCodecs) if ot == types.OutputTypeUnknownFile { return errors.ErrNoCompatibleFileOutputType } o.OutputType = ot } else { ot := types.GetOutputTypeCompatibleWithCodecs(types.AudioVideoFileOutputTypes, compatibleAudioCodecs, compatibleVideoCodecs) if ot == types.OutputTypeUnknownFile { return errors.ErrNoCompatibleFileOutputType } o.OutputType = ot } identifier, replacements := p.getFilenameInfo() err := o.updateFilepath(p, identifier, replacements) if err != nil { return err } return nil } // UpdateInfoFromSDK - updates the pipeline config with the identifier, replacements, width, and height func (p *PipelineConfig) UpdateInfoFromSDK(identifier string, replacements map[string]string, w, h uint32) error { if p.Info.RetryCount > 0 { replacements["{retry}"] = fmt.Sprintf("%d", p.Info.RetryCount) } var err error for egressType, c := range p.Outputs { if len(c) == 0 { continue } switch egressType { case types.EgressTypeFile: err = c[0].(*FileConfig).updateFilepath(p, identifier, replacements) case types.EgressTypeSegments: o := c[0].(*SegmentConfig) o.LocalDir = stringReplace(o.LocalDir, replacements) o.StorageDir = stringReplace(o.StorageDir, replacements) o.PlaylistFilename = stringReplace(o.PlaylistFilename, replacements) o.LivePlaylistFilename = stringReplace(o.LivePlaylistFilename, replacements) o.SegmentPrefix = stringReplace(o.SegmentPrefix, replacements) o.SegmentsInfo.PlaylistName = stringReplace(o.SegmentsInfo.PlaylistName, replacements) o.SegmentsInfo.LivePlaylistName = stringReplace(o.SegmentsInfo.LivePlaylistName, replacements) case types.EgressTypeImages: for _, ci := range c { o := ci.(*ImageConfig) o.LocalDir = stringReplace(o.LocalDir, replacements) o.StorageDir = stringReplace(o.StorageDir, replacements) o.ImagePrefix = 
// stringReplace substitutes every placeholder key found in replacements with its
// associated value. Replacement order follows map iteration order, which is fine
// here because placeholders are disjoint tokens like "{room_name}".
func stringReplace(s string, replacements map[string]string) string {
	result := s
	for placeholder, value := range replacements {
		result = strings.ReplaceAll(result, placeholder, value)
	}
	return result
}
package config import ( "testing" "github.com/stretchr/testify/require" "github.com/livekit/egress/pkg/errors" "github.com/livekit/egress/pkg/types" "github.com/livekit/protocol/livekit" ) func TestFileOutputRetrySafety(t *testing.T) { for _, test := range []struct { name string retryCount int32 filepath string expectErr bool }{ { name: "first attempt with explicit path", retryCount: 0, filepath: "recordings/my-file.mp4", expectErr: false, }, { name: "retry with empty path (auto-generated)", retryCount: 1, filepath: "", expectErr: false, }, { name: "retry with directory path (auto-generated)", retryCount: 1, filepath: "recordings/", expectErr: false, }, { name: "retry with {retry} placeholder", retryCount: 1, filepath: "recordings/my-file-{retry}.mp4", expectErr: false, }, { name: "retry with explicit path missing {retry}", retryCount: 1, filepath: "recordings/my-file.mp4", expectErr: true, }, { name: "retry with {retry} in directory part", retryCount: 1, filepath: "recordings/{retry}/my-file.mp4", expectErr: false, }, { name: "second retry with {retry} placeholder", retryCount: 2, filepath: "recordings/my-file-{retry}.mp4", expectErr: false, }, { name: "second retry with explicit path missing {retry}", retryCount: 2, filepath: "recordings/my-file.mp4", expectErr: true, }, } { t.Run(test.name, func(t *testing.T) { p := &PipelineConfig{ Info: &livekit.EgressInfo{RoomName: "test-room", RetryCount: test.retryCount}, TmpDir: t.TempDir(), Outputs: make(map[types.EgressType][]OutputConfig), } _, err := p.getEncodedFileConfig(&livekit.EncodedFileOutput{ FileType: livekit.EncodedFileType_MP4, Filepath: test.filepath, }) if test.expectErr { require.ErrorIs(t, err, errors.ErrNonRetryableOutput) } else { require.NoError(t, err) } }) } } func TestSegmentOutputRetrySafety(t *testing.T) { for _, test := range []struct { name string retryCount int32 prefix string playlist string expectErr bool }{ { name: "first attempt with explicit prefix", retryCount: 0, prefix: 
"segments/my-stream", playlist: "segments/playlist", expectErr: false, }, { name: "retry with both empty (auto-generated)", retryCount: 1, prefix: "", playlist: "", expectErr: false, }, { name: "retry with {retry} in prefix only", retryCount: 1, prefix: "segments/my-stream-{retry}", playlist: "segments/playlist", expectErr: false, }, { name: "retry with {retry} in playlist only (prefix explicit)", retryCount: 1, prefix: "segments/my-stream", playlist: "segments/playlist-{retry}", expectErr: true, }, { name: "retry with {retry} in both", retryCount: 1, prefix: "segments/my-stream-{retry}", playlist: "segments/playlist-{retry}", expectErr: false, }, { name: "retry with explicit prefix missing {retry}", retryCount: 1, prefix: "segments/my-stream", playlist: "", expectErr: true, }, { name: "retry with {retry} in playlist only (prefix empty, derives from playlist)", retryCount: 1, prefix: "", playlist: "segments/playlist-{retry}", expectErr: false, }, { name: "retry with explicit playlist missing {retry}", retryCount: 1, prefix: "", playlist: "segments/playlist", expectErr: true, }, { name: "retry with both explicit and neither has {retry}", retryCount: 1, prefix: "segments/my-stream", playlist: "segments/playlist", expectErr: true, }, } { t.Run(test.name, func(t *testing.T) { p := &PipelineConfig{ Info: &livekit.EgressInfo{EgressId: "test_egress", RoomName: "test-room", RetryCount: test.retryCount}, TmpDir: t.TempDir(), Outputs: make(map[types.EgressType][]OutputConfig), } seg := &livekit.SegmentedFileOutput{ FilenamePrefix: test.prefix, PlaylistName: test.playlist, } _, err := p.getSegmentConfig(seg, seg) if test.expectErr { require.ErrorIs(t, err, errors.ErrNonRetryableOutput) } else { require.NoError(t, err) } }) } } ================================================ FILE: pkg/config/service.go ================================================ // Copyright 2023 LiveKit, Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package config import ( "fmt" "os" "time" "github.com/prometheus/client_golang/prometheus" "gopkg.in/yaml.v3" "github.com/livekit/egress/pkg/errors" "github.com/livekit/protocol/logger" "github.com/livekit/protocol/rpc" "github.com/livekit/protocol/utils" ) const ( roomCompositeCpuCost = 4 audioRoomCompositeCpuCost = 1 webCpuCost = 4 audioWebCpuCost = 1 participantCpuCost = 2 trackCompositeCpuCost = 1 trackCpuCost = 0.5 maxCpuUtilization = 0.8 maxUploadQueue = 60 defaultTemplatePort = 7980 defaultTemplateBaseTemplate = "http://localhost:%d/" defaultIOCreateTimeout = time.Second * 15 defaultIOUpdateTimeout = time.Second * 30 defaultIOWorkers = 5 defaultJitterBufferLatency = time.Second * 2 defaultAudioMixerLatency = time.Millisecond * 2750 defaultPipelineLatency = time.Second * 3 defaultRTPMaxDriftAdjustment = time.Millisecond * 5 defaultOldPacketThreshold = 2200 * time.Millisecond defaultRTPMaxAllowedTsDiff = time.Second * 5 defaultAudioTempoControllerAdjustmentRate = 0.05 defaultMaxPulseClients = 60 ) type ServiceConfig struct { BaseConfig `yaml:",inline"` HealthPort int `yaml:"health_port"` // health check port TemplatePort int `yaml:"template_port"` // room composite template server port PrometheusPort int `yaml:"prometheus_port"` // prometheus handler port DebugHandlerPort int `yaml:"debug_handler_port"` // egress debug handler port *CPUCostConfig `yaml:"cpu_cost"` // CPU costs for the different egress types } // 
MemorySource defines how memory usage is measured for admission and kill decisions. type MemorySource string const ( // MemorySourceProcRSS uses per-process RSS sum from hwstats (existing behavior). MemorySourceProcRSS MemorySource = "proc_rss" // MemorySourceCgroup uses cgroup-aware memory usage (working set). MemorySourceCgroup MemorySource = "cgroup" ) type CPUCostConfig struct { MaxCpuUtilization float64 `yaml:"max_cpu_utilization"` // maximum allowed CPU utilization when deciding to accept a request. Default to 80% MaxMemory float64 `yaml:"max_memory"` // maximum allowed memory usage in GB. 0 to disable MemoryCost float64 `yaml:"memory_cost"` // minimum memory in GB RoomCompositeCpuCost float64 `yaml:"room_composite_cpu_cost"` AudioRoomCompositeCpuCost float64 `yaml:"audio_room_composite_cpu_cost"` WebCpuCost float64 `yaml:"web_cpu_cost"` AudioWebCpuCost float64 `yaml:"audio_web_cpu_cost"` ParticipantCpuCost float64 `yaml:"participant_cpu_cost"` TrackCompositeCpuCost float64 `yaml:"track_composite_cpu_cost"` TrackCpuCost float64 `yaml:"track_cpu_cost"` MaxPulseClients int `yaml:"max_pulse_clients"` // pulse client limit for launching chrome // Memory source configuration (cgroup-aware memory accounting) MemorySource MemorySource `yaml:"memory_source"` // memory measurement source: proc_rss, cgroup MemoryKillGraceSec int `yaml:"memory_kill_grace_sec"` // grace period in update cycles before kill (0 = immediate) } func NewServiceConfig(confString string) (*ServiceConfig, error) { conf := &ServiceConfig{ BaseConfig: BaseConfig{ Logging: &logger.Config{ Level: "info", }, ApiKey: os.Getenv("LIVEKIT_API_KEY"), ApiSecret: os.Getenv("LIVEKIT_API_SECRET"), WsUrl: os.Getenv("LIVEKIT_WS_URL"), }, CPUCostConfig: &CPUCostConfig{}, } if confString != "" { if err := yaml.Unmarshal([]byte(confString), conf); err != nil { return nil, errors.ErrCouldNotParseConfig(err) } } // always create a new node ID conf.NodeID = utils.NewGuid("NE_") conf.InitDefaults() 
rpc.InitPSRPCStats(prometheus.Labels{"node_id": conf.NodeID, "node_type": "EGRESS"}) if err := conf.initLogger("nodeID", conf.NodeID, "clusterID", conf.ClusterID); err != nil { return nil, err } return conf, nil } func (c *ServiceConfig) InitDefaults() { if c.CPUCostConfig == nil { c.CPUCostConfig = new(CPUCostConfig) } if c.TemplatePort == 0 { c.TemplatePort = defaultTemplatePort } if c.TemplateBase == "" { c.TemplateBase = fmt.Sprintf(defaultTemplateBaseTemplate, c.TemplatePort) } if c.IOCreateTimeout == 0 { c.IOCreateTimeout = defaultIOCreateTimeout } if c.IOUpdateTimeout == 0 { c.IOUpdateTimeout = defaultIOUpdateTimeout } if c.IOWorkers <= 0 { c.IOWorkers = defaultIOWorkers } // Setting CPU costs from config. Ensure that CPU costs are positive if c.MaxCpuUtilization <= 0 || c.MaxCpuUtilization > 1 { c.MaxCpuUtilization = maxCpuUtilization } if c.RoomCompositeCpuCost <= 0 { c.RoomCompositeCpuCost = roomCompositeCpuCost } if c.AudioRoomCompositeCpuCost <= 0 { c.AudioRoomCompositeCpuCost = audioRoomCompositeCpuCost } if c.WebCpuCost <= 0 { c.WebCpuCost = webCpuCost } if c.AudioWebCpuCost <= 0 { c.AudioWebCpuCost = audioWebCpuCost } if c.ParticipantCpuCost <= 0 { c.ParticipantCpuCost = participantCpuCost } if c.TrackCompositeCpuCost <= 0 { c.TrackCompositeCpuCost = trackCompositeCpuCost } if c.TrackCpuCost <= 0 { c.TrackCpuCost = trackCpuCost } if c.MaxPulseClients == 0 { c.MaxPulseClients = defaultMaxPulseClients } // Memory source defaults to proc_rss (preserves existing behavior) if c.MemorySource == "" { c.MemorySource = MemorySourceProcRSS } // Validate memory source switch c.MemorySource { case MemorySourceProcRSS, MemorySourceCgroup: // valid default: logger.Warnw("unknown memory_source, falling back to proc_rss", nil, "memorySource", c.MemorySource) c.MemorySource = MemorySourceProcRSS } if c.MaxUploadQueue <= 0 { c.MaxUploadQueue = maxUploadQueue } applyLatencyDefaults(&c.Latency) if c.AudioTempoController.Enabled { if c.AudioTempoController.AdjustmentRate 
> 0.2 || c.AudioTempoController.AdjustmentRate <= 0 { c.AudioTempoController.AdjustmentRate = defaultAudioTempoControllerAdjustmentRate } } } func applyLatencyDefaults(latency *LatencyConfig) { if latency.JitterBufferLatency == 0 { latency.JitterBufferLatency = defaultJitterBufferLatency } if latency.AudioMixerLatency == 0 { latency.AudioMixerLatency = defaultAudioMixerLatency } if latency.PipelineLatency == 0 { latency.PipelineLatency = defaultPipelineLatency } if latency.RTPMaxAllowedTsDiff == 0 { latency.RTPMaxAllowedTsDiff = defaultRTPMaxAllowedTsDiff } if latency.RTPMaxAllowedTsDiff < latency.JitterBufferLatency { // RTP max allowed ts diff must be equal or greater than jitter buffer latency to absorb the jitter buffer burst latency.RTPMaxAllowedTsDiff = latency.JitterBufferLatency } if latency.RTPMaxDriftAdjustment == 0 { latency.RTPMaxDriftAdjustment = defaultRTPMaxDriftAdjustment } if latency.OldPacketThreshold == 0 { latency.OldPacketThreshold = defaultOldPacketThreshold } } ================================================ FILE: pkg/config/storage.go ================================================ // Copyright 2023 LiveKit, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package config import ( "time" "github.com/livekit/egress/pkg/errors" "github.com/livekit/protocol/egress" "github.com/livekit/protocol/livekit" "github.com/livekit/storage" ) type StorageConfig struct { Prefix string `yaml:"prefix"` // prefix applied to all filenames GeneratePresignedUrl bool `yaml:"generate_presigned_url"` S3 *storage.S3Config `yaml:"s3"` // upload to s3 Azure *storage.AzureConfig `yaml:"azure"` // upload to azure GCP *storage.GCPConfig `yaml:"gcp"` // upload to gcp AliOSS *storage.AliOSSConfig `yaml:"alioss"` // upload to aliyun } func (p *PipelineConfig) getStorageConfig(req egress.UploadRequest) (*StorageConfig, error) { sc := &StorageConfig{} if p.StorageConfig != nil { sc.Prefix = p.StorageConfig.Prefix sc.GeneratePresignedUrl = p.StorageConfig.GeneratePresignedUrl } if s3 := req.GetS3(); s3 != nil { sc.S3 = &storage.S3Config{ AccessKey: s3.AccessKey, Secret: s3.Secret, SessionToken: s3.SessionToken, AssumeRoleArn: s3.AssumeRoleArn, AssumeRoleExternalId: s3.AssumeRoleExternalId, Region: s3.Region, Endpoint: s3.Endpoint, Bucket: s3.Bucket, ForcePathStyle: s3.ForcePathStyle, Metadata: s3.Metadata, Tagging: s3.Tagging, ContentDisposition: s3.ContentDisposition, } if p.StorageConfig != nil && p.StorageConfig.S3 != nil { sc.S3.MaxRetries = p.StorageConfig.S3.MaxRetries sc.S3.MaxRetryDelay = p.StorageConfig.S3.MaxRetryDelay sc.S3.MinRetryDelay = p.StorageConfig.S3.MinRetryDelay } if sc.S3.AssumeRoleArn == "" { sc.S3.AssumeRoleArn = p.S3AssumeRoleArn sc.S3.AssumeRoleExternalId = p.S3AssumeRoleExternalID } if sc.S3.AssumeRoleArn != "" && sc.S3.AccessKey == "" { if p.S3AssumeRoleKey == "" { return nil, errors.ErrFeatureDisabled("S3 upload using AssumeRole") } // If an AssummedRole is set but not any AccessKey, default to using the one from conf. This is useful for uploading to S3 // using an external account. 
sc.S3.AccessKey = p.S3AssumeRoleKey sc.S3.Secret = p.S3AssumeRoleSecret } if s3.Proxy != nil { sc.S3.ProxyConfig = &storage.ProxyConfig{ Url: s3.Proxy.Url, Username: s3.Proxy.Username, Password: s3.Proxy.Password, } } if sc.S3.MaxRetries == 0 { sc.S3.MaxRetries = 5 } if sc.S3.MaxRetryDelay == 0 { sc.S3.MaxRetryDelay = time.Second * 5 } if sc.S3.MinRetryDelay == 0 { sc.S3.MinRetryDelay = time.Millisecond * 100 } return sc, nil } if gcp := req.GetGcp(); gcp != nil { sc.GCP = &storage.GCPConfig{ CredentialsJSON: gcp.Credentials, Bucket: gcp.Bucket, } if gcp.Proxy != nil { sc.GCP.ProxyConfig = &storage.ProxyConfig{ Url: gcp.Proxy.Url, Username: gcp.Proxy.Username, Password: gcp.Proxy.Password, } } return sc, nil } if azure := req.GetAzure(); azure != nil { sc.Azure = &storage.AzureConfig{ AccountName: azure.AccountName, AccountKey: azure.AccountKey, ContainerName: azure.ContainerName, } return sc, nil } if ali := req.GetAliOSS(); ali != nil { sc.AliOSS = &storage.AliOSSConfig{ AccessKey: ali.AccessKey, Secret: ali.Secret, Endpoint: ali.Endpoint, Bucket: ali.Bucket, } return sc, nil } sc = p.StorageConfig if p.DisallowLocalStorage && (sc == nil || sc.IsLocal()) { return nil, errors.ErrInvalidInput("output") } return sc, nil } func (c *StorageConfig) IsLocal() bool { return c.S3 == nil && c.GCP == nil && c.Azure == nil && c.AliOSS == nil } // resolveStorageConfig returns the first non-nil StorageConfig from the chain: // per-output override -> request-level default. // Server config fallback is handled by getStorageConfig when result is nil. func resolveStorageConfig(outputStorage, requestStorage *livekit.StorageConfig) *livekit.StorageConfig { if outputStorage != nil { return outputStorage } return requestStorage } ================================================ FILE: pkg/config/test_overrides.go ================================================ package config // TestOverrides is used to override the default configuration for testing purposes. 
type TestOverrides struct { // inject failure for rooms containing this substring, useful for testing failure conditions FailureInjectionRoom string `yaml:"failure_injection_room"` } ================================================ FILE: pkg/config/urls.go ================================================ // Copyright 2023 LiveKit, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package config import ( "fmt" "net/http" "net/url" "regexp" "strings" "time" "github.com/go-jose/go-jose/v4/json" "github.com/livekit/egress/pkg/errors" "github.com/livekit/egress/pkg/types" "github.com/livekit/protocol/livekit" "github.com/livekit/protocol/utils" ) // rtmp urls must be of format rtmp(s)://{host}(/{path})/{app}/{stream_key}( live=1) var ( rtmpRegexp = regexp.MustCompile(`^(rtmps?://)(.*/)(.*/)(\S*)( live=1)?$`) twitchEndpoint = regexp.MustCompile(`^rtmps?://.*\.contribute\.live-video\.net/app/(.*)( live=1)?$`) ) func (o *StreamConfig) AddStream(rawUrl string, outputType types.OutputType) (*Stream, error) { parsed, redacted, streamID, err := o.ValidateUrl(rawUrl, outputType) if err != nil { return nil, err } stream := &Stream{ ParsedUrl: parsed, RedactedUrl: redacted, StreamID: streamID, StreamInfo: &livekit.StreamInfo{ Url: redacted, Status: livekit.StreamInfo_ACTIVE, }, } if outputType != types.OutputTypeRTMP { stream.StreamInfo.StartedAt = time.Now().UnixNano() } o.Streams.Store(parsed, stream) return stream, nil } func (o *StreamConfig) ValidateUrl(rawUrl string, 
outputType types.OutputType) ( parsed string, redacted string, streamID string, err error, ) { parsedUrl, err := url.Parse(rawUrl) if err != nil { err = errors.ErrInvalidUrl(rawUrl, err.Error()) return } if types.StreamOutputTypes[parsedUrl.Scheme] != outputType { err = errors.ErrInvalidUrl(rawUrl, "invalid scheme") return } switch outputType { case types.OutputTypeRTMP: if parsedUrl.Scheme == "mux" { parsed = fmt.Sprintf("rtmps://global-live.mux.com:443/app/%s", parsedUrl.Host) } else if parsedUrl.Scheme == "twitch" { parsed, err = o.updateTwitchURL(parsedUrl.Host) if err != nil { return } } else if match := twitchEndpoint.FindStringSubmatch(rawUrl); len(match) > 0 { if updated, err := o.updateTwitchURL(match[1]); err == nil { parsed = updated } } else { parsed = rawUrl } var ok bool redacted, streamID, ok = redactStreamKey(parsed) if !ok { err = errors.ErrInvalidUrl(rawUrl, "rtmp urls must be of format rtmp(s)://{host}(/{path})/{app}/{stream_key}( live=1)") } return case types.OutputTypeSRT, types.OutputTypeRaw: parsed = rawUrl redacted = rawUrl return default: err = errors.ErrInvalidInput("stream output type") return } } func (o *StreamConfig) GetStream(rawUrl string) (*Stream, error) { parsedUrl, err := url.Parse(rawUrl) if err != nil { return nil, errors.ErrInvalidUrl(rawUrl, err.Error()) } var parsed string if parsedUrl.Scheme == "mux" { parsed = fmt.Sprintf("rtmps://global-live.mux.com:443/app/%s", parsedUrl.Host) } else if parsedUrl.Scheme == "twitch" { parsed, err = o.updateTwitchURL(parsedUrl.Host) if err != nil { return nil, err } } else if match := twitchEndpoint.FindStringSubmatch(rawUrl); len(match) > 0 { parsed, err = o.updateTwitchURL(match[1]) if err != nil { return nil, err } } else { parsed = rawUrl } stream, ok := o.Streams.Load(parsed) if !ok { return nil, errors.ErrStreamNotFound(rawUrl) } return stream.(*Stream), nil } func (o *StreamConfig) updateTwitchURL(key string) (string, error) { if err := o.updateTwitchTemplate(); err != nil { return 
"", err } return strings.ReplaceAll(o.twitchTemplate, "{stream_key}", key), nil } func (o *StreamConfig) updateTwitchTemplate() error { if o.twitchTemplate != "" { return nil } resp, err := http.Get("https://ingest.twitch.tv/ingests") if err != nil { return err } defer resp.Body.Close() var body struct { Ingests []struct { Name string `json:"name"` URLTemplate string `json:"url_template"` URLTemplateSecure string `json:"url_template_secure"` Priority int `json:"priority"` } `json:"ingests"` } if err = json.NewDecoder(resp.Body).Decode(&body); err != nil { return err } for _, ingest := range body.Ingests { if ingest.URLTemplateSecure != "" { o.twitchTemplate = ingest.URLTemplateSecure return nil } else if ingest.URLTemplate != "" { o.twitchTemplate = ingest.URLTemplate return nil } } return errors.New("no ingest found") } func redactStreamKey(url string) (string, string, bool) { match := rtmpRegexp.FindStringSubmatch(url) if len(match) != 6 { return url, "", false } streamID := match[4] match[4] = utils.RedactIdentifier(match[4]) return strings.Join(match[1:], ""), streamID, true } ================================================ FILE: pkg/config/urls_test.go ================================================ // Copyright 2023 LiveKit, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package config import ( "regexp" "strings" "testing" "github.com/stretchr/testify/require" "github.com/livekit/egress/pkg/types" ) func TestValidateUrl(t *testing.T) { var twitchUpdated = regexp.MustCompile("rtmps://(.*).contribute.live-video.net/app/streamkey") var twitchRedacted = regexp.MustCompile(`rtmps://(.*).contribute.live-video.net/app/\{str\.\.\.key}`) o := &StreamConfig{} for _, test := range []struct { url string twitch bool parsed string redacted string }{ { url: "mux://streamkey", parsed: "rtmps://global-live.mux.com:443/app/streamkey", redacted: "rtmps://global-live.mux.com:443/app/{str...key}", }, { url: "twitch://streamkey", twitch: true, }, { url: "rtmp://fake.contribute.live-video.net/app/streamkey", twitch: true, }, { url: "rtmp://localhost:1935/live/streamkey", parsed: "rtmp://localhost:1935/live/streamkey", redacted: "rtmp://localhost:1935/live/{str...key}", }, { url: "rtmps://localhost:1935/live/streamkey", parsed: "rtmps://localhost:1935/live/streamkey", redacted: "rtmps://localhost:1935/live/{str...key}", }, } { parsed, redacted, streamID, err := o.ValidateUrl(test.url, types.OutputTypeRTMP) require.NoError(t, err) require.NotEmpty(t, streamID) if test.twitch { require.NotEmpty(t, twitchUpdated.FindString(parsed), parsed) require.NotEmpty(t, twitchRedacted.FindString(redacted), redacted) } else { require.Equal(t, test.parsed, parsed) require.Equal(t, test.redacted, redacted) } } } func TestGetUrl(t *testing.T) { o := &StreamConfig{} require.NoError(t, o.updateTwitchTemplate()) parsedTwitchUrl := strings.ReplaceAll(o.twitchTemplate, "{stream_key}", "streamkey") urls := []string{ "rtmps://global-live.mux.com:443/app/streamkey", parsedTwitchUrl, parsedTwitchUrl, "rtmp://localhost:1935/live/streamkey", } for _, url := range []string{urls[0], urls[1], urls[3]} { _, err := o.AddStream(url, types.OutputTypeRTMP) require.NoError(t, err) } for i, rawUrl := range []string{ "mux://streamkey", "twitch://streamkey", 
"rtmp://any.contribute.live-video.net/app/streamkey", "rtmp://localhost:1935/live/streamkey", } { stream, err := o.GetStream(rawUrl) require.NoError(t, err) require.Equal(t, urls[i], stream.ParsedUrl) } } ================================================ FILE: pkg/errors/errors.go ================================================ // Copyright 2023 LiveKit, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package errors import ( "errors" "strings" "github.com/livekit/psrpc" ) func New(err string) error { return errors.New(err) } func Is(err, target error) bool { return errors.Is(err, target) } func As(err error, target any) bool { return errors.As(err, target) } type ErrArray struct { errs []error } func (e *ErrArray) AppendErr(err error) { e.errs = append(e.errs, err) } func (e *ErrArray) Check(err error) { if err != nil { e.errs = append(e.errs, err) } } func (e *ErrArray) ToError() psrpc.Error { if len(e.errs) == 0 { return nil } code := psrpc.Unknown var errStr []string // Return the code for the first error of type psrpc.Error for _, err := range e.errs { var psrpcErr psrpc.Error if code == psrpc.Unknown && errors.As(err, &psrpcErr) { code = psrpcErr.Code() } errStr = append(errStr, err.Error()) } return psrpc.NewErrorf(code, "%s", strings.Join(errStr, "\n")) } // internal errors var ( ErrNoConfig = psrpc.NewErrorf(psrpc.Internal, "missing config") ErrGhostPadFailed = psrpc.NewErrorf(psrpc.Internal, "failed to add ghost pad to bin") ErrBinAlreadyAdded = 
psrpc.NewErrorf(psrpc.Internal, "bin already added to pipeline") ErrWrongHierarchy = psrpc.NewErrorf(psrpc.Internal, "pipeline can contain bins or elements, not both") ErrPipelineFrozen = psrpc.NewErrorf(psrpc.Internal, "pipeline frozen") ErrSinkNotFound = psrpc.NewErrorf(psrpc.Internal, "sink not found") ) func ErrPadLinkFailed(src, sink, status string) error { return psrpc.NewErrorf(psrpc.Internal, "failed to link %s to %s: %s", src, sink, status) } func ErrGstPipelineError(err error) error { return psrpc.NewError(psrpc.Internal, err) } func ErrProcessFailed(process string, err error) error { return psrpc.NewErrorf(psrpc.Internal, "failed to launch %s: %v", process, err) } func ChromeError(err error) error { return psrpc.NewError(psrpc.Internal, err) } // other errors var ( ErrNonStreamingPipeline = psrpc.NewErrorf(psrpc.InvalidArgument, "UpdateStream called on non-streaming egress") ErrNoCompatibleCodec = psrpc.NewErrorf(psrpc.InvalidArgument, "no supported codec is compatible with all outputs") ErrNoCompatibleFileOutputType = psrpc.NewErrorf(psrpc.InvalidArgument, "no supported file output type is compatible with the selected codecs") ErrEgressNotFound = psrpc.NewErrorf(psrpc.NotFound, "egress not found") ErrEgressAlreadyExists = psrpc.NewErrorf(psrpc.AlreadyExists, "egress already exists") ErrSubscriptionFailed = psrpc.NewErrorf(psrpc.Unavailable, "failed to subscribe to track") ErrNotEnoughCPU = psrpc.NewErrorf(psrpc.Unavailable, "not enough CPU") ErrShuttingDown = psrpc.NewErrorf(psrpc.Unavailable, "server is shutting down") ErrNonRetryableOutput = psrpc.NewErrorf(psrpc.FailedPrecondition, "output configuration does not support retry") ErrHandlerFailedToStart = psrpc.NewErrorf(psrpc.Internal, "handler failed to start") ) func PageLoadError(err string) error { err = strings.TrimPrefix(err, "page load error ") return psrpc.NewErrorf(psrpc.InvalidArgument, "page load error: %s", err) } func TemplateError(err string) error { return 
psrpc.NewErrorf(psrpc.InvalidArgument, "template error: %s", err) } func ErrCouldNotParseConfig(err error) error { return psrpc.NewErrorf(psrpc.InvalidArgument, "could not parse config: %v", err) } func ErrNotSupported(feature string) error { return psrpc.NewErrorf(psrpc.InvalidArgument, "%s is not yet supported", feature) } func ErrIncompatible(format, codec interface{}) error { return psrpc.NewErrorf(psrpc.InvalidArgument, "format %v incompatible with codec %v", format, codec) } func ErrInvalidInput(field string) error { return psrpc.NewErrorf(psrpc.InvalidArgument, "request has missing or invalid field: %s", field) } func ErrInvalidUrl(url string, reason string) error { return psrpc.NewErrorf(psrpc.InvalidArgument, "invalid url %s: %s", url, reason) } func ErrUploadFailed(location string, err error) error { return psrpc.NewErrorf(psrpc.InvalidArgument, "%s upload failed: %v", location, err) } func ErrParticipantNotFound(identity string) error { return psrpc.NewErrorf(psrpc.NotFound, "participant %s not found", identity) } func ErrStreamNotFound(url string) error { return psrpc.NewErrorf(psrpc.NotFound, "stream %s not found", url) } func ErrTrackNotFound(trackID string) error { return psrpc.NewErrorf(psrpc.NotFound, "track %s not found", trackID) } func ErrFeatureDisabled(feature string) error { return psrpc.NewErrorf(psrpc.PermissionDenied, "%s is disabled for this account", feature) } func ErrCPUExhausted(usage float64) error { return psrpc.NewErrorf(psrpc.PermissionDenied, "CPU exhausted: %.2f cores used", usage) } func ErrOOM(usage float64) error { return psrpc.NewErrorf(psrpc.PermissionDenied, "OOM: %.2f GB used", usage) } ================================================ FILE: pkg/gstreamer/bin.go ================================================ // Copyright 2023 LiveKit, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package gstreamer import ( "fmt" "slices" "sync" "time" "github.com/go-gst/go-glib/glib" "github.com/go-gst/go-gst/gst" "github.com/linkdata/deadlock" "go.uber.org/atomic" "github.com/livekit/egress/pkg/errors" "github.com/livekit/protocol/logger" ) const ( removeSourceBinTimeout = 3 * time.Second ) // Locking rules for Bin/StateManager (maintainer reference): // 1. if both state and bin data are needed, take StateManager lock first // (LockState/LockStateShared), then Bin.mu. // 2. for multi-bin operations, take the "owner"/parent bin mutex before peer // bin mutexes (for example: b.mu -> src.mu -> sink.mu). // 3. do not introduce paths that acquire locks in the reverse order // (peer/child -> parent), or AB-BA deadlocks are possible. // 4. for work executed later on the GLib loop (IdleAdd callbacks), snapshot // fields while holding the lock that protects them: // - State under StateManager lock. // - Bin fields (`srcs`, `sinks`, `elements`, `pads`, etc.) under `b.mu`. // Then unlock before scheduling the callback. Avoid holding these locks // while waiting for the callback to run on the GLib loop. // Exception: ForceRemoveSourceBin intentionally holds a shared state lock // across the wait to prevent state-mutation races from concurrent Stop/ // state-transition calls while forced detach is in progress. 
//
// Bin is designed to hold a single stream, with any number of sources and sinks
type Bin struct {
	*Callbacks
	*StateManager
	pipeline *gst.Pipeline

	mu         deadlock.Mutex             // guards all mutable fields below
	bin        *gst.Bin
	latency    time.Duration              // queue bound used by queueLinkPeersLocked
	linkFunc   func([]*gst.Element) error // optional custom element-linking function
	shouldLink func(string) bool          // optional filter for peer-bin linking
	eosFunc    func() bool                // optional custom EOS handler; true = also forward EOS to srcs
	getSrcPad  func(string) *gst.Pad      // optional custom pad lookup for a named sink bin
	getSinkPad func(string) *gst.Pad      // optional custom pad lookup for a named src bin
	added      bool                       // set once the bin has been added to a parent

	srcs     []*Bin                   // source bins
	elements []*gst.Element           // elements within this bin
	queues   map[string]*gst.Element  // used with BinTypeMultiStream
	pads     map[string]*gst.GhostPad // ghost pads by bin name
	eosSeen  map[string]*atomic.Bool  // downstream EOS seen per peer bin name
	sinks    []*Bin                   // sink bins
}

// NewBin creates a child Bin named name that shares this bin's callbacks,
// state manager, and pipeline.
func (b *Bin) NewBin(name string) *Bin {
	return &Bin{
		Callbacks:    b.Callbacks,
		StateManager: b.StateManager,
		pipeline:     b.pipeline,
		bin:          gst.NewBin(name),
		pads:         make(map[string]*gst.GhostPad),
		eosSeen:      make(map[string]*atomic.Bool),
	}
}

// GetName returns the underlying gst bin's name.
func (b *Bin) GetName() string {
	return b.bin.GetName()
}

// AddSourceBin - adds src as a source of b. This should only be called once for each source bin
func (b *Bin) AddSourceBin(src *Bin) error {
	logger.Debugw(fmt.Sprintf("adding src %s to %s", src.bin.GetName(), b.bin.GetName()))
	return b.addBin(src, gst.PadDirectionSource)
}

// AddSinkBin - adds sink as a sink of b. This should only be called once for each sink bin
func (b *Bin) AddSinkBin(sink *Bin) error {
	logger.Debugw(fmt.Sprintf("adding sink %s to %s", sink.bin.GetName(), b.bin.GetName()))
	return b.addBin(sink, gst.PadDirectionSink)
}

// addBin registers bin as a src or sink of b and adds it to the gst pipeline.
// When the pipeline is already past StateBuilding, it is also linked in place.
// Lock order follows the file rules: StateManager shared lock, then b.mu,
// then (for linking) the peer bin's mu.
func (b *Bin) addBin(bin *Bin, direction gst.PadDirection) error {
	// reject double-add up front; added is only ever set here
	bin.mu.Lock()
	alreadyAdded := bin.added
	bin.added = true
	bin.mu.Unlock()
	if alreadyAdded {
		return errors.ErrBinAlreadyAdded
	}

	b.LockStateShared()
	defer b.UnlockStateShared()
	state := b.GetStateLocked()
	if state > StateRunning {
		// shutting down; nothing to add
		return nil
	}

	b.mu.Lock()
	defer b.mu.Unlock()
	if direction == gst.PadDirectionSource {
		b.srcs = append(b.srcs, bin)
	} else {
		b.sinks = append(b.sinks, bin)
	}

	if err := b.pipeline.Add(bin.bin.Element); err != nil {
		return errors.ErrGstPipelineError(err)
	}
	if state == StateBuilding {
		// linking happens later, in Pipeline.Link
		return nil
	}

	// dynamic add: link the new bin's internals, then link it to b
	if err := bin.link(); err != nil {
		return err
	}

	var err error
	bin.mu.Lock()
	if direction == gst.PadDirectionSource {
		err = linkPeersLocked(bin, b)
	} else {
		err = linkPeersLocked(b, bin)
	}
	bin.mu.Unlock()
	if err != nil {
		return err
	}

	return nil
}

// AddElement - adds element to the bin. Elements will be linked in the order they are added
func (b *Bin) AddElement(e *gst.Element) error {
	b.mu.Lock()
	defer b.mu.Unlock()

	b.elements = append(b.elements, e)
	if err := b.bin.Add(e); err != nil {
		return errors.ErrGstPipelineError(err)
	}
	return nil
}

// AddElements - adds elements to the bin. Elements will be linked in the order they are added
func (b *Bin) AddElements(elements ...*gst.Element) error {
	b.mu.Lock()
	defer b.mu.Unlock()

	b.elements = append(b.elements, elements...)
	if err := b.bin.AddMany(elements...); err != nil {
		return errors.ErrGstPipelineError(err)
	}
	return nil
}

// ForceRemoveSourceBin synchronously removes a source bin without waiting for EOS.
// This is used for FlowFlushing recovery where EOS will never propagate from a stuck appsrc.
// The removal runs on the GLib main loop thread via glib.IdleAdd and blocks until complete.
func (b *Bin) ForceRemoveSourceBin(name string) error {
	logger.Infow("force removing source bin", "src", name, "from", b.bin.GetName())

	// hold the shared state lock across the whole operation (documented
	// exception in the locking rules) so Stop/state transitions cannot race
	// the forced detach
	b.LockStateShared()
	defer b.UnlockStateShared()
	state := b.GetStateLocked()
	if state > StateRunning {
		return nil
	}

	b.mu.Lock()
	idx := slices.IndexFunc(b.srcs, func(s *Bin) bool { return s.bin.GetName() == name })
	if idx == -1 {
		b.mu.Unlock()
		return nil
	}
	src := b.srcs[idx]

	src.mu.Lock()
	srcGhostPad, sinkGhostPad, ok := deleteGhostPadsLocked(src, b)
	src.mu.Unlock()
	if !ok {
		b.mu.Unlock()
		return errors.New("ghost pads not found for force removal")
	}

	// Now safe to remove from the tracking slice
	b.srcs = slices.Delete(b.srcs, idx, idx+1)

	// Capture references before releasing the lock.
	// These fields are set during construction and never modified, so safe to use after unlock.
	peerElement := b.elements[0]
	parentBin := b.bin
	pipeline := b.pipeline
	b.mu.Unlock()

	// Execute removal synchronously on the GLib main loop thread
	done := make(chan error, 1)
	if _, err := glib.IdleAdd(func() bool {
		logger.Debugw("force removing source bin on GLib thread", "bin", src.bin.GetName())
		done <- detachSourceBin(src, srcGhostPad, sinkGhostPad, peerElement, parentBin, pipeline)
		return false
	}); err != nil {
		return errors.ErrGstPipelineError(err)
	}

	return <-done
}

// RemoveSourceBin removes the named source bin, waiting for EOS (or a timeout)
// before detaching when the pipeline is running.
func (b *Bin) RemoveSourceBin(name string) error {
	logger.Debugw(fmt.Sprintf("removing src %s from %s", name, b.bin.GetName()))
	return b.removeBin(name, gst.PadDirectionSource)
}

// RemoveSinkBin removes the named sink bin.
func (b *Bin) RemoveSinkBin(name string) error {
	logger.Debugw(fmt.Sprintf("removing sink %s from %s", name, b.bin.GetName()))
	return b.removeBin(name, gst.PadDirectionSink)
}

// removeSourceLocked unlinks the named src from b.srcs and returns it,
// or nil if not found. Caller must hold b.mu.
func (b *Bin) removeSourceLocked(name string) *Bin {
	for i, s := range b.srcs {
		if s.bin.GetName() == name {
			b.srcs = append(b.srcs[:i], b.srcs[i+1:]...)
			return s
		}
	}
	return nil
}

// removeBin removes the named src or sink bin. While still building, the bin
// is simply removed from the pipeline; once running, removal is deferred to a
// pad probe (probeRemoveSource/probeRemoveSink).
func (b *Bin) removeBin(name string, direction gst.PadDirection) error {
	b.LockStateShared()
	defer b.UnlockStateShared()
	state := b.GetStateLocked()
	if state > StateRunning {
		return nil
	}

	b.mu.Lock()
	defer b.mu.Unlock()

	var bin *Bin
	if direction == gst.PadDirectionSource {
		bin = b.removeSourceLocked(name)
	} else {
		for i, s := range b.sinks {
			if s.bin.GetName() == name {
				bin = s
				b.sinks = append(b.sinks[:i], b.sinks[i+1:]...)
				break
			}
		}
	}
	if bin == nil {
		return nil
	}

	if state == StateBuilding {
		if err := b.pipeline.Remove(bin.bin.Element); err != nil {
			return errors.ErrGstPipelineError(err)
		}
		return nil
	}

	if direction == gst.PadDirectionSource {
		b.probeRemoveSource(bin)
	} else {
		b.probeRemoveSink(bin)
	}
	return nil
}

// probeRemoveSource schedules removal of src once EOS has been observed on its
// link to b, with a timeout fallback so a silent source cannot block removal.
// Actual detach runs on the GLib main loop via scheduleRemoval.
func (b *Bin) probeRemoveSource(src *Bin) {
	src.mu.Lock()
	srcGhostPad, sinkGhostPad, ok := deleteGhostPadsLocked(src, b)
	src.mu.Unlock()
	if !ok {
		return
	}

	var removed atomic.Bool          // set once detach has started; tells probes to remove themselves
	var removalScheduled atomic.Bool // CAS gate: only one removal is ever scheduled
	srcPad := srcGhostPad.GetTarget()
	sinkPad := sinkGhostPad.GetTarget()

	// eosSeen survives ghost pad deletion (see deleteGhostPadsLocked), so a
	// previously-recorded EOS can still trigger immediate removal below
	var eosSeen *atomic.Bool
	src.mu.Lock()
	if seen, ok := src.eosSeen[b.bin.GetName()]; ok {
		eosSeen = seen
	}
	src.mu.Unlock()

	scheduleRemoval := func(reason string) {
		if !removalScheduled.CompareAndSwap(false, true) {
			return
		}
		if _, err := glib.IdleAdd(func() bool {
			removed.Store(true)
			logger.Debugw("removing source bin", "bin", src.bin.GetName(), "reason", reason)
			if err := detachSourceBin(src, srcGhostPad, sinkGhostPad, b.elements[0], b.bin, b.pipeline); err != nil {
				logger.Errorw("failed to detach source bin", err, "bin", src.bin.GetName())
			}
			return false
		}); err != nil {
			logger.Errorw("failed to schedule source bin removal", err, "bin", src.bin.GetName())
		}
	}

	probe := func(_ *gst.Pad, info *gst.PadProbeInfo) gst.PadProbeReturn {
		if removed.Load() {
			return gst.PadProbeRemove
		}
		if info.Type()&gst.PadProbeTypeEventDownstream != 0 {
			if event := info.GetEvent(); event != nil && event.Type() == gst.EventTypeEOS {
				logger.Debugw("received EOS", "bin", src.bin.GetName())
				if eosSeen != nil {
					eosSeen.Store(true)
				}
				scheduleRemoval("eos")
			}
		}
		return gst.PadProbeOK
	}
	// watch both sides of the link for EOS
	srcPad.AddProbe(gst.PadProbeTypeEventDownstream, probe)
	sinkPad.AddProbe(gst.PadProbeTypeEventDownstream, probe)

	if eosSeen != nil && eosSeen.Load() {
		// EOS arrived before the probes were installed
		logger.Debugw("eos already seen, removing source bin", "bin", src.bin.GetName(), "reason", "eos-seen-after-probe")
		scheduleRemoval("eos-seen-after-probe")
		return
	}

	time.AfterFunc(removeSourceBinTimeout, func() {
		if removalScheduled.Load() {
			return
		}
		logger.Warnw("timeout waiting for EOS before removing source bin", nil, "bin", src.bin.GetName())
		scheduleRemoval("timeout")
	})
}

// probeRemoveSink detaches sink from b inside a pad probe: the sink receives
// EOS, is removed from the pipeline, set to null, and its request pad released.
func (b *Bin) probeRemoveSink(sink *Bin) {
	sink.mu.Lock()
	srcGhostPad, sinkGhostPad, ok := deleteGhostPadsLocked(b, sink)
	sink.mu.Unlock()
	if !ok {
		return
	}

	srcGhostPad.AddProbe(gst.PadProbeTypeAllBoth, func(_ *gst.Pad, _ *gst.PadProbeInfo) gst.PadProbeReturn {
		srcGhostPad.Unlink(sinkGhostPad.Pad)
		sinkGhostPad.SendEvent(gst.NewEOSEvent())

		b.mu.Lock()
		err := b.pipeline.Remove(sink.bin.Element)
		b.mu.Unlock()
		if err != nil {
			b.OnError(errors.ErrGstPipelineError(err))
			return gst.PadProbeRemove
		}

		if err = sink.SetState(gst.StateNull); err != nil {
			logger.Warnw(fmt.Sprintf("failed to change %s state", sink.bin.GetName()), err)
		}

		b.elements[len(b.elements)-1].ReleaseRequestPad(srcGhostPad.GetTarget())
		b.bin.RemovePad(srcGhostPad.Pad)
		return gst.PadProbeOK
	})
}

// detachSourceBin performs the GStreamer operations to disconnect and remove a source bin.
// Must be called on the GLib main loop thread.
func detachSourceBin(src *Bin, srcGhostPad, sinkGhostPad *gst.GhostPad, peerElement *gst.Element, parentBin *gst.Bin, pipeline *gst.Pipeline) error { sinkPad := sinkGhostPad.GetTarget() peerElement.ReleaseRequestPad(sinkPad) srcGhostPad.Unlink(sinkGhostPad.Pad) parentBin.RemovePad(sinkGhostPad.Pad) if err := pipeline.Remove(src.bin.Element); err != nil { logger.Warnw("failed to remove bin", err, "bin", src.bin.GetName()) return errors.ErrGstPipelineError(err) } if err := src.bin.SetState(gst.StateNull); err != nil { logger.Warnw("failed to change bin state", err, "bin", src.bin.GetName()) return errors.ErrGstPipelineError(err) } return nil } func deleteGhostPadsLocked(src, sink *Bin) (*gst.GhostPad, *gst.GhostPad, bool) { srcPad, srcOK := src.pads[sink.bin.GetName()] if !srcOK { logger.Errorw("source pad missing", nil, "bin", src.bin.GetName()) } delete(src.pads, sink.bin.GetName()) // keep eosSeen so probeRemoveSource can still detect prior EOS when called after pad deletion sinkPad, sinkOK := sink.pads[src.bin.GetName()] if !sinkOK { logger.Errorw("sink pad missing", nil, "bin", sink.bin.GetName()) } delete(sink.pads, src.bin.GetName()) return srcPad, sinkPad, srcOK && sinkOK } func (b *Bin) SetState(state gst.State) error { stateErr := make(chan error, 1) go func() { stateErr <- b.bin.SetState(state) }() select { case <-time.After(stateChangeTimeout): return errors.ErrPipelineFrozen case err := <-stateErr: if err != nil { return errors.ErrGstPipelineError(err) } } return nil } // SetLinkFunc - sets a custom linking function for this bin's elements (used when you need to modify chain functions) func (b *Bin) SetLinkFunc(f func([]*gst.Element) error) { b.mu.Lock() defer b.mu.Unlock() b.linkFunc = f } func (b *Bin) SetShouldLink(f func(string) bool) { b.mu.Lock() defer b.mu.Unlock() b.shouldLink = f } // SetGetSrcPad - sets a custom linking function which returns a pad for the named src bin func (b *Bin) SetGetSrcPad(f func(srcName string) *gst.Pad) { b.mu.Lock() 
defer b.mu.Unlock() b.getSrcPad = f } // SetGetSinkPad - sets a custom linking function which returns a pad for the named sink bin func (b *Bin) SetGetSinkPad(f func(sinkName string) *gst.Pad) { b.mu.Lock() defer b.mu.Unlock() b.getSinkPad = f } // SetEOSFunc - sets a custom EOS function (used for appsrc, input-selector). If it returns true, EOS will also be sent to src bins func (b *Bin) SetEOSFunc(f func() bool) { b.mu.Lock() defer b.mu.Unlock() b.eosFunc = f } func (b *Bin) sendEOS() { b.mu.Lock() eosFunc := b.eosFunc srcs := b.srcs b.mu.Unlock() if eosFunc != nil && !eosFunc() { return } if len(srcs) > 0 { var wg sync.WaitGroup wg.Add(len(b.srcs)) for _, src := range srcs { go func(s *Bin) { s.sendEOS() wg.Done() }(src) } wg.Wait() } else if len(b.elements) > 0 { b.bin.SendEvent(gst.NewEOSEvent()) } } // AddOnEOSReceived adds a callback to be called when EOS is received on every pad of the last element in the bin func (b *Bin) AddOnEOSReceived(f func()) error { b.mu.Lock() defer b.mu.Unlock() if len(b.elements) == 0 { return nil } sink := b.elements[len(b.elements)-1] sinkPads, err := sink.GetSinkPads() if err != nil { return err } var expecting atomic.Int32 expecting.Add(int32(len(sinkPads))) for _, sinkPad := range sinkPads { sinkPad.AddProbe(gst.PadProbeTypeEventDownstream, func(_ *gst.Pad, info *gst.PadProbeInfo) gst.PadProbeReturn { if event := info.GetEvent(); event != nil && event.Type() == gst.EventTypeEOS { if expecting.Dec() == 0 { f() } return gst.PadProbeRemove } return gst.PadProbeOK }) } return nil } // ----- Internal ----- func (b *Bin) link() error { b.mu.Lock() defer b.mu.Unlock() for _, src := range b.srcs { if err := src.link(); err != nil { return err } } for _, sink := range b.sinks { if err := sink.link(); err != nil { return err } } if len(b.elements) > 0 { if b.linkFunc != nil { if err := b.linkFunc(b.elements); err != nil { return err } } else { // link elements if err := gst.ElementLinkMany(b.elements...); err != nil { return 
errors.ErrGstPipelineError(err)
			}
		}
		// link this bin's element chain to its direct peer bins
		for _, src := range getPeerSrcs(b.srcs) {
			src.mu.Lock()
			err := linkPeersLocked(src, b)
			src.mu.Unlock()
			if err != nil {
				return err
			}
		}
		for _, sink := range getPeerSinks(b.sinks) {
			sink.mu.Lock()
			err := linkPeersLocked(b, sink)
			sink.mu.Unlock()
			if err != nil {
				return err
			}
		}
	} else {
		// link src bins to sink bins
		srcs := getPeerSrcs(b.srcs)
		sinks := getPeerSinks(b.sinks)
		// with multiple sinks, each link gets its own queue for decoupling
		addQueues := len(sinks) > 1
		for _, src := range srcs {
			src.mu.Lock()
			for _, sink := range sinks {
				sink.mu.Lock()
				var err error
				if addQueues {
					err = b.queueLinkPeersLocked(src, sink)
				} else {
					err = linkPeersLocked(src, sink)
				}
				sink.mu.Unlock()
				if err != nil {
					src.mu.Unlock()
					return err
				}
			}
			src.mu.Unlock()
		}
	}

	return nil
}

// linkPeersLocked creates ghost pads between src and sink and links them.
// If the two bins are in different states, the link (or the sink's state
// change) is deferred to a blocking pad probe so dataflow starts cleanly.
// Callers must hold the peer bin mutexes per the file's locking rules.
func linkPeersLocked(src, sink *Bin) error {
	srcPad, sinkPad, err := createGhostPadsLocked(src, sink, nil)
	if err != nil {
		return err
	}

	srcState := src.bin.GetCurrentState()
	sinkState := sink.bin.GetCurrentState()
	if srcState != sinkState {
		if srcState == gst.StateNull {
			// src not started yet: link from a blocked pad, then start it
			srcPad.AddProbe(gst.PadProbeTypeBlockDownstream, func(_ *gst.Pad, _ *gst.PadProbeInfo) gst.PadProbeReturn {
				if padReturn := srcPad.Link(sinkPad.Pad); padReturn != gst.PadLinkOK {
					logger.Errorw("failed to link", errors.ErrPadLinkFailed(src.bin.GetName(), sink.bin.GetName(), padReturn.String()))
				}
				return gst.PadProbeRemove
			})
			return src.SetState(gst.StatePlaying)
		}
		if sinkState == gst.StateNull {
			// sink not started yet: start it once data is about to flow
			srcPad.AddProbe(gst.PadProbeTypeBlockDownstream, func(_ *gst.Pad, _ *gst.PadProbeInfo) gst.PadProbeReturn {
				if err = sink.SetState(gst.StatePlaying); err != nil {
					src.OnError(errors.ErrGstPipelineError(err))
					return gst.PadProbeHandled
				}
				return gst.PadProbeRemove
			})
		}
	}

	if padReturn := srcPad.Link(sinkPad.Pad); padReturn != gst.PadLinkOK {
		return errors.ErrPadLinkFailed(src.bin.GetName(), sink.bin.GetName(), padReturn.String())
	}
	return nil
}

// queueLinkPeersLocked links src to sink through a dedicated leaky queue,
// honoring either bin's shouldLink filter. Callers hold the peer mutexes.
func (b *Bin) queueLinkPeersLocked(src, sink *Bin) error {
	srcName := src.bin.GetName()
	sinkName := sink.bin.GetName()

	if (src.shouldLink != nil && !src.shouldLink(sinkName)) || (sink.shouldLink != nil && !sink.shouldLink(srcName)) {
		return nil
	}

	queueName := fmt.Sprintf("%s_%s_queue", srcName, sinkName)
	queue, err := BuildQueue(queueName, b.latency, true)
	if err != nil {
		return err
	}
	b.queues[queueName] = queue
	if err = sink.bin.Add(queue); err != nil {
		return err
	}

	srcPad, sinkPad, err := createGhostPadsLocked(src, sink, queue)
	if err != nil {
		return err
	}

	if padReturn := srcPad.Link(sinkPad.Pad); padReturn != gst.PadLinkOK {
		return errors.ErrPadLinkFailed(srcName, queueName, padReturn.String())
	}
	return nil
}

// getPeerSrcs flattens srcs to the nearest descendants that contain elements.
func getPeerSrcs(srcs []*Bin) []*Bin {
	flattened := make([]*Bin, 0, len(srcs))
	for _, src := range srcs {
		if len(src.elements) > 0 {
			flattened = append(flattened, src)
		} else {
			flattened = append(flattened, getPeerSrcs(src.srcs)...)
		}
	}
	return flattened
}

// getPeerSinks flattens sinks to the nearest descendants that contain elements.
func getPeerSinks(sinks []*Bin) []*Bin {
	flattened := make([]*Bin, 0, len(sinks))
	for _, sink := range sinks {
		if len(sink.elements) > 0 {
			flattened = append(flattened, sink)
		} else {
			flattened = append(flattened, getPeerSinks(sink.sinks)...)
		}
	}
	return flattened
}

================================================
FILE: pkg/gstreamer/builder.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gstreamer import ( "time" "github.com/go-gst/go-gst/gst" "github.com/livekit/egress/pkg/errors" ) func BuildQueue(name string, latency time.Duration, leaky bool) (*gst.Element, error) { queue, err := gst.NewElementWithName("queue", name) if err != nil { return nil, errors.ErrGstPipelineError(err) } if latency > 0 { if err = queue.SetProperty("max-size-time", uint64(latency)); err != nil { return nil, errors.ErrGstPipelineError(err) } if err = queue.SetProperty("max-size-bytes", uint(0)); err != nil { return nil, errors.ErrGstPipelineError(err) } if err = queue.SetProperty("max-size-buffers", uint(0)); err != nil { return nil, errors.ErrGstPipelineError(err) } } if leaky { queue.SetArg("leaky", "downstream") NewLeakyQueueMonitor(name, queue) } return queue, nil } func BuildAudioRate(name string, tolerance time.Duration) (*gst.Element, error) { audioRate, err := gst.NewElementWithName("audiorate", name) if err != nil { return nil, errors.ErrGstPipelineError(err) } if err = audioRate.SetProperty("skip-to-first", true); err != nil { return nil, errors.ErrGstPipelineError(err) } if err = audioRate.SetProperty("tolerance", uint64(tolerance)); err != nil { return nil, errors.ErrGstPipelineError(err) } return audioRate, nil } ================================================ FILE: pkg/gstreamer/callbacks.go ================================================ // Copyright 2023 LiveKit, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package gstreamer

import (
	"github.com/frostbyte73/core"
	"github.com/linkdata/deadlock"

	"github.com/livekit/egress/pkg/config"
	"github.com/livekit/egress/pkg/errors"
)

// Callbacks fans out pipeline and source events to registered handlers.
// Registration and dispatch are guarded by mu; handlers run on the caller's
// goroutine.
type Callbacks struct {
	mu         deadlock.RWMutex
	GstReady   chan struct{}
	BuildReady chan struct{}

	// upstream callbacks
	onError           func(error)
	onStop            []func() error
	onDebugDotRequest func(string)

	// source callbacks
	onTrackAdded     []func(*config.TrackSource)
	onTrackMuted     []func(string)
	onTrackUnmuted   []func(string)
	onTrackRemoved   []func(string)
	onSourceBinReset []func(*config.TrackSource) error
	onEOSSent        func()

	pipelinePaused core.Fuse
}

// SetOnError sets the single error handler.
func (c *Callbacks) SetOnError(f func(error)) {
	c.mu.Lock()
	c.onError = f
	c.mu.Unlock()
}

// OnError invokes the error handler, if one is set.
func (c *Callbacks) OnError(err error) {
	c.mu.RLock()
	onError := c.onError
	c.mu.RUnlock()

	if onError != nil {
		onError(err)
	}
}

// SetOnDebugDotRequest sets the handler for pipeline dot-graph dump requests.
func (c *Callbacks) SetOnDebugDotRequest(f func(string)) {
	c.mu.Lock()
	c.onDebugDotRequest = f
	c.mu.Unlock()
}

// OnDebugDotRequest invokes the dot-graph handler with the given reason, if set.
func (c *Callbacks) OnDebugDotRequest(reason string) {
	c.mu.RLock()
	onDebugDotRequest := c.onDebugDotRequest
	c.mu.RUnlock()

	if onDebugDotRequest != nil {
		onDebugDotRequest(reason)
	}
}

// PipelinePaused returns a channel closed once the pipeline has paused.
func (c *Callbacks) PipelinePaused() <-chan struct{} {
	return c.pipelinePaused.Watch()
}

// OnPipelinePaused fires the one-shot paused signal.
func (c *Callbacks) OnPipelinePaused() {
	c.pipelinePaused.Break()
}

// AddOnStop registers a stop handler.
func (c *Callbacks) AddOnStop(f func() error) {
	c.mu.Lock()
	c.onStop = append(c.onStop, f)
	c.mu.Unlock()
}

// OnStop runs all stop handlers and returns their combined error.
func (c *Callbacks) OnStop() error {
	c.mu.RLock()
	onStop := c.onStop
	c.mu.RUnlock()

	errArray := &errors.ErrArray{}
	for _, f := range onStop {
		errArray.Check(f())
	}
	return errArray.ToError()
}

// AddOnTrackAdded registers a track-added handler.
func (c *Callbacks) AddOnTrackAdded(f func(*config.TrackSource)) {
	c.mu.Lock()
	c.onTrackAdded = append(c.onTrackAdded, f)
	c.mu.Unlock()
}

// OnTrackAdded invokes all track-added handlers.
func (c *Callbacks) OnTrackAdded(ts *config.TrackSource) {
	c.mu.RLock()
	onTrackAdded := c.onTrackAdded
	c.mu.RUnlock()

	for _, f := range onTrackAdded {
		f(ts)
	}
}

// AddOnTrackMuted registers a track-muted handler.
func (c *Callbacks) AddOnTrackMuted(f func(string)) {
	c.mu.Lock()
	c.onTrackMuted = append(c.onTrackMuted, f)
	c.mu.Unlock()
}

// OnTrackMuted invokes all track-muted handlers.
func (c *Callbacks) OnTrackMuted(trackID string) {
	c.mu.RLock()
	onTrackMuted := c.onTrackMuted
	c.mu.RUnlock()

	for _, f := range onTrackMuted {
		f(trackID)
	}
}

// AddOnTrackUnmuted registers a track-unmuted handler.
func (c *Callbacks) AddOnTrackUnmuted(f func(string)) {
	c.mu.Lock()
	c.onTrackUnmuted = append(c.onTrackUnmuted, f)
	c.mu.Unlock()
}

// OnTrackUnmuted invokes all track-unmuted handlers.
func (c *Callbacks) OnTrackUnmuted(trackID string) {
	c.mu.RLock()
	onTrackUnmuted := c.onTrackUnmuted
	c.mu.RUnlock()

	for _, f := range onTrackUnmuted {
		f(trackID)
	}
}

// AddOnTrackRemoved registers a track-removed handler.
func (c *Callbacks) AddOnTrackRemoved(f func(string)) {
	c.mu.Lock()
	c.onTrackRemoved = append(c.onTrackRemoved, f)
	c.mu.Unlock()
}

// OnTrackRemoved invokes all track-removed handlers.
func (c *Callbacks) OnTrackRemoved(trackID string) {
	c.mu.RLock()
	onTrackRemoved := c.onTrackRemoved
	c.mu.RUnlock()

	for _, f := range onTrackRemoved {
		f(trackID)
	}
}

// AddOnSourceBinReset registers a source-bin reset handler.
func (c *Callbacks) AddOnSourceBinReset(f func(*config.TrackSource) error) {
	c.mu.Lock()
	c.onSourceBinReset = append(c.onSourceBinReset, f)
	c.mu.Unlock()
}

// OnSourceBinReset calls registered handlers to force-remove a stuck source bin and
// replace it with a new one. Each handler checks the track kind and returns nil if
// not applicable. The first handler that returns a non-nil error aborts the operation.
// On success, ts.AppSrc is updated to the new appsrc by the handler.
func (c *Callbacks) OnSourceBinReset(ts *config.TrackSource) error {
	c.mu.RLock()
	handlers := c.onSourceBinReset
	c.mu.RUnlock()

	for _, f := range handlers {
		if err := f(ts); err != nil {
			return err
		}
	}
	return nil
}

// SetOnEOSSent sets the handler invoked after EOS has been sent.
func (c *Callbacks) SetOnEOSSent(f func()) {
	c.mu.Lock()
	c.onEOSSent = f
	c.mu.Unlock()
}

// OnEOSSent invokes the EOS-sent handler, if one is set.
func (c *Callbacks) OnEOSSent() {
	c.mu.RLock()
	onEOSSent := c.onEOSSent
	c.mu.RUnlock()

	if onEOSSent != nil {
		onEOSSent()
	}
}

================================================
FILE: pkg/gstreamer/pads.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package gstreamer

import (
	"fmt"
	"strings"

	"github.com/go-gst/go-gst/gst"
	"go.uber.org/atomic"

	"github.com/livekit/egress/pkg/errors"
	"github.com/livekit/protocol/logger"
)

// padTemplate pairs an element with one of its pad templates, plus the caps
// names and data types parsed from it, used when matching src to sink pads.
type padTemplate struct {
	element   *gst.Element
	template  *gst.PadTemplate
	capsNames map[string]struct{}
	dataTypes map[string]struct{}
}

// toPad resolves the template to a pad: static for always-present templates,
// otherwise a request pad.
func (p *padTemplate) toPad() *gst.Pad {
	if p.template.Presence() == gst.PadPresenceAlways {
		return p.element.GetStaticPad(p.template.Name())
	}
	return p.element.GetRequestPad(p.template.Name())
}

// findDirectMatch returns the first template sharing a caps name or data type with p.
func (p *padTemplate) findDirectMatch(others []*padTemplate) *padTemplate {
	for _, other := range others {
		for capsName := range p.capsNames {
			if _, ok := other.capsNames[capsName]; ok {
				return other
			}
		}
		for dataType := range p.dataTypes {
			if _, ok := other.dataTypes[dataType]; ok {
				return other
			}
		}
	}
	return nil
}

// findAnyMatch returns the first pairing where either side declares ANY caps.
func (p *padTemplate) findAnyMatch(others []*padTemplate) *padTemplate {
	for _, other := range others {
		if _, ok := p.dataTypes["ANY"]; ok {
			return other
		}
		if _, ok := other.dataTypes["ANY"]; ok {
			return other
		}
	}
	return nil
}

// createGhostPadsLocked matches pads between src and sink, wraps them in ghost
// pads registered on both bins, and installs an EOS-recording probe on the src
// side. If queue is non-nil, it is spliced in front of the sink pad.
// Callers must hold the peer bin mutexes.
func createGhostPadsLocked(src, sink *Bin, queue *gst.Element) (*gst.GhostPad, *gst.GhostPad, error) {
	srcName := src.bin.GetName()
	sinkName := sink.bin.GetName()

	srcPad, sinkPad, err := matchPadsLocked(src, sink)
	if err != nil {
		return nil, nil, err
	}

	// record downstream EOS so later removal logic can detect it
	eosSeen := &atomic.Bool{}
	src.eosSeen[sinkName] = eosSeen
	srcPad.AddProbe(gst.PadProbeTypeEventDownstream, func(_ *gst.Pad, info *gst.PadProbeInfo) gst.PadProbeReturn {
		if event := info.GetEvent(); event != nil && event.Type() == gst.EventTypeEOS {
			eosSeen.Store(true)
		}
		return gst.PadProbeOK
	})

	srcGhostPad := gst.NewGhostPad(fmt.Sprintf("%s_%s_sink", srcName, sinkName), srcPad)
	src.pads[sinkName] = srcGhostPad
	src.bin.AddPad(srcGhostPad.Pad)

	if queue != nil {
		// sink-side ghost pad targets the queue, and the queue feeds the real sink pad
		if padReturn := queue.GetStaticPad("src").Link(sinkPad); padReturn != gst.PadLinkOK {
			return nil, nil, errors.ErrPadLinkFailed(queue.GetName(), sinkName, padReturn.String())
		}
		sinkGhostPad := gst.NewGhostPad(fmt.Sprintf("%s_%s_src", srcName, sinkName), queue.GetStaticPad("sink"))
		sink.pads[srcName] = sinkGhostPad
		sink.bin.AddPad(sinkGhostPad.Pad)
		return srcGhostPad, sinkGhostPad, nil
	}

	sinkGhostPad := gst.NewGhostPad(fmt.Sprintf("%s_%s_src", srcName, sinkName), sinkPad)
	sink.pads[srcName] = sinkGhostPad
	sink.bin.AddPad(sinkGhostPad.Pad)
	return srcGhostPad, sinkGhostPad, nil
}

// matchPadsLocked selects a compatible (src pad, sink pad) pair, preferring
// custom pad getters, then direct caps/type matches, then ANY-caps matches.
func matchPadsLocked(src, sink *Bin) (*gst.Pad, *gst.Pad, error) {
	var srcPad, sinkPad *gst.Pad
	var srcTemplates, sinkTemplates []*padTemplate

	if src.getSinkPad != nil {
		srcPad = src.getSinkPad(sink.bin.GetName())
	} else {
		srcTemplates = src.getPadTemplatesLocked(gst.PadDirectionSource)
	}
	if sink.getSrcPad != nil {
		sinkPad = sink.getSrcPad(src.bin.GetName())
	} else {
		sinkTemplates = sink.getPadTemplatesLocked(gst.PadDirectionSink)
	}

	switch {
	case srcPad != nil && sinkPad != nil:
		return srcPad, sinkPad, nil
	case srcPad != nil && len(sinkTemplates) == 1:
		return srcPad, sinkTemplates[0].toPad(), nil
	case sinkPad != nil && len(srcTemplates) == 1:
		return srcTemplates[0].toPad(), sinkPad, nil
	case len(srcTemplates) > 0 && len(sinkTemplates) > 0:
		for _, srcTemplate := range srcTemplates {
			if sinkTemplate := srcTemplate.findDirectMatch(sinkTemplates); sinkTemplate != nil {
				return srcTemplate.toPad(), sinkTemplate.toPad(), nil
			}
		}
		for _, srcTemplate := range srcTemplates {
			if sinkTemplate := srcTemplate.findAnyMatch(sinkTemplates); sinkTemplate != nil {
				return srcTemplate.toPad(), sinkTemplate.toPad(), nil
			}
		}
	}

	logger.Warnw("could not match pads", nil,
		"src", src.bin.GetName(),
		"sink", sink.bin.GetName(),
		"srcTemplates", srcTemplates,
		"sinkTemplates", sinkTemplates)
	return nil, nil, errors.ErrGhostPadFailed
}

// getPadTemplatesLocked collects the boundary element's pad templates for the
// given direction, resolving ANY-caps templates from neighboring elements when
// possible. Caller holds b.mu.
func (b *Bin) getPadTemplatesLocked(direction gst.PadDirection) []*padTemplate {
	var element *gst.Element
	if direction == gst.PadDirectionSource {
		element = b.elements[len(b.elements)-1]
	} else {
		element = b.elements[0]
	}

	allTemplates := element.GetPadTemplates()
	templates := make([]*padTemplate, 0)
	for _, template := range allTemplates {
		if template.Direction() == direction {
			t := &padTemplate{
				element:   element,
				template:  template,
				capsNames: make(map[string]struct{}),
				dataTypes: make(map[string]struct{}),
			}
			caps := template.Caps()
			if caps.IsAny() {
				if strings.HasPrefix(template.Name(), direction.String()) {
					// src/src_%u/sink/sink_%u pad
					capsNames, dataTypes, ok := b.getTypesLocked(direction)
					if ok {
						t.capsNames = capsNames
						t.dataTypes = dataTypes
					} else {
						t.dataTypes["ANY"] = struct{}{}
					}
				} else {
					// audio/audio_%u/video/video_%u pad
					dataType := template.Name()
					dataType = strings.TrimSuffix(dataType, "_%u")
					t.dataTypes[dataType] = struct{}{}
				}
			} else {
				// pad has caps
				splitCaps := strings.Split(caps.String(), "; ")
				for _, c := range splitCaps {
					capsName := strings.SplitN(c, ",", 2)[0]
					t.capsNames[capsName] = struct{}{}
					t.dataTypes[strings.Split(capsName, "/")[0]] = struct{}{}
				}
			}
			templates = append(templates, t)
		}
	}

	return templates
}

// getTypesLocked walks this bin's elements (backwards for src direction,
// forwards for sink) looking for the first concrete src caps, then recurses
// into child bins. Caller holds b.mu; child mutexes are taken in parent->child
// order per the locking rules.
func (b *Bin) getTypesLocked(direction gst.PadDirection) (map[string]struct{}, map[string]struct{}, bool) {
	var i int
	if direction == gst.PadDirectionSource {
		i = len(b.elements) - 1
	}
	for i >= 0 && i < len(b.elements) {
		allTemplates := b.elements[i].GetPadTemplates()
		for _, template := range allTemplates {
			if template.Direction() == gst.PadDirectionSource {
				if caps := template.Caps(); !caps.IsAny() {
					capsNames := make(map[string]struct{})
					dataTypes := make(map[string]struct{})
					splitCaps := strings.Split(caps.String(), ";")
					for _, c := range splitCaps {
						capsName := strings.SplitN(c, ",", 2)[0]
						capsNames[capsName] = struct{}{}
						dataTypes[strings.Split(capsName, "/")[0]] = struct{}{}
					}
					return capsNames, dataTypes, true
				}
			}
		}
		if direction == gst.PadDirectionSource {
			i--
		} else {
			i++
		}
	}

	if direction == gst.PadDirectionSource {
		for _, src := range b.srcs {
			src.mu.Lock()
			capsNames, dataTypes, ok := src.getTypesLocked(direction)
			src.mu.Unlock()
			if ok {
				return capsNames, dataTypes, true
			}
		}
	} else {
		for _, sink := range b.sinks {
			sink.mu.Lock()
			capsNames, dataTypes, ok := sink.getTypesLocked(direction)
			sink.mu.Unlock()
			if ok {
				return capsNames, dataTypes, true
			}
		}
	}

	return nil, nil, false
}

================================================
FILE: pkg/gstreamer/pipeline.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package gstreamer

import (
	"math"
	"time"

	"github.com/go-gst/go-glib/glib"
	"github.com/go-gst/go-gst/gst"

	"github.com/livekit/egress/pkg/errors"
	"github.com/livekit/protocol/logger"
)

const (
	stateChangeTimeout = time.Second * 15
)

// Pipeline wraps the top-level gst pipeline, its GLib main loop, and the
// root Bin.
type Pipeline struct {
	*Bin

	loop          *glib.MainLoop
	binsAdded     bool // set once any src/sink bin is added
	elementsAdded bool // set once any raw element is added
}

// A pipeline can have either elements or src and sink bins.
// If you add both you will get a wrong hierarchy error
// Bins can contain both elements and src and sink bins

// NewPipeline creates a gst pipeline wrapped in a Bin, together with a GLib
// main loop that Run will drive and Stop will quit.
func NewPipeline(name string, latency time.Duration, callbacks *Callbacks) (*Pipeline, error) {
	pipeline, err := gst.NewPipeline(name)
	if err != nil {
		return nil, err
	}

	return &Pipeline{
		Bin: &Bin{
			Callbacks:    callbacks,
			StateManager: &StateManager{},
			pipeline:     pipeline,
			bin:          pipeline.Bin,
			latency:      latency,
			queues:       make(map[string]*gst.Element),
		},
		loop: glib.NewMainLoop(glib.MainContextDefault(), false),
	}, nil
}

// AddSourceBin adds a source bin to the pipeline.
// Returns ErrWrongHierarchy if elements were already added directly.
func (p *Pipeline) AddSourceBin(src *Bin) error {
	if p.elementsAdded {
		return errors.ErrWrongHierarchy
	}
	p.binsAdded = true
	return p.Bin.AddSourceBin(src)
}

// AddSinkBin adds a sink bin to the pipeline.
// Returns ErrWrongHierarchy if elements were already added directly.
func (p *Pipeline) AddSinkBin(sink *Bin) error {
	if p.elementsAdded {
		return errors.ErrWrongHierarchy
	}
	p.binsAdded = true
	return p.Bin.AddSinkBin(sink)
}

// AddElement adds a single element directly to the pipeline.
// Returns ErrWrongHierarchy if source or sink bins were already added.
func (p *Pipeline) AddElement(e *gst.Element) error {
	if p.binsAdded {
		return errors.ErrWrongHierarchy
	}
	p.elementsAdded = true
	return p.Bin.AddElement(e)
}

// AddElements adds multiple elements directly to the pipeline.
// Returns ErrWrongHierarchy if source or sink bins were already added.
func (p *Pipeline) AddElements(elements ...*gst.Element) error {
	if p.binsAdded {
		return errors.ErrWrongHierarchy
	}
	p.elementsAdded = true
	return p.Bin.AddElements(elements...)
}

// Link links the pipeline's internal elements and bins.
func (p *Pipeline) Link() error {
	return p.link()
}

// SetWatch installs a watch function on the pipeline bus.
func (p *Pipeline) SetWatch(watch func(msg *gst.Message) bool) {
	p.pipeline.GetPipelineBus().AddWatch(watch)
}

// SetState sets the gst pipeline state, failing with ErrPipelineFrozen if the
// transition does not complete within stateChangeTimeout. The state change
// runs in a goroutine so a hung transition cannot block the caller forever.
func (p *Pipeline) SetState(state gst.State) error {
	p.mu.Lock()
	defer p.mu.Unlock()

	stateErr := make(chan error, 1)
	go func() {
		stateErr <- p.pipeline.SetState(state)
	}()

	select {
	case <-time.After(stateChangeTimeout):
		return errors.ErrPipelineFrozen
	case err := <-stateErr:
		if err != nil {
			return errors.ErrGstPipelineError(err)
		}
	}
	return nil
}

// Run moves the pipeline to PLAYING and, if the state machine accepts the
// transition to StateRunning, blocks in the GLib main loop until Stop is
// called.
func (p *Pipeline) Run() error {
	if err := p.SetState(gst.StatePlaying); err != nil {
		return err
	}
	if _, ok := p.UpgradeState(StateRunning); ok {
		p.loop.Run()
	}
	return nil
}

// SendEOS moves the state machine to StateEOS. If the pipeline was already
// running, an EOS event is sent downstream; otherwise the pipeline is stopped
// immediately since there is nothing to drain.
func (p *Pipeline) SendEOS() {
	old, ok := p.UpgradeState(StateEOS)
	if ok {
		if old >= StateRunning {
			p.sendEOS()
		} else {
			p.Stop()
		}
	}
}

// Stop tears the pipeline down: runs OnStop callbacks, sets the pipeline to
// NULL, quits the main loop if it was running, and marks the state machine
// finished. A second call is a no-op (UpgradeState refuses to move backward).
func (p *Pipeline) Stop() {
	logger.Debugw("stopping pipeline")

	old, ok := p.UpgradeState(StateStopping)
	if !ok {
		return
	}

	if err := p.OnStop(); err != nil {
		p.OnError(err)
	}
	if err := p.SetState(gst.StateNull); err != nil {
		logger.Errorw("failed to set pipeline to null", err)
	}
	if old >= StateRunning {
		p.loop.Quit()
	}
	p.UpgradeState(StateFinished)
}

// DebugBinToDotData returns a graphviz dot representation of the pipeline.
func (p *Pipeline) DebugBinToDotData(details gst.DebugGraphDetails) string {
	return p.pipeline.DebugBinToDotData(details)
}

// RunningTime returns the running time of the gst pipeline
// (clock time minus base time), clamped defensively against
// missing clock, unset base time, and int64 overflow.
func (p *Pipeline) RunningTime() (time.Duration, bool) {
	clock := p.pipeline.GetPipelineClock()
	if clock == nil {
		return 0, false
	}
	clockTime := clock.GetTime()
	if clockTime == gst.ClockTimeNone {
		return 0, false
	}
	baseTime := p.pipeline.GetBaseTime()
	if baseTime == gst.ClockTimeNone {
		return 0, false
	}
	clockValue := uint64(clockTime)
	baseValue := uint64(baseTime)
	if clockValue < baseValue {
		return 0, false
	}
	delta := clockValue - baseValue
	if delta > uint64(math.MaxInt64) {
		return time.Duration(math.MaxInt64), false
	}
	return time.Duration(int64(delta)), true
}

// PlayheadPosition returns the playhead position of the gst pipeline
// It is equivalent to the last timestamp seen by a sink element
func (p *Pipeline) PlayheadPosition() (time.Duration, bool) {
	ok, position := p.pipeline.QueryPosition(gst.FormatTime)
	if !ok || position < 0 {
		return 0, false
	}
	return time.Duration(position), true
}


================================================
FILE: pkg/gstreamer/queue_monitor.go
================================================
// Copyright 2026 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package gstreamer

import (
	"github.com/go-gst/go-gst/gst"
	"go.uber.org/atomic"

	"github.com/livekit/protocol/logger"
)

// LeakyQueueMonitor tracks buffer flow through a leaky queue to detect dropped buffers.
// It uses pad probes to count buffers in and out, then calculates drops as:
// dropped = inCount - outCount
type LeakyQueueMonitor struct {
	name  string
	queue *gst.Element

	// counters are incremented from pad-probe callbacks, hence atomic
	inCount  atomic.Uint64
	outCount atomic.Uint64
	eosSeen  atomic.Bool
}

// NewLeakyQueueMonitor creates a monitor for the given queue element and attaches
// pad probes to track buffer flow.
func NewLeakyQueueMonitor(name string, queue *gst.Element) { m := &LeakyQueueMonitor{ name: name, queue: queue, } sinkPad := queue.GetStaticPad("sink") if sinkPad != nil { sinkPad.AddProbe(gst.PadProbeTypeBuffer, func(_ *gst.Pad, _ *gst.PadProbeInfo) gst.PadProbeReturn { m.inCount.Inc() return gst.PadProbeOK }) } else { logger.Warnw("failed to get sink pad for queue monitor", nil, "queue", name) } srcPad := queue.GetStaticPad("src") if srcPad != nil { srcPad.AddProbe(gst.PadProbeTypeBuffer, func(_ *gst.Pad, _ *gst.PadProbeInfo) gst.PadProbeReturn { m.outCount.Inc() return gst.PadProbeOK }) srcPad.AddProbe(gst.PadProbeTypeEventDownstream, func(_ *gst.Pad, info *gst.PadProbeInfo) gst.PadProbeReturn { if event := info.GetEvent(); event != nil && event.Type() == gst.EventTypeEOS { if !m.eosSeen.Swap(true) { m.postEOSStats() } return gst.PadProbeRemove } return gst.PadProbeOK }) } else { logger.Warnw("failed to get src pad for queue monitor", nil, "queue", name) } } const LeakyQueueStatsMessage = "LeakyQueueStats" func (m *LeakyQueueMonitor) postEOSStats() { if m.queue == nil { return } inCount := m.inCount.Load() outCount := m.outCount.Load() dropped := uint64(0) if outCount <= inCount { dropped = inCount - outCount } st := gst.NewStructure(LeakyQueueStatsMessage) err := st.SetValue("queue", m.name) if err != nil { logger.Debugw("failed to set queue name", err, "queue", m.name) return } err = st.SetValue("in", inCount) if err != nil { logger.Debugw("failed to set in count", err, "queue", m.name) return } err = st.SetValue("out", outCount) if err != nil { logger.Debugw("failed to set out count", err, "queue", m.name) return } err = st.SetValue("dropped", dropped) if err != nil { logger.Debugw("failed to set dropped count", err, "queue", m.name) return } msg := gst.NewElementMessage(m.queue, st) if msg == nil { logger.Debugw("failed to build leaky queue stats message", nil, "queue", m.name) return } if ok := m.queue.PostMessage(msg); !ok { logger.Debugw("failed to post 
leaky queue stats message", nil, "queue", m.name) } } // Name returns the name of the monitored queue func (m *LeakyQueueMonitor) Name() string { return m.name } ================================================ FILE: pkg/gstreamer/state.go ================================================ // Copyright 2023 LiveKit, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package gstreamer import ( "fmt" "github.com/linkdata/deadlock" "github.com/livekit/protocol/logger" ) type State int const ( StateBuilding State = iota StateStarted StateRunning StateEOS StateStopping StateFinished ) type StateManager struct { lock deadlock.RWMutex state State } func (s *StateManager) GetState() State { s.lock.RLock() defer s.lock.RUnlock() return s.state } func (s *StateManager) GetStateLocked() State { return s.state } func (s *StateManager) LockState() { s.lock.Lock() } func (s *StateManager) UnlockState() { s.lock.Unlock() } func (s *StateManager) LockStateShared() { s.lock.RLock() } func (s *StateManager) UnlockStateShared() { s.lock.RUnlock() } func (s *StateManager) UpgradeState(state State) (State, bool) { s.lock.Lock() defer s.lock.Unlock() old := s.state if old >= state { return old, false } logger.Debugw(fmt.Sprintf("pipeline state %v -> %v", old, state)) s.state = state return old, true } func (s State) String() string { switch s { case StateBuilding: return "building" case StateStarted: return "starting" case StateRunning: return "running" case StateEOS: return "eos" case 
StateStopping: return "stopping" case StateFinished: return "finished" default: return "unknown" } } ================================================ FILE: pkg/gstreamer/time_provider.go ================================================ // Copyright 2025 LiveKit, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package gstreamer import ( "time" ) // TimeProvider supplies the running time and playhead position of a pipeline. type TimeProvider interface { RunningTime() (time.Duration, bool) PlayheadPosition() (time.Duration, bool) } var _ TimeProvider = (*nopTimeProvider)(nil) type nopTimeProvider struct{} // NopTimeProvider returns a TimeProvider that always reports unavailable times. func NopTimeProvider() TimeProvider { return &nopTimeProvider{} } func (n *nopTimeProvider) RunningTime() (time.Duration, bool) { return 0, false } func (n *nopTimeProvider) PlayheadPosition() (time.Duration, bool) { return 0, false } ================================================ FILE: pkg/handler/handler.go ================================================ // Copyright 2023 LiveKit, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package handler

import (
	"context"
	"errors"
	"path"
	"strings"
	"time"

	"github.com/frostbyte73/core"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
	"google.golang.org/grpc"

	"go.opentelemetry.io/otel"

	"github.com/livekit/egress/pkg/config"
	"github.com/livekit/egress/pkg/ipc"
	"github.com/livekit/egress/pkg/pipeline"
	"github.com/livekit/protocol/livekit"
	"github.com/livekit/protocol/logger"
	"github.com/livekit/protocol/rpc"
	"github.com/livekit/psrpc"
)

// Handler runs a single egress in its own process. It serves IPC (grpc over a
// unix socket) and psrpc requests for that egress, and drives the pipeline
// controller.
type Handler struct {
	ipc.UnimplementedEgressHandlerServer

	conf             *config.PipelineConfig
	controller       *pipeline.Controller
	rpcServer        rpc.EgressHandlerServer
	ipcHandlerServer *grpc.Server
	ipcServiceClient ipc.EgressServiceClient
	// initialized breaks once controller creation finished (successfully or
	// not); readers must check controller != nil after waiting on it.
	initialized core.Fuse
	kill        core.Fuse
}

var (
	tracer = otel.Tracer("github.com/livekit/egress/pkg/handler")
)

// NewHandler wires up IPC to the parent service, the handler-side IPC server,
// and the psrpc topics for this egress, then notifies the service it is ready.
func NewHandler(conf *config.PipelineConfig, bus psrpc.MessageBus) (*Handler, error) {
	// Register all GO process metrics
	prometheus.Unregister(collectors.NewGoCollector())
	prometheus.MustRegister(collectors.NewGoCollector(collectors.WithGoCollectorRuntimeMetrics(collectors.MetricsAll)))

	ipcClient, err := ipc.NewServiceClient(path.Join(config.TmpDir, conf.NodeID))
	if err != nil {
		return nil, err
	}
	// Forward storage events to the parent service over IPC.
	conf.StorageObserver = &ipcStorageObserver{client: ipcClient}

	h := &Handler{
		conf:             conf,
		ipcHandlerServer: grpc.NewServer(),
		ipcServiceClient: ipcClient,
	}

	ipc.RegisterEgressHandlerServer(h.ipcHandlerServer, h)
	if err = ipc.StartHandlerListener(h.ipcHandlerServer, path.Join(config.TmpDir, conf.HandlerID)); err != nil {
		return nil, err
	}

	rpcServer, err := rpc.NewEgressHandlerServer(h, bus)
	if err != nil {
		return nil, err
	}
	if err = rpcServer.RegisterUpdateStreamTopic(conf.Info.EgressId); err != nil {
		return nil, err
	}
	if err = rpcServer.RegisterStopEgressTopic(conf.Info.EgressId); err != nil {
		return nil, err
	}
	if err = rpcServer.RegisterUpdateEgressTopic(conf.Info.EgressId); err != nil {
		return nil, err
	}
	h.rpcServer = rpcServer

	_, err = h.ipcServiceClient.HandlerReady(context.Background(), &ipc.HandlerReadyRequest{EgressId: conf.Info.EgressId})
	if err != nil {
		logger.Errorw("failed to notify service", err)
		return nil, err
	}

	return h, nil
}

// Run builds the pipeline controller, runs the egress to completion, and
// reports the final info and metrics back to the service over IPC.
func (h *Handler) Run() {
	ctx, span := tracer.Start(context.Background(), "Handler.Run")
	defer span.End()
	defer func() {
		h.rpcServer.Shutdown()
		h.ipcHandlerServer.Stop()
	}()

	var err error
	egressID := h.conf.Info.EgressId

	// Test-only path: fail the egress immediately without building a pipeline.
	if h.shouldInjectEgressFailure() {
		logger.Infow("injecting egress failure", "egressID", egressID)
		err = errors.New("test failure injection")
		h.conf.Info.SetFailed(err)
		_, err = h.ipcServiceClient.HandlerUpdate(context.Background(), h.conf.Info)
		if err != nil {
			logger.Errorw("egress update ipc call failed", err, "egressID", egressID)
		}
		return
	}

	h.controller, err = pipeline.New(context.Background(), h.conf, h.ipcServiceClient)
	// Break initialized after assigning controller so waiters see the result.
	h.initialized.Break()
	if err != nil {
		h.conf.Info.SetFailed(err)
		_, err = h.ipcServiceClient.HandlerUpdate(context.Background(), h.conf.Info)
		if err != nil {
			logger.Errorw("egress update ipc call failed", err, "egressID", egressID)
		}
		return
	}

	// Replay coordination: signal ready and get timing
	if h.conf.IsReplay() {
		rctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
		resp, err := h.ipcServiceClient.ReplayReady(rctx, &rpc.EgressReadyRequest{
			EgressId: h.conf.Info.EgressId,
		})
		cancel()
		if err != nil {
			h.conf.Info.SetFailed(err)
			_, _ = h.ipcServiceClient.HandlerUpdate(context.Background(), h.conf.Info)
			return
		}
		h.controller.SetReplayTiming(resp.StartAt, resp.DurationMs)
	}

	// start egress
	res := h.controller.Run(ctx)

	m, err := h.GenerateMetrics(ctx)
	if err != nil {
		logger.Errorw("failed to generate handler metrics", err, "egressID", egressID)
	}
	_, err = h.ipcServiceClient.HandlerFinished(ctx, &ipc.HandlerFinishedRequest{
		EgressId: egressID,
		Metrics:  m,
		Info:     res,
	})
	if err != nil {
		logger.Errorw("egress finished ipc call failed", err, "egressID", egressID)
	}
}

// Kill waits for initialization and then asks the controller to end the
// egress with EndReasonKilled. No-op if the controller failed to build.
func (h *Handler) Kill() {
	<-h.initialized.Watch()
	if h.controller == nil {
		return
	}
	h.controller.SendEOS(context.Background(), livekit.EndReasonKilled)
}

// shouldInjectEgressFailure reports whether this egress matches the
// test-override failure-injection room (first attempt only).
func (h *Handler) shouldInjectEgressFailure() bool {
	if h.conf.TestOverrides.FailureInjectionRoom == "" {
		return false
	}
	// Only inject on the first attempt so retries can succeed.
	if h.conf.Info.RetryCount > 0 {
		return false
	}
	return strings.Contains(h.conf.Info.RoomName, h.conf.TestOverrides.FailureInjectionRoom)
}

// ipcStorageObserver forwards storage events from the pipeline to the parent
// service over IPC.
type ipcStorageObserver struct {
	client ipc.EgressServiceClient
}

func (o *ipcStorageObserver) OnStorageEvent(egressID, operation, path string, size, lifetimeDays int64) {
	_, err := o.client.StorageEvent(context.Background(), &ipc.StorageEventRequest{
		EgressId:     egressID,
		Operation:    operation,
		Path:         path,
		Size:         size,
		LifetimeDays: lifetimeDays,
	})
	if err != nil {
		logger.Errorw("storage event ipc call failed", err, "egressID", egressID)
	}
}


================================================
FILE: pkg/handler/handler_ipc.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package handler

import (
	"context"
	"strings"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"google.golang.org/protobuf/types/known/emptypb"

	"github.com/livekit/egress/pkg/errors"
	"github.com/livekit/egress/pkg/ipc"
	"github.com/livekit/protocol/livekit"
	"github.com/livekit/protocol/logger"
	"github.com/livekit/protocol/pprof"
	"github.com/livekit/psrpc"
)

// GetPipelineDot returns a graphviz dot dump of the running gst pipeline.
func (h *Handler) GetPipelineDot(ctx context.Context, _ *ipc.GstPipelineDebugDotRequest) (*ipc.GstPipelineDebugDotResponse, error) {
	_, span := tracer.Start(ctx, "Handler.GetPipelineDot")
	defer span.End()

	<-h.initialized.Watch()
	if h.controller == nil {
		// egress handler is shutting down on error
		return nil, errors.ErrEgressNotFound
	}

	r, err := h.controller.GetGstPipelineDebugDot()
	if err != nil {
		return nil, err
	}
	return &ipc.GstPipelineDebugDotResponse{
		DotFile: r,
	}, nil
}

// GetPProf collects the requested pprof profile from this handler process.
func (h *Handler) GetPProf(ctx context.Context, req *ipc.PProfRequest) (*ipc.PProfResponse, error) {
	ctx, span := tracer.Start(ctx, "Handler.GetPProf")
	defer span.End()

	<-h.initialized.Watch()
	if h.controller == nil {
		// egress handler is shutting down on error
		return nil, errors.ErrEgressNotFound
	}

	b, err := pprof.GetProfileData(ctx, req.ProfileName, int(req.Timeout), int(req.Debug))
	if err != nil {
		return nil, err
	}
	return &ipc.PProfResponse{
		PprofFile: b,
	}, nil
}

// GetMetrics implement the handler-side gathering of metrics to return over IPC
func (h *Handler) GetMetrics(ctx context.Context, _ *ipc.MetricsRequest) (*ipc.MetricsResponse, error) {
	ctx, span := tracer.Start(ctx, "Handler.GetMetrics")
	defer span.End()

	metricsAsString, err := h.GenerateMetrics(ctx)
	if err != nil {
		return nil, err
	}
	return &ipc.MetricsResponse{
		Metrics: metricsAsString,
	}, nil
}

// GenerateMetrics gathers all registered prometheus metrics and renders them
// to the text exposition format.
func (h *Handler) GenerateMetrics(_ context.Context) (string, error) {
	metrics, err := prometheus.DefaultGatherer.Gather()
	if err != nil {
		return "", err
	}
	return renderMetrics(metrics)
}

// renderMetrics writes each metric family to the prometheus text format and
// returns the concatenated result.
func renderMetrics(metrics []*dto.MetricFamily) (string, error) {
	writer := &strings.Builder{}
	for _, metric := range metrics {
		// The written-bytes count from MetricFamilyToText is not needed.
		// (A previous version accumulated it into an unused counter.)
		if _, err := expfmt.MetricFamilyToText(writer, metric); err != nil {
			logger.Errorw("error writing metric family", err)
			return "", err
		}
	}
	return writer.String(), nil
}

// KillEgress ends the egress with EndReasonKilled and records the error from
// the request on the egress info.
func (h *Handler) KillEgress(ctx context.Context, req *ipc.KillEgressRequest) (*emptypb.Empty, error) {
	ctx, span := tracer.Start(ctx, "Handler.KillEgress")
	defer span.End()

	<-h.initialized.Watch()
	if h.controller == nil {
		// failed to start controller
		return &emptypb.Empty{}, nil
	}

	h.controller.SendEOS(ctx, livekit.EndReasonKilled)
	h.controller.Info.SetFailed(psrpc.NewErrorf(psrpc.PermissionDenied, "%s", req.Error))
	return &emptypb.Empty{}, nil
}


================================================
FILE: pkg/handler/handler_rpc.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package handler

import (
	"context"

	"github.com/livekit/egress/pkg/errors"
	"github.com/livekit/protocol/livekit"
)

// awaitController blocks until handler initialization has completed and
// returns ErrEgressNotFound if no pipeline controller is available.
func (h *Handler) awaitController() error {
	<-h.initialized.Watch()
	if h.controller == nil {
		return errors.ErrEgressNotFound
	}
	return nil
}

// UpdateStream applies a stream update request to the running egress.
func (h *Handler) UpdateStream(ctx context.Context, req *livekit.UpdateStreamRequest) (*livekit.EgressInfo, error) {
	ctx, span := tracer.Start(ctx, "Handler.UpdateStream")
	defer span.End()

	if err := h.awaitController(); err != nil {
		return nil, err
	}
	if err := h.controller.UpdateStream(ctx, req); err != nil {
		return nil, err
	}
	return h.controller.Info, nil
}

// UpdateEgress applies an egress update request to the running egress.
func (h *Handler) UpdateEgress(ctx context.Context, req *livekit.UpdateEgressRequest) (*livekit.EgressInfo, error) {
	ctx, span := tracer.Start(ctx, "Handler.UpdateEgress")
	defer span.End()

	if err := h.awaitController(); err != nil {
		return nil, err
	}
	if err := h.controller.UpdateEgress(ctx, req); err != nil {
		return nil, err
	}
	return h.controller.Info, nil
}

// StopEgress ends the egress gracefully with EndReasonAPI.
func (h *Handler) StopEgress(ctx context.Context, _ *livekit.StopEgressRequest) (*livekit.EgressInfo, error) {
	ctx, span := tracer.Start(ctx, "Handler.StopEgress")
	defer span.End()

	if err := h.awaitController(); err != nil {
		return nil, err
	}
	h.controller.SendEOS(ctx, livekit.EndReasonAPI)
	return h.controller.Info, nil
}


================================================
FILE: pkg/info/io.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package info

import (
	"context"
	"hash/fnv"
	"math"
	"strings"
	"time"

	"github.com/frostbyte73/core"
	"github.com/linkdata/deadlock"

	"github.com/livekit/protocol/egress"
	"github.com/livekit/protocol/livekit"
	"github.com/livekit/protocol/logger"
	"github.com/livekit/protocol/rpc"
	"github.com/livekit/psrpc"

	"github.com/livekit/egress/pkg/config"
	"github.com/livekit/egress/pkg/errors"
)

const (
	numWorkers = 5
	maxBackoff = time.Minute * 1

	unhealthyShutdownWatchdogDelay = 20 * time.Second // TODO change to 10 min once we understand PSRPC failures
)

// SessionReporter reports egress lifecycle updates to the IO service.
type SessionReporter interface {
	CreateEgress(ctx context.Context, info *livekit.EgressInfo) chan error
	UpdateEgress(ctx context.Context, info *livekit.EgressInfo) error
	UpdateMetrics(ctx context.Context, req *rpc.UpdateMetricsRequest) error
	IsHealthy() bool
	SetWatchdogHandler(w func())
	Drain()
}

// sessionReporter shards updates per egress ID across a fixed worker pool so
// updates for the same egress are serialized, and tracks IO health with a
// watchdog timer.
type sessionReporter struct {
	rpc.IOInfoClient

	createTimeout time.Duration
	updateTimeout time.Duration
	workers       []*worker

	healthyLock            deadlock.Mutex
	healthy                bool
	healthyWatchdogHandler func()
	// healthyTimer fires after unhealthyShutdownWatchdogDelay of continuous
	// unhealthiness; while healthy it is parked at MaxInt64.
	healthyTimer *time.Timer

	draining core.Fuse
	done     core.Fuse
}

// worker serializes updates for the egress IDs hashed to it. Only the latest
// pending update per egress is kept (older ones are overwritten in place).
type worker struct {
	mu       deadlock.Mutex
	creating map[string]*update
	updates  map[string]*update
	queue    chan string
}

type update struct {
	ctx  context.Context
	info *livekit.EgressInfo
}

// NewSessionReporter creates the IO client, health watchdog, and worker pool.
func NewSessionReporter(conf *config.BaseConfig, bus psrpc.MessageBus) (SessionReporter, error) {
	client, err := rpc.NewIOInfoClient(bus, psrpc.WithClientSelectTimeout(conf.IOSelectionTimeout), rpc.WithClientObservability(logger.GetLogger()))
	if err != nil {
		return nil, err
	}

	c := &sessionReporter{
		IOInfoClient:  client,
		createTimeout: conf.IOCreateTimeout,
		updateTimeout: conf.IOUpdateTimeout,
		workers:       make([]*worker, conf.IOWorkers),
	}

	c.healthy = true
	// Watchdog: parked "forever" while healthy, armed by setHealthy(false).
	c.healthyTimer = time.AfterFunc(time.Duration(math.MaxInt64), func() {
		c.healthyLock.Lock()
		defer c.healthyLock.Unlock()

		logger.Errorw("io client watchdog triggered", errors.New("io client unhealthy"))
		if c.healthyWatchdogHandler != nil {
			c.healthyWatchdogHandler()
		}
		// Do not wait for the event queue to drain
		c.done.Break()
	})

	for i := 0; i < conf.IOWorkers; i++ {
		c.workers[i] = &worker{
			creating: make(map[string]*update),
			updates:  make(map[string]*update),
			queue:    make(chan string, 500),
		}
		go c.runWorker(c.workers[i])
	}

	return c, nil
}

// CreateEgress sends the create request asynchronously. An update arriving
// while creation is in flight is parked in w.creating and submitted once the
// create succeeds; on failure, pending updates for the egress are dropped.
func (c *sessionReporter) CreateEgress(ctx context.Context, info *livekit.EgressInfo) chan error {
	u := &update{}
	w := c.getWorker(info.EgressId)
	w.mu.Lock()
	w.creating[info.EgressId] = u
	w.mu.Unlock()

	errChan := make(chan error, 1)
	go func() {
		_, err := c.IOInfoClient.CreateEgress(ctx, info, psrpc.WithRequestTimeout(c.createTimeout))
		w.mu.Lock()
		delete(w.creating, info.EgressId)
		if err != nil {
			logger.Errorw("failed to create egress", err, "egressID", info.EgressId)
			delete(w.updates, info.EgressId)
		} else if u.info != nil {
			err = w.submit(u)
		}
		w.mu.Unlock()
		errChan <- err
	}()
	return errChan
}

// UpdateEgress enqueues an update. If an update for this egress is already
// pending (or creation is in flight), it is coalesced: only the latest info
// is kept.
func (c *sessionReporter) UpdateEgress(ctx context.Context, info *livekit.EgressInfo) error {
	// Detach from caller cancellation: the update should still be delivered.
	ctx = context.WithoutCancel(ctx)
	w := c.getWorker(info.EgressId)
	w.mu.Lock()
	defer w.mu.Unlock()

	u := w.creating[info.EgressId]
	if u == nil {
		u = w.updates[info.EgressId]
	}
	if u != nil {
		u.ctx = ctx
		u.info = info
		return nil
	}

	return w.submit(&update{
		ctx:  ctx,
		info: info,
	})
}

func (c *sessionReporter) UpdateMetrics(_ context.Context, _ *rpc.UpdateMetricsRequest) error {
	return nil
}

// SetWatchdogHandler registers the callback invoked when the health watchdog
// fires.
func (c *sessionReporter) SetWatchdogHandler(w func()) {
	c.healthyLock.Lock()
	defer c.healthyLock.Unlock()
	c.healthyWatchdogHandler = w
}

func (c *sessionReporter) IsHealthy() bool {
	c.healthyLock.Lock()
	defer c.healthyLock.Unlock()
	return c.healthy
}

// Drain stops accepting new work and waits until the workers have flushed
// their queues (or the watchdog aborts the wait).
func (c *sessionReporter) Drain() {
	c.draining.Break()
	<-c.done.Watch()
}

// runWorker processes queued egress IDs; on drain it empties the remaining
// queue and then signals done.
func (c *sessionReporter) runWorker(w *worker) {
	draining := c.draining.Watch()
	for {
		select {
		case egressID := <-w.queue:
			c.handleUpdate(w, egressID)
		case <-draining:
			for {
				select {
				case egressID := <-w.queue:
					c.handleUpdate(w, egressID)
				default:
					c.done.Break()
					return
				}
			}
		}
	}
}

// getWorker maps an egress ID to a worker via FNV-1a so all updates for one
// egress land on the same worker.
func (c *sessionReporter) getWorker(egressID string) *worker {
	h := fnv.New32a()
	_, _ = h.Write([]byte(egressID))
	return c.workers[int(h.Sum32())%len(c.workers)]
}

// submit registers the update and enqueues its egress ID; caller must hold
// w.mu. Fails if the queue is full.
func (w *worker) submit(u *update) error {
	w.updates[u.info.EgressId] = u
	select {
	case w.queue <- u.info.EgressId:
		return nil
	default:
		delete(w.updates, u.info.EgressId)
		return errors.New("queue is full")
	}
}

// handleUpdate sends the latest pending update for egressID, retrying
// retryable errors with exponential backoff (capped at maxBackoff) until the
// context expires, and toggling the health state on failure/success.
func (c *sessionReporter) handleUpdate(w *worker, egressID string) {
	w.mu.Lock()
	u := w.updates[egressID]
	delete(w.updates, egressID)
	w.mu.Unlock()
	if u == nil {
		return
	}

	d := time.Millisecond * 250
	for {
		if _, err := c.IOInfoClient.UpdateEgress(u.ctx, u.info, psrpc.WithRequestTimeout(c.updateTimeout)); err != nil {
			if isRetryableError(err) {
				if c.setHealthy(false) {
					logger.Warnw("io connection unhealthy", err, "egressID", u.info.EgressId)
				}
				logger.Debugw("psrpc IO request failed", "error", err, "egressID", u.info.EgressId)
				d = min(d*2, maxBackoff)
				time.Sleep(d)
				select {
				case <-u.ctx.Done():
					logger.Infow("failed to update egress on expired context", "egressID", u.info.EgressId)
					return
				default:
					continue
				}
			}
			logger.Errorw("failed to update egress", err, "egressID", u.info.EgressId)
			return
		}

		if !c.setHealthy(true) {
			logger.Infow("io connection restored", "egressID", u.info.EgressId)
		}

		// Replay requests log their inner replay payload's types.
		var typesInput any = u.info.Request
		if e, ok := u.info.Request.(*livekit.EgressInfo_Replay); ok {
			typesInput = e.Replay
		}
		requestType, outputType := egress.GetTypes(typesInput)
		logger.Infow(strings.ToLower(u.info.Status.String()),
			"egressID", u.info.EgressId,
			"requestType", requestType,
			"outputType", outputType,
			"error", u.info.Error,
			"code", u.info.ErrorCode,
			"details", u.info.Details,
		)
		return
	}
}

// setHealthy records the new health state, arming the watchdog on a
// healthy->unhealthy transition and parking it on recovery. Returns the
// previous health state.
func (c *sessionReporter) setHealthy(isHealthy bool) bool {
	c.healthyLock.Lock()
	defer c.healthyLock.Unlock()

	oldHealthy := c.healthy
	switch c.healthy {
	case true:
		if !isHealthy {
			c.healthyTimer.Reset(unhealthyShutdownWatchdogDelay)
		}
	case false:
		if isHealthy {
			c.healthyTimer.Reset(time.Duration(math.MaxInt64))
		}
	}
	c.healthy = isHealthy
	return oldHealthy
}

func isRetryableError(err error) bool {
	return errors.Is(err, psrpc.ErrRequestTimedOut) || errors.Is(err, psrpc.ErrNoResponse)
}


================================================
FILE: pkg/ipc/conn.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package ipc

import (
	"net"
	"path"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"github.com/livekit/protocol/logger"
)

const (
	network        = "unix"
	handlerAddress = "handler_ipc.sock"
	serviceAddress = "service_ipc.sock"
)

// EgressHandlerClientWrapper wraps an EgressHandlerClient together with its
// grpc connection so the connection can be closed by the caller.
type EgressHandlerClientWrapper struct {
	EgressHandlerClient
	conn *grpc.ClientConn
}

// StartServiceListener serves the service-side IPC server on a unix socket in
// serviceTmpDir.
func StartServiceListener(ipcServer *grpc.Server, serviceTmpDir string) error {
	listener, err := net.Listen(network, path.Join(serviceTmpDir, serviceAddress))
	if err != nil {
		return err
	}

	go func() {
		if err = ipcServer.Serve(listener); err != nil {
			logger.Errorw("failed to start grpc handler", err)
		}
	}()

	return nil
}

// NewHandlerClient dials the handler-side IPC server over its unix socket.
func NewHandlerClient(handlerTmpDir string) (*EgressHandlerClientWrapper, error) {
	socketAddr := "unix://" + path.Join(handlerTmpDir, handlerAddress)
	conn, err := grpc.NewClient(socketAddr,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
	)
	if err != nil {
		logger.Errorw("could not dial grpc handler", err)
		return nil, err
	}
	return &EgressHandlerClientWrapper{EgressHandlerClient: NewEgressHandlerClient(conn), conn: conn}, nil
}

// StartHandlerListener serves the handler-side IPC server on a unix socket in
// handlerTmpDir.
func StartHandlerListener(ipcServer *grpc.Server, handlerTmpDir string) error {
	listener, err := net.Listen(network, path.Join(handlerTmpDir, handlerAddress))
	if err != nil {
		return err
	}

	go func() {
		if err = ipcServer.Serve(listener); err != nil {
			logger.Errorw("failed to start grpc handler", err)
		}
	}()

	return nil
}

// NewServiceClient dials the service-side IPC server over its unix socket.
func NewServiceClient(serviceTmpDir string) (EgressServiceClient, error) {
	socketAddr := "unix://" + path.Join(serviceTmpDir, serviceAddress)
	conn, err := grpc.NewClient(socketAddr,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
	)
	if err != nil {
		logger.Errorw("could not dial grpc handler", err)
		return nil, err
	}
	return NewEgressServiceClient(conn), nil
}

// Close closes the underlying grpc connection.
func (c EgressHandlerClientWrapper) Close() error {
	return c.conn.Close()
}


================================================
FILE: pkg/ipc/ipc.pb.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// 	protoc-gen-go v1.36.6
// 	protoc        v6.33.0
// source: ipc.proto

package ipc

import (
	livekit "github.com/livekit/protocol/livekit"
	rpc "github.com/livekit/protocol/rpc"
	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
	emptypb "google.golang.org/protobuf/types/known/emptypb"
	reflect "reflect"
	sync "sync"
	unsafe "unsafe"
)

const (
	// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) // Verify that runtime/protoimpl is sufficiently up-to-date. _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) type HandlerReadyRequest struct { state protoimpl.MessageState `protogen:"open.v1"` EgressId string `protobuf:"bytes,1,opt,name=egress_id,json=egressId,proto3" json:"egress_id,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *HandlerReadyRequest) Reset() { *x = HandlerReadyRequest{} mi := &file_ipc_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *HandlerReadyRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*HandlerReadyRequest) ProtoMessage() {} func (x *HandlerReadyRequest) ProtoReflect() protoreflect.Message { mi := &file_ipc_proto_msgTypes[0] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use HandlerReadyRequest.ProtoReflect.Descriptor instead. 
func (*HandlerReadyRequest) Descriptor() ([]byte, []int) { return file_ipc_proto_rawDescGZIP(), []int{0} } func (x *HandlerReadyRequest) GetEgressId() string { if x != nil { return x.EgressId } return "" } type HandlerFinishedRequest struct { state protoimpl.MessageState `protogen:"open.v1"` EgressId string `protobuf:"bytes,1,opt,name=egress_id,json=egressId,proto3" json:"egress_id,omitempty"` Metrics string `protobuf:"bytes,2,opt,name=metrics,proto3" json:"metrics,omitempty"` Info *livekit.EgressInfo `protobuf:"bytes,3,opt,name=info,proto3" json:"info,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *HandlerFinishedRequest) Reset() { *x = HandlerFinishedRequest{} mi := &file_ipc_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *HandlerFinishedRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*HandlerFinishedRequest) ProtoMessage() {} func (x *HandlerFinishedRequest) ProtoReflect() protoreflect.Message { mi := &file_ipc_proto_msgTypes[1] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use HandlerFinishedRequest.ProtoReflect.Descriptor instead. 
func (*HandlerFinishedRequest) Descriptor() ([]byte, []int) { return file_ipc_proto_rawDescGZIP(), []int{1} } func (x *HandlerFinishedRequest) GetEgressId() string { if x != nil { return x.EgressId } return "" } func (x *HandlerFinishedRequest) GetMetrics() string { if x != nil { return x.Metrics } return "" } func (x *HandlerFinishedRequest) GetInfo() *livekit.EgressInfo { if x != nil { return x.Info } return nil } type StorageEventRequest struct { state protoimpl.MessageState `protogen:"open.v1"` EgressId string `protobuf:"bytes,1,opt,name=egress_id,json=egressId,proto3" json:"egress_id,omitempty"` Operation string `protobuf:"bytes,2,opt,name=operation,proto3" json:"operation,omitempty"` Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"` Size int64 `protobuf:"varint,4,opt,name=size,proto3" json:"size,omitempty"` LifetimeDays int64 `protobuf:"varint,5,opt,name=lifetime_days,json=lifetimeDays,proto3" json:"lifetime_days,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *StorageEventRequest) Reset() { *x = StorageEventRequest{} mi := &file_ipc_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *StorageEventRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*StorageEventRequest) ProtoMessage() {} func (x *StorageEventRequest) ProtoReflect() protoreflect.Message { mi := &file_ipc_proto_msgTypes[2] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use StorageEventRequest.ProtoReflect.Descriptor instead. 
func (*StorageEventRequest) Descriptor() ([]byte, []int) { return file_ipc_proto_rawDescGZIP(), []int{2} } func (x *StorageEventRequest) GetEgressId() string { if x != nil { return x.EgressId } return "" } func (x *StorageEventRequest) GetOperation() string { if x != nil { return x.Operation } return "" } func (x *StorageEventRequest) GetPath() string { if x != nil { return x.Path } return "" } func (x *StorageEventRequest) GetSize() int64 { if x != nil { return x.Size } return 0 } func (x *StorageEventRequest) GetLifetimeDays() int64 { if x != nil { return x.LifetimeDays } return 0 } type GstPipelineDebugDotRequest struct { state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *GstPipelineDebugDotRequest) Reset() { *x = GstPipelineDebugDotRequest{} mi := &file_ipc_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *GstPipelineDebugDotRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*GstPipelineDebugDotRequest) ProtoMessage() {} func (x *GstPipelineDebugDotRequest) ProtoReflect() protoreflect.Message { mi := &file_ipc_proto_msgTypes[3] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use GstPipelineDebugDotRequest.ProtoReflect.Descriptor instead. 
func (*GstPipelineDebugDotRequest) Descriptor() ([]byte, []int) { return file_ipc_proto_rawDescGZIP(), []int{3} } type GstPipelineDebugDotResponse struct { state protoimpl.MessageState `protogen:"open.v1"` DotFile string `protobuf:"bytes,1,opt,name=dot_file,json=dotFile,proto3" json:"dot_file,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *GstPipelineDebugDotResponse) Reset() { *x = GstPipelineDebugDotResponse{} mi := &file_ipc_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *GstPipelineDebugDotResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*GstPipelineDebugDotResponse) ProtoMessage() {} func (x *GstPipelineDebugDotResponse) ProtoReflect() protoreflect.Message { mi := &file_ipc_proto_msgTypes[4] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use GstPipelineDebugDotResponse.ProtoReflect.Descriptor instead. 
func (*GstPipelineDebugDotResponse) Descriptor() ([]byte, []int) { return file_ipc_proto_rawDescGZIP(), []int{4} } func (x *GstPipelineDebugDotResponse) GetDotFile() string { if x != nil { return x.DotFile } return "" } type PProfRequest struct { state protoimpl.MessageState `protogen:"open.v1"` ProfileName string `protobuf:"bytes,1,opt,name=profile_name,json=profileName,proto3" json:"profile_name,omitempty"` Timeout int32 `protobuf:"varint,2,opt,name=timeout,proto3" json:"timeout,omitempty"` Debug int32 `protobuf:"varint,3,opt,name=debug,proto3" json:"debug,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *PProfRequest) Reset() { *x = PProfRequest{} mi := &file_ipc_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *PProfRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*PProfRequest) ProtoMessage() {} func (x *PProfRequest) ProtoReflect() protoreflect.Message { mi := &file_ipc_proto_msgTypes[5] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use PProfRequest.ProtoReflect.Descriptor instead. 
func (*PProfRequest) Descriptor() ([]byte, []int) { return file_ipc_proto_rawDescGZIP(), []int{5} } func (x *PProfRequest) GetProfileName() string { if x != nil { return x.ProfileName } return "" } func (x *PProfRequest) GetTimeout() int32 { if x != nil { return x.Timeout } return 0 } func (x *PProfRequest) GetDebug() int32 { if x != nil { return x.Debug } return 0 } type PProfResponse struct { state protoimpl.MessageState `protogen:"open.v1"` PprofFile []byte `protobuf:"bytes,1,opt,name=pprof_file,json=pprofFile,proto3" json:"pprof_file,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *PProfResponse) Reset() { *x = PProfResponse{} mi := &file_ipc_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *PProfResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*PProfResponse) ProtoMessage() {} func (x *PProfResponse) ProtoReflect() protoreflect.Message { mi := &file_ipc_proto_msgTypes[6] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use PProfResponse.ProtoReflect.Descriptor instead. 
func (*PProfResponse) Descriptor() ([]byte, []int) { return file_ipc_proto_rawDescGZIP(), []int{6} } func (x *PProfResponse) GetPprofFile() []byte { if x != nil { return x.PprofFile } return nil } type MetricsRequest struct { state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *MetricsRequest) Reset() { *x = MetricsRequest{} mi := &file_ipc_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *MetricsRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*MetricsRequest) ProtoMessage() {} func (x *MetricsRequest) ProtoReflect() protoreflect.Message { mi := &file_ipc_proto_msgTypes[7] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use MetricsRequest.ProtoReflect.Descriptor instead. func (*MetricsRequest) Descriptor() ([]byte, []int) { return file_ipc_proto_rawDescGZIP(), []int{7} } type MetricsResponse struct { state protoimpl.MessageState `protogen:"open.v1"` Metrics string `protobuf:"bytes,1,opt,name=metrics,proto3" json:"metrics,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *MetricsResponse) Reset() { *x = MetricsResponse{} mi := &file_ipc_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *MetricsResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*MetricsResponse) ProtoMessage() {} func (x *MetricsResponse) ProtoReflect() protoreflect.Message { mi := &file_ipc_proto_msgTypes[8] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use MetricsResponse.ProtoReflect.Descriptor instead. 
func (*MetricsResponse) Descriptor() ([]byte, []int) { return file_ipc_proto_rawDescGZIP(), []int{8} } func (x *MetricsResponse) GetMetrics() string { if x != nil { return x.Metrics } return "" } type KillEgressRequest struct { state protoimpl.MessageState `protogen:"open.v1"` Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *KillEgressRequest) Reset() { *x = KillEgressRequest{} mi := &file_ipc_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *KillEgressRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*KillEgressRequest) ProtoMessage() {} func (x *KillEgressRequest) ProtoReflect() protoreflect.Message { mi := &file_ipc_proto_msgTypes[9] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use KillEgressRequest.ProtoReflect.Descriptor instead. 
func (*KillEgressRequest) Descriptor() ([]byte, []int) { return file_ipc_proto_rawDescGZIP(), []int{9} } func (x *KillEgressRequest) GetError() string { if x != nil { return x.Error } return "" } var File_ipc_proto protoreflect.FileDescriptor const file_ipc_proto_rawDesc = "" + "\n" + "\tipc.proto\x12\x03ipc\x1a\x1bgoogle/protobuf/empty.proto\x1a\x14livekit_egress.proto\x1a\x10rpc/egress.proto\"2\n" + "\x13HandlerReadyRequest\x12\x1b\n" + "\tegress_id\x18\x01 \x01(\tR\begressId\"x\n" + "\x16HandlerFinishedRequest\x12\x1b\n" + "\tegress_id\x18\x01 \x01(\tR\begressId\x12\x18\n" + "\ametrics\x18\x02 \x01(\tR\ametrics\x12'\n" + "\x04info\x18\x03 \x01(\v2\x13.livekit.EgressInfoR\x04info\"\x9d\x01\n" + "\x13StorageEventRequest\x12\x1b\n" + "\tegress_id\x18\x01 \x01(\tR\begressId\x12\x1c\n" + "\toperation\x18\x02 \x01(\tR\toperation\x12\x12\n" + "\x04path\x18\x03 \x01(\tR\x04path\x12\x12\n" + "\x04size\x18\x04 \x01(\x03R\x04size\x12#\n" + "\rlifetime_days\x18\x05 \x01(\x03R\flifetimeDays\"\x1c\n" + "\x1aGstPipelineDebugDotRequest\"8\n" + "\x1bGstPipelineDebugDotResponse\x12\x19\n" + "\bdot_file\x18\x01 \x01(\tR\adotFile\"a\n" + "\fPProfRequest\x12!\n" + "\fprofile_name\x18\x01 \x01(\tR\vprofileName\x12\x18\n" + "\atimeout\x18\x02 \x01(\x05R\atimeout\x12\x14\n" + "\x05debug\x18\x03 \x01(\x05R\x05debug\".\n" + "\rPProfResponse\x12\x1d\n" + "\n" + "pprof_file\x18\x01 \x01(\fR\tpprofFile\"\x10\n" + "\x0eMetricsRequest\"+\n" + "\x0fMetricsResponse\x12\x18\n" + "\ametrics\x18\x01 \x01(\tR\ametrics\")\n" + "\x11KillEgressRequest\x12\x14\n" + "\x05error\x18\x01 \x01(\tR\x05error2\xe5\x02\n" + "\rEgressService\x12B\n" + "\fHandlerReady\x12\x18.ipc.HandlerReadyRequest\x1a\x16.google.protobuf.Empty\"\x00\x12>\n" + "\rHandlerUpdate\x12\x13.livekit.EgressInfo\x1a\x16.google.protobuf.Empty\"\x00\x12H\n" + "\x0fHandlerFinished\x12\x1b.ipc.HandlerFinishedRequest\x1a\x16.google.protobuf.Empty\"\x00\x12B\n" + 
"\vReplayReady\x12\x17.rpc.EgressReadyRequest\x1a\x18.rpc.EgressReadyResponse\"\x00\x12B\n" + "\fStorageEvent\x12\x18.ipc.StorageEventRequest\x1a\x16.google.protobuf.Empty\"\x002\x96\x02\n" + "\rEgressHandler\x12U\n" + "\x0eGetPipelineDot\x12\x1f.ipc.GstPipelineDebugDotRequest\x1a .ipc.GstPipelineDebugDotResponse\"\x00\x123\n" + "\bGetPProf\x12\x11.ipc.PProfRequest\x1a\x12.ipc.PProfResponse\"\x00\x129\n" + "\n" + "GetMetrics\x12\x13.ipc.MetricsRequest\x1a\x14.ipc.MetricsResponse\"\x00\x12>\n" + "\n" + "KillEgress\x12\x16.ipc.KillEgressRequest\x1a\x16.google.protobuf.Empty\"\x00B#Z!github.com/livekit/egress/pkg/ipcb\x06proto3" var ( file_ipc_proto_rawDescOnce sync.Once file_ipc_proto_rawDescData []byte ) func file_ipc_proto_rawDescGZIP() []byte { file_ipc_proto_rawDescOnce.Do(func() { file_ipc_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ipc_proto_rawDesc), len(file_ipc_proto_rawDesc))) }) return file_ipc_proto_rawDescData } var file_ipc_proto_msgTypes = make([]protoimpl.MessageInfo, 10) var file_ipc_proto_goTypes = []any{ (*HandlerReadyRequest)(nil), // 0: ipc.HandlerReadyRequest (*HandlerFinishedRequest)(nil), // 1: ipc.HandlerFinishedRequest (*StorageEventRequest)(nil), // 2: ipc.StorageEventRequest (*GstPipelineDebugDotRequest)(nil), // 3: ipc.GstPipelineDebugDotRequest (*GstPipelineDebugDotResponse)(nil), // 4: ipc.GstPipelineDebugDotResponse (*PProfRequest)(nil), // 5: ipc.PProfRequest (*PProfResponse)(nil), // 6: ipc.PProfResponse (*MetricsRequest)(nil), // 7: ipc.MetricsRequest (*MetricsResponse)(nil), // 8: ipc.MetricsResponse (*KillEgressRequest)(nil), // 9: ipc.KillEgressRequest (*livekit.EgressInfo)(nil), // 10: livekit.EgressInfo (*rpc.EgressReadyRequest)(nil), // 11: rpc.EgressReadyRequest (*emptypb.Empty)(nil), // 12: google.protobuf.Empty (*rpc.EgressReadyResponse)(nil), // 13: rpc.EgressReadyResponse } var file_ipc_proto_depIdxs = []int32{ 10, // 0: ipc.HandlerFinishedRequest.info:type_name -> livekit.EgressInfo 
0, // 1: ipc.EgressService.HandlerReady:input_type -> ipc.HandlerReadyRequest 10, // 2: ipc.EgressService.HandlerUpdate:input_type -> livekit.EgressInfo 1, // 3: ipc.EgressService.HandlerFinished:input_type -> ipc.HandlerFinishedRequest 11, // 4: ipc.EgressService.ReplayReady:input_type -> rpc.EgressReadyRequest 2, // 5: ipc.EgressService.StorageEvent:input_type -> ipc.StorageEventRequest 3, // 6: ipc.EgressHandler.GetPipelineDot:input_type -> ipc.GstPipelineDebugDotRequest 5, // 7: ipc.EgressHandler.GetPProf:input_type -> ipc.PProfRequest 7, // 8: ipc.EgressHandler.GetMetrics:input_type -> ipc.MetricsRequest 9, // 9: ipc.EgressHandler.KillEgress:input_type -> ipc.KillEgressRequest 12, // 10: ipc.EgressService.HandlerReady:output_type -> google.protobuf.Empty 12, // 11: ipc.EgressService.HandlerUpdate:output_type -> google.protobuf.Empty 12, // 12: ipc.EgressService.HandlerFinished:output_type -> google.protobuf.Empty 13, // 13: ipc.EgressService.ReplayReady:output_type -> rpc.EgressReadyResponse 12, // 14: ipc.EgressService.StorageEvent:output_type -> google.protobuf.Empty 4, // 15: ipc.EgressHandler.GetPipelineDot:output_type -> ipc.GstPipelineDebugDotResponse 6, // 16: ipc.EgressHandler.GetPProf:output_type -> ipc.PProfResponse 8, // 17: ipc.EgressHandler.GetMetrics:output_type -> ipc.MetricsResponse 12, // 18: ipc.EgressHandler.KillEgress:output_type -> google.protobuf.Empty 10, // [10:19] is the sub-list for method output_type 1, // [1:10] is the sub-list for method input_type 1, // [1:1] is the sub-list for extension type_name 1, // [1:1] is the sub-list for extension extendee 0, // [0:1] is the sub-list for field type_name } func init() { file_ipc_proto_init() } func file_ipc_proto_init() { if File_ipc_proto != nil { return } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_ipc_proto_rawDesc), len(file_ipc_proto_rawDesc)), NumEnums: 0, 
NumMessages: 10, NumExtensions: 0, NumServices: 2, }, GoTypes: file_ipc_proto_goTypes, DependencyIndexes: file_ipc_proto_depIdxs, MessageInfos: file_ipc_proto_msgTypes, }.Build() File_ipc_proto = out.File file_ipc_proto_goTypes = nil file_ipc_proto_depIdxs = nil } ================================================ FILE: pkg/ipc/ipc.proto ================================================ // Copyright 2023 LiveKit, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. syntax = "proto3"; package ipc; option go_package = "github.com/livekit/egress/pkg/ipc"; import "google/protobuf/empty.proto"; import "livekit_egress.proto"; import "rpc/egress.proto"; service EgressService { rpc HandlerReady(HandlerReadyRequest) returns (google.protobuf.Empty) {}; rpc HandlerUpdate(livekit.EgressInfo) returns (google.protobuf.Empty) {}; rpc HandlerFinished(HandlerFinishedRequest) returns (google.protobuf.Empty) {}; rpc ReplayReady(rpc.EgressReadyRequest) returns (rpc.EgressReadyResponse) {}; rpc StorageEvent(StorageEventRequest) returns (google.protobuf.Empty) {}; } message HandlerReadyRequest { string egress_id = 1; } message HandlerFinishedRequest { string egress_id = 1; string metrics = 2; livekit.EgressInfo info = 3; } message StorageEventRequest { string egress_id = 1; string operation = 2; string path = 3; int64 size = 4; int64 lifetime_days = 5; } service EgressHandler { rpc GetPipelineDot(GstPipelineDebugDotRequest) returns (GstPipelineDebugDotResponse) {}; rpc GetPProf(PProfRequest) 
returns (PProfResponse) {}; rpc GetMetrics(MetricsRequest) returns (MetricsResponse) {}; rpc KillEgress(KillEgressRequest) returns (google.protobuf.Empty) {}; } message GstPipelineDebugDotRequest {} message GstPipelineDebugDotResponse { string dot_file = 1; } message PProfRequest { string profile_name = 1; int32 timeout = 2; int32 debug = 3; } message PProfResponse { bytes pprof_file = 1; } message MetricsRequest {} message MetricsResponse { string metrics = 1; } message KillEgressRequest { string error = 1; } ================================================ FILE: pkg/ipc/ipc_grpc.pb.go ================================================ // Copyright 2023 LiveKit, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.5.1 // - protoc v6.33.0 // source: ipc.proto package ipc import ( context "context" livekit "github.com/livekit/protocol/livekit" rpc "github.com/livekit/protocol/rpc" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" emptypb "google.golang.org/protobuf/types/known/emptypb" ) // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. // Requires gRPC-Go v1.64.0 or later. 
const _ = grpc.SupportPackageIsVersion9 const ( EgressService_HandlerReady_FullMethodName = "/ipc.EgressService/HandlerReady" EgressService_HandlerUpdate_FullMethodName = "/ipc.EgressService/HandlerUpdate" EgressService_HandlerFinished_FullMethodName = "/ipc.EgressService/HandlerFinished" EgressService_ReplayReady_FullMethodName = "/ipc.EgressService/ReplayReady" EgressService_StorageEvent_FullMethodName = "/ipc.EgressService/StorageEvent" ) // EgressServiceClient is the client API for EgressService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type EgressServiceClient interface { HandlerReady(ctx context.Context, in *HandlerReadyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) HandlerUpdate(ctx context.Context, in *livekit.EgressInfo, opts ...grpc.CallOption) (*emptypb.Empty, error) HandlerFinished(ctx context.Context, in *HandlerFinishedRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) ReplayReady(ctx context.Context, in *rpc.EgressReadyRequest, opts ...grpc.CallOption) (*rpc.EgressReadyResponse, error) StorageEvent(ctx context.Context, in *StorageEventRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) } type egressServiceClient struct { cc grpc.ClientConnInterface } func NewEgressServiceClient(cc grpc.ClientConnInterface) EgressServiceClient { return &egressServiceClient{cc} } func (c *egressServiceClient) HandlerReady(ctx context.Context, in *HandlerReadyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(emptypb.Empty) err := c.cc.Invoke(ctx, EgressService_HandlerReady_FullMethodName, in, out, cOpts...) 
if err != nil { return nil, err } return out, nil } func (c *egressServiceClient) HandlerUpdate(ctx context.Context, in *livekit.EgressInfo, opts ...grpc.CallOption) (*emptypb.Empty, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(emptypb.Empty) err := c.cc.Invoke(ctx, EgressService_HandlerUpdate_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } func (c *egressServiceClient) HandlerFinished(ctx context.Context, in *HandlerFinishedRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(emptypb.Empty) err := c.cc.Invoke(ctx, EgressService_HandlerFinished_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } func (c *egressServiceClient) ReplayReady(ctx context.Context, in *rpc.EgressReadyRequest, opts ...grpc.CallOption) (*rpc.EgressReadyResponse, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(rpc.EgressReadyResponse) err := c.cc.Invoke(ctx, EgressService_ReplayReady_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } func (c *egressServiceClient) StorageEvent(ctx context.Context, in *StorageEventRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(emptypb.Empty) err := c.cc.Invoke(ctx, EgressService_StorageEvent_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } // EgressServiceServer is the server API for EgressService service. // All implementations must embed UnimplementedEgressServiceServer // for forward compatibility. 
type EgressServiceServer interface { HandlerReady(context.Context, *HandlerReadyRequest) (*emptypb.Empty, error) HandlerUpdate(context.Context, *livekit.EgressInfo) (*emptypb.Empty, error) HandlerFinished(context.Context, *HandlerFinishedRequest) (*emptypb.Empty, error) ReplayReady(context.Context, *rpc.EgressReadyRequest) (*rpc.EgressReadyResponse, error) StorageEvent(context.Context, *StorageEventRequest) (*emptypb.Empty, error) mustEmbedUnimplementedEgressServiceServer() } // UnimplementedEgressServiceServer must be embedded to have // forward compatible implementations. // // NOTE: this should be embedded by value instead of pointer to avoid a nil // pointer dereference when methods are called. type UnimplementedEgressServiceServer struct{} func (UnimplementedEgressServiceServer) HandlerReady(context.Context, *HandlerReadyRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method HandlerReady not implemented") } func (UnimplementedEgressServiceServer) HandlerUpdate(context.Context, *livekit.EgressInfo) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method HandlerUpdate not implemented") } func (UnimplementedEgressServiceServer) HandlerFinished(context.Context, *HandlerFinishedRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method HandlerFinished not implemented") } func (UnimplementedEgressServiceServer) ReplayReady(context.Context, *rpc.EgressReadyRequest) (*rpc.EgressReadyResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ReplayReady not implemented") } func (UnimplementedEgressServiceServer) StorageEvent(context.Context, *StorageEventRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method StorageEvent not implemented") } func (UnimplementedEgressServiceServer) mustEmbedUnimplementedEgressServiceServer() {} func (UnimplementedEgressServiceServer) testEmbeddedByValue() {} // UnsafeEgressServiceServer may be 
// If the following call panics, it indicates UnimplementedEgressServiceServer was
// embedded by pointer and is nil. This will cause panics if an
// unimplemented method is ever invoked, so we test this at initialization
// time to prevent it from happening at runtime later due to I/O.
// NOTE: generated gRPC service plumbing (protoc-gen-go-grpc) for ipc.proto.
// Do not hand-edit; regenerate from the proto definition instead.
req.(*livekit.EgressInfo))
	}
	return interceptor(ctx, in, info, handler)
}

func _EgressService_HandlerFinished_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(HandlerFinishedRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(EgressServiceServer).HandlerFinished(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: EgressService_HandlerFinished_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(EgressServiceServer).HandlerFinished(ctx, req.(*HandlerFinishedRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _EgressService_ReplayReady_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(rpc.EgressReadyRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(EgressServiceServer).ReplayReady(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: EgressService_ReplayReady_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(EgressServiceServer).ReplayReady(ctx, req.(*rpc.EgressReadyRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _EgressService_StorageEvent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(StorageEventRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(EgressServiceServer).StorageEvent(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: EgressService_StorageEvent_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(EgressServiceServer).StorageEvent(ctx, req.(*StorageEventRequest))
	}
	return interceptor(ctx, in, info, handler)
}

// EgressService_ServiceDesc is the grpc.ServiceDesc for EgressService service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var EgressService_ServiceDesc = grpc.ServiceDesc{
	ServiceName: "ipc.EgressService",
	HandlerType: (*EgressServiceServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "HandlerReady",
			Handler:    _EgressService_HandlerReady_Handler,
		},
		{
			MethodName: "HandlerUpdate",
			Handler:    _EgressService_HandlerUpdate_Handler,
		},
		{
			MethodName: "HandlerFinished",
			Handler:    _EgressService_HandlerFinished_Handler,
		},
		{
			MethodName: "ReplayReady",
			Handler:    _EgressService_ReplayReady_Handler,
		},
		{
			MethodName: "StorageEvent",
			Handler:    _EgressService_StorageEvent_Handler,
		},
	},
	Streams:  []grpc.StreamDesc{},
	Metadata: "ipc.proto",
}

const (
	EgressHandler_GetPipelineDot_FullMethodName = "/ipc.EgressHandler/GetPipelineDot"
	EgressHandler_GetPProf_FullMethodName       = "/ipc.EgressHandler/GetPProf"
	EgressHandler_GetMetrics_FullMethodName     = "/ipc.EgressHandler/GetMetrics"
	EgressHandler_KillEgress_FullMethodName     = "/ipc.EgressHandler/KillEgress"
)

// EgressHandlerClient is the client API for EgressHandler service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type EgressHandlerClient interface {
	GetPipelineDot(ctx context.Context, in *GstPipelineDebugDotRequest, opts ...grpc.CallOption) (*GstPipelineDebugDotResponse, error)
	GetPProf(ctx context.Context, in *PProfRequest, opts ...grpc.CallOption) (*PProfResponse, error)
	GetMetrics(ctx context.Context, in *MetricsRequest, opts ...grpc.CallOption) (*MetricsResponse, error)
	KillEgress(ctx context.Context, in *KillEgressRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
}

type egressHandlerClient struct {
	cc grpc.ClientConnInterface
}

func NewEgressHandlerClient(cc grpc.ClientConnInterface) EgressHandlerClient {
	return &egressHandlerClient{cc}
}

func (c *egressHandlerClient) GetPipelineDot(ctx context.Context, in *GstPipelineDebugDotRequest, opts ...grpc.CallOption) (*GstPipelineDebugDotResponse, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(GstPipelineDebugDotResponse)
	err := c.cc.Invoke(ctx, EgressHandler_GetPipelineDot_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *egressHandlerClient) GetPProf(ctx context.Context, in *PProfRequest, opts ...grpc.CallOption) (*PProfResponse, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(PProfResponse)
	err := c.cc.Invoke(ctx, EgressHandler_GetPProf_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *egressHandlerClient) GetMetrics(ctx context.Context, in *MetricsRequest, opts ...grpc.CallOption) (*MetricsResponse, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(MetricsResponse)
	err := c.cc.Invoke(ctx, EgressHandler_GetMetrics_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *egressHandlerClient) KillEgress(ctx context.Context, in *KillEgressRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(emptypb.Empty)
	err := c.cc.Invoke(ctx, EgressHandler_KillEgress_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// EgressHandlerServer is the server API for EgressHandler service.
// All implementations must embed UnimplementedEgressHandlerServer
// for forward compatibility.
type EgressHandlerServer interface {
	GetPipelineDot(context.Context, *GstPipelineDebugDotRequest) (*GstPipelineDebugDotResponse, error)
	GetPProf(context.Context, *PProfRequest) (*PProfResponse, error)
	GetMetrics(context.Context, *MetricsRequest) (*MetricsResponse, error)
	KillEgress(context.Context, *KillEgressRequest) (*emptypb.Empty, error)
	mustEmbedUnimplementedEgressHandlerServer()
}

// UnimplementedEgressHandlerServer must be embedded to have
// forward compatible implementations.
//
// NOTE: this should be embedded by value instead of pointer to avoid a nil
// pointer dereference when methods are called.
type UnimplementedEgressHandlerServer struct{}

func (UnimplementedEgressHandlerServer) GetPipelineDot(context.Context, *GstPipelineDebugDotRequest) (*GstPipelineDebugDotResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetPipelineDot not implemented")
}
func (UnimplementedEgressHandlerServer) GetPProf(context.Context, *PProfRequest) (*PProfResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetPProf not implemented")
}
func (UnimplementedEgressHandlerServer) GetMetrics(context.Context, *MetricsRequest) (*MetricsResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetMetrics not implemented")
}
func (UnimplementedEgressHandlerServer) KillEgress(context.Context, *KillEgressRequest) (*emptypb.Empty, error) {
	return nil, status.Errorf(codes.Unimplemented, "method KillEgress not implemented")
}
func (UnimplementedEgressHandlerServer) mustEmbedUnimplementedEgressHandlerServer() {}
func (UnimplementedEgressHandlerServer) testEmbeddedByValue()                       {}

// UnsafeEgressHandlerServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to EgressHandlerServer will
// result in compilation errors.
type UnsafeEgressHandlerServer interface {
	mustEmbedUnimplementedEgressHandlerServer()
}

func RegisterEgressHandlerServer(s grpc.ServiceRegistrar, srv EgressHandlerServer) {
	// If the following call panics, it indicates UnimplementedEgressHandlerServer was
	// embedded by pointer and is nil. This will cause panics if an
	// unimplemented method is ever invoked, so we test this at initialization
	// time to prevent it from happening at runtime later due to I/O.
	if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
		t.testEmbeddedByValue()
	}
	s.RegisterService(&EgressHandler_ServiceDesc, srv)
}

func _EgressHandler_GetPipelineDot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(GstPipelineDebugDotRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(EgressHandlerServer).GetPipelineDot(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: EgressHandler_GetPipelineDot_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(EgressHandlerServer).GetPipelineDot(ctx, req.(*GstPipelineDebugDotRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _EgressHandler_GetPProf_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(PProfRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(EgressHandlerServer).GetPProf(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: EgressHandler_GetPProf_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(EgressHandlerServer).GetPProf(ctx, req.(*PProfRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _EgressHandler_GetMetrics_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(MetricsRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(EgressHandlerServer).GetMetrics(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: EgressHandler_GetMetrics_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(EgressHandlerServer).GetMetrics(ctx, req.(*MetricsRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _EgressHandler_KillEgress_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(KillEgressRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(EgressHandlerServer).KillEgress(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: EgressHandler_KillEgress_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(EgressHandlerServer).KillEgress(ctx, req.(*KillEgressRequest))
	}
	return interceptor(ctx, in, info, handler)
}

// EgressHandler_ServiceDesc is the grpc.ServiceDesc for EgressHandler service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var EgressHandler_ServiceDesc = grpc.ServiceDesc{
	ServiceName: "ipc.EgressHandler",
	HandlerType: (*EgressHandlerServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "GetPipelineDot",
			Handler:    _EgressHandler_GetPipelineDot_Handler,
		},
		{
			MethodName: "GetPProf",
			Handler:    _EgressHandler_GetPProf_Handler,
		},
		{
			MethodName: "GetMetrics",
			Handler:    _EgressHandler_GetMetrics_Handler,
		},
		{
			MethodName: "KillEgress",
			Handler:    _EgressHandler_KillEgress_Handler,
		},
	},
	Streams:  []grpc.StreamDesc{},
	Metadata: "ipc.proto",
}

================================================
FILE: pkg/logging/csv.go
================================================
// Copyright 2025 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package logging

import (
	"fmt"
	"os"
	"path"
	"reflect"
	"strings"
	"time"
)

// TrackStats is one CSV row of per-track receive/push statistics.
type TrackStats struct {
	Timestamp       string
	PacketsReceived uint64
	PaddingReceived uint64
	LastReceived    string
	PacketsDropped  uint64
	PacketsPushed   uint64
	SamplesPushed   uint64
	LastPushed      string
	Drift           time.Duration
	MaxDrift        time.Duration
}

// StreamStats is one CSV row of per-stream byte/keyframe statistics.
type StreamStats struct {
	Timestamp     string
	Keyframes     uint64
	OutBytesTotal uint64
	OutBytesAcked uint64
	InBytesTotal  uint64
	InBytesAcked  uint64
}

// CSVLogger is used for logging data in CSV format.
// It does not validate columns or data.
type CSVLogger[T any] struct {
	f *os.File // destination CSV file, created under os.TempDir()
}

// NewCSVLogger creates the CSV file (appending ".csv" if missing) and writes a
// header row derived from T's exported field names via reflection.
func NewCSVLogger[T any](filename string) (*CSVLogger[T], error) {
	if !strings.HasSuffix(filename, ".csv") {
		filename = filename + ".csv"
	}
	filename = path.Join(os.TempDir(), filename)
	f, err := os.Create(filename)
	if err != nil {
		return nil, err
	}

	columns := make([]string, 0)
	t := reflect.TypeFor[T]()
	for i := range t.NumField() {
		columns = append(columns, t.Field(i).Name)
	}
	_, _ = fmt.Fprintf(f, "%s\n", strings.Join(columns, ","))

	return &CSVLogger[T]{
		f: f,
	}, nil
}

// Write appends one row; each field is formatted with %v and joined with
// commas (no quoting/escaping, per the type's documented limitation).
func (l *CSVLogger[T]) Write(value *T) {
	v := reflect.ValueOf(value).Elem()
	t := v.Type()
	row := make([]string, t.NumField())
	for i := range t.NumField() {
		row[i] = fmt.Sprintf("%v", v.Field(i).Interface())
	}
	_, _ = l.f.WriteString(strings.Join(row, ",") + "\n")
}

// Close closes the underlying file.
func (l *CSVLogger[T]) Close() {
	_ = l.f.Close()
}

================================================
FILE: pkg/logging/handler.go
================================================
package logging

import (
	"bytes"
	"fmt"
	"strings"
	"time"

	"github.com/frostbyte73/core"
	"go.uber.org/atomic"

	"github.com/livekit/protocol/logger"
)

const (
	channelSize     = 4096
	dropLogThrottle = 10 * time.Second
)

// HandlerLogger forwards handler process output through a bounded channel to a
// draining goroutine; writes never block, and overflow is dropped with a
// throttled warning.
type HandlerLogger struct {
	ch          chan []byte
	done        core.Fuse
	dropped     atomic.Int64 // messages dropped since last warning
	lastDropLog atomic.Int64 // unix nanos
	l           logger.Logger
}

// NewHandlerLogger starts the drain goroutine; callers must Close to stop it.
func NewHandlerLogger(handlerID, egressID string) *HandlerLogger {
	h := &HandlerLogger{
		ch: make(chan []byte, channelSize),
		l: logger.GetLogger().WithValues(
			"handlerID", handlerID,
			"egressID", egressID,
		),
	}
	go h.drain()
	return h
}

// Write implements io.Writer. It copies p (the caller may reuse the buffer)
// and enqueues it; if the channel is full the chunk is dropped and a warning
// is logged at most once per dropLogThrottle.
func (h *HandlerLogger) Write(p []byte) (int, error) {
	cp := make([]byte, len(p))
	copy(cp, p)
	select {
	case h.ch <- cp:
	default:
		count := h.dropped.Inc()
		now := time.Now().UnixNano()
		last := h.lastDropLog.Load()
		if now-last >= int64(dropLogThrottle) {
			if h.lastDropLog.CompareAndSwap(last, now) {
				h.l.Warnw(fmt.Sprintf("handler logger dropped %d messages", count), nil)
				h.dropped.Store(0)
			}
		}
	}
	return len(p), nil
}

func
// Close stops accepting writes and waits for drain to flush and exit.
(h *HandlerLogger) Close() error {
	close(h.ch)
	<-h.done.Watch()
	return nil
}

// drain consumes chunks from the channel, splits them into newline-terminated
// lines, and classifies each via processLine; leftovers are flushed on exit.
func (h *HandlerLogger) drain() {
	var buf []byte
	var panicBuf []string

	defer func() {
		// flush remaining buffer
		if len(buf) > 0 {
			h.processLine(string(buf), &panicBuf)
		}
		// flush any accumulated panic
		if len(panicBuf) > 0 {
			h.l.Errorw(strings.Join(panicBuf, "\n"), nil)
		}
		h.done.Break()
	}()

	for chunk := range h.ch {
		buf = append(buf, chunk...)
		for {
			idx := bytes.IndexByte(buf, '\n')
			if idx < 0 {
				break
			}
			line := string(buf[:idx])
			buf = buf[idx+1:]
			h.processLine(line, &panicBuf)
		}
	}
}

// processLine classifies one line: lines ending in '}' (structured log output)
// pass through to stdout, known gstreamer noise is filtered or demoted to a
// warning, panic/stack-trace lines accumulate into panicBuf, and anything else
// is logged as an error.
func (h *HandlerLogger) processLine(line string, panicBuf *[]string) {
	if len(line) == 0 {
		return
	}

	if line[len(line)-1] == '}' {
		if len(*panicBuf) > 0 {
			h.flushPanic(panicBuf)
		}
		fmt.Println(line)
		return
	}

	// gstreamer stderr (timestamp-prefixed)
	if strings.HasPrefix(line, "0:00:0") {
		return
	}

	// glib/gobject warnings from gstreamer
	if strings.HasPrefix(line, "(egress:") {
		h.l.Warnw(line, nil)
		return
	}

	// panic entry
	if strings.HasPrefix(line, "panic:") || strings.HasPrefix(line, "fatal error:") || strings.HasPrefix(line, "goroutine ") {
		*panicBuf = append(*panicBuf, line)
		return
	}

	// panic accumulation
	if len(*panicBuf) > 0 {
		if h.isPanicContinuation(line) {
			*panicBuf = append(*panicBuf, line)
			return
		}
		h.flushPanic(panicBuf)
	}

	h.l.Errorw(line, nil)
}

// isPanicContinuation reports whether line looks like part of a stack trace:
// a tab-indented frame, a goroutine header, or a call site containing '('.
// Callers guarantee line is non-empty (processLine returns early on "").
func (h *HandlerLogger) isPanicContinuation(line string) bool {
	if line[0] == '\t' {
		return true
	}
	if strings.HasPrefix(line, "goroutine ") {
		return true
	}
	if !strings.HasPrefix(line, "(") && strings.Contains(line, "(") {
		return true
	}
	return false
}

// flushPanic logs the accumulated trace as a single error and resets the buffer.
func (h *HandlerLogger) flushPanic(panicBuf *[]string) {
	if len(*panicBuf) > 0 {
		h.l.Errorw(strings.Join(*panicBuf, "\n"), nil)
		*panicBuf = nil
	}
}

================================================
FILE: pkg/logging/s3.go
================================================
// Copyright 2025 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package logging

import (
	"fmt"
	"strings"

	"github.com/aws/smithy-go/logging"
	"github.com/linkdata/deadlock"

	"github.com/livekit/protocol/logger"
)

// S3Logger only logs aws messages on upload failure
type S3Logger struct {
	mu   deadlock.Mutex
	msgs []string // fixed-size ring of recent messages
	idx  int      // next write slot (monotonically increasing, used mod len(msgs))
}

func NewS3Logger() *S3Logger {
	return &S3Logger{
		msgs: make([]string, 10),
	}
}

// Logf implements the smithy logging.Logger interface, buffering the
// formatted message (prefixed with its lowercased classification) in the ring.
func (l *S3Logger) Logf(classification logging.Classification, format string, v ...interface{}) {
	format = "aws %s: " + format
	v = append([]interface{}{strings.ToLower(string(classification))}, v...)

	l.mu.Lock()
	l.msgs[l.idx%len(l.msgs)] = fmt.Sprintf(format, v...)
	l.idx++
	l.mu.Unlock()
}

// WriteLogs emits the buffered messages, oldest first, at debug level.
// NOTE(review): entries are not cleared after emission, so a second WriteLogs
// call would re-log them — confirm this is intended.
func (l *S3Logger) WriteLogs() {
	l.mu.Lock()
	size := len(l.msgs)
	for range size {
		if msg := l.msgs[l.idx%size]; msg != "" {
			logger.Debugw(msg)
		}
		l.idx++
	}
	l.mu.Unlock()
}

================================================
FILE: pkg/pipeline/builder/audio.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package builder

import (
	"fmt"
	"time"

	"github.com/go-gst/go-gst/gst"
	"github.com/go-gst/go-gst/gst/app"
	"github.com/linkdata/deadlock"
	"go.uber.org/atomic"

	"github.com/livekit/egress/pkg/config"
	"github.com/livekit/egress/pkg/errors"
	"github.com/livekit/egress/pkg/gstreamer"
	"github.com/livekit/egress/pkg/types"
	"github.com/livekit/protocol/livekit"
	"github.com/livekit/protocol/logger"
	lksdk "github.com/livekit/server-sdk-go/v2"
)

const (
	leakyQueue         = true
	blockingQueue      = false
	audioRateTolerance = 3 * time.Millisecond
	audioBinName       = "audio"
)

// AudioBin builds and manages the audio branch of the egress pipeline.
type AudioBin struct {
	bin  *gstreamer.Bin
	conf *config.PipelineConfig

	mu          deadlock.Mutex
	nextID      int                  // monotonically increasing suffix for source bin names
	nextChannel livekit.AudioChannel // last channel assigned in DUAL_CHANNEL_ALTERNATE mode
	names       map[string]string    // trackID -> source bin name

	audioPacer *audioPacer
}

// driftProcessNotifier is notified once a detected drift has been fully compensated.
type driftProcessNotifier interface {
	DriftProcessed()
}

// audioPacer compensates detected drift by temporarily adjusting the tempo of
// the `pitch` element until the compensation budget is consumed.
type audioPacer struct {
	pitch               *gst.Element
	active              atomic.Bool
	remaining           time.Duration // compensation time left at the adjusted tempo
	tc                  driftProcessNotifier
	tempoAdjustmentRate float64
}

// start begins compensating drift: positive drift slows the tempo, negative
// speeds it up, each by tempoAdjustmentRate. No-op if pitch is unset, drift
// is zero, or the pacer is already active.
func (a *audioPacer) start(drift time.Duration) {
	if a.pitch == nil || drift == 0 {
		return
	}
	if a.active.Load() {
		logger.Errorw(
			"starting audio pacer, but it's already active",
			errors.New("tempo controller bug"),
		)
		return
	}

	rate := 1 + a.tempoAdjustmentRate
	if drift > 0 {
		rate = 1 - a.tempoAdjustmentRate
	}

	// playback time required at the adjusted rate to absorb |drift|
	compensationFactor := 1 / a.tempoAdjustmentRate
	driftNanoseconds := int64(drift)
	compensationNanoseconds := int64(compensationFactor * float64(driftNanoseconds))
	compensationDuration := time.Duration(compensationNanoseconds)
	a.remaining = compensationDuration.Abs()

	logger.Debugw("starting audio pacer", "remaining", a.remaining, "rate", rate)
	a.pitch.SetArg("tempo", fmt.Sprintf("%.2f", rate))
	a.active.Store(true)
}

// observeProcessedDuration subtracts processed buffer time from the remaining
// budget; once exhausted, the pacer stops and the controller is notified.
func (a *audioPacer) observeProcessedDuration(d time.Duration) {
	if !a.active.Load() {
		return
	}
	a.remaining -= d
	if a.remaining <= 0 {
		logger.Debugw("audio gap processed, stopping the pacer")
		a.stop()
		a.tc.DriftProcessed()
	}
}

func (a
// stop restores normal tempo and clears the pacer state.
*audioPacer) stop() {
	if a.pitch == nil || a.tc == nil {
		return
	}
	a.pitch.SetArg("tempo", fmt.Sprintf("%.1f", 1.0))
	a.active.Store(false)
	a.remaining = 0
}

// BuildAudioBin constructs the pipeline's audio source bin: web (pulse) or SDK
// (per-track app srcs) input, followed by a tee when there are multiple
// encoded outputs, or a single queue otherwise.
func BuildAudioBin(pipeline *gstreamer.Pipeline, p *config.PipelineConfig) error {
	b := &AudioBin{
		bin:   pipeline.NewBin(audioBinName),
		conf:  p,
		names: make(map[string]string),
	}

	switch p.SourceType {
	case types.SourceTypeWeb:
		if err := b.buildWebInput(); err != nil {
			return err
		}

	case types.SourceTypeSDK:
		if err := b.buildSDKInput(); err != nil {
			return err
		}

		pipeline.AddOnTrackAdded(b.onTrackAdded)
		pipeline.AddOnTrackRemoved(b.onTrackRemoved)
		pipeline.AddOnSourceBinReset(b.onSourceBinReset)
	}

	if len(p.GetEncodedOutputs()) > 1 {
		tee, err := gst.NewElementWithName("tee", fmt.Sprintf("%s_tee", audioBinName))
		if err != nil {
			return err
		}
		if err = b.bin.AddElement(tee); err != nil {
			return err
		}
	} else {
		queue, err := gstreamer.BuildQueue(fmt.Sprintf("%s_queue", audioBinName), p.Latency.PipelineLatency, p.Live)
		if err != nil {
			return errors.ErrGstPipelineError(err)
		}
		if err = b.bin.AddElement(queue); err != nil {
			return err
		}
	}

	return pipeline.AddSourceBin(b.bin)
}

// onTrackAdded adds an app src bin for a newly-subscribed audio track, unless
// the pipeline is already past the running state.
func (b *AudioBin) onTrackAdded(ts *config.TrackSource) {
	if b.bin.GetState() > gstreamer.StateRunning {
		return
	}

	if ts.TrackKind == lksdk.TrackKindAudio {
		logger.Debugw("adding audio app src bin", "trackID", ts.TrackID)
		if err := b.addAudioAppSrcBin(ts); err != nil {
			logger.Errorw("failed to add audio app src bin", err, "trackID", ts.TrackID)
			b.bin.OnError(err)
		}
	}
}

// onTrackRemoved removes the source bin registered for trackID, if any.
func (b *AudioBin) onTrackRemoved(trackID string) {
	if b.bin.GetState() > gstreamer.StateRunning {
		return
	}

	b.mu.Lock()
	name, ok := b.names[trackID]
	if !ok {
		b.mu.Unlock()
		return
	}
	delete(b.names, trackID)
	b.mu.Unlock()

	if err := b.bin.RemoveSourceBin(name); err != nil {
		b.bin.OnError(err)
	}
}

// buildWebInput captures audio from the egress-specific pulse monitor device.
func (b *AudioBin) buildWebInput() error {
	pulseSrc, err := gst.NewElement("pulsesrc")
	if err != nil {
		return errors.ErrGstPipelineError(err)
	}
	if err = pulseSrc.SetProperty("device",
fmt.Sprintf("%s.monitor", b.conf.Info.EgressId)); err != nil {
		return errors.ErrGstPipelineError(err)
	}
	if err = b.bin.AddElement(pulseSrc); err != nil {
		return err
	}

	if err = addAudioConverter(b.bin, b.conf, livekit.AudioChannel_AUDIO_CHANNEL_BOTH, leakyQueue); err != nil {
		return err
	}
	if b.conf.AudioTranscoding {
		if err = b.addEncoder(); err != nil {
			return err
		}
	}

	return nil
}

// buildSDKInput adds app src bins for the initial audio tracks, a silent test
// source in live mode, the mixer, and (if transcoding) the encoder.
func (b *AudioBin) buildSDKInput() error {
	for _, tr := range b.conf.AudioTracks {
		if err := b.addAudioAppSrcBin(tr); err != nil {
			return err
		}
	}
	if b.conf.Live {
		if err := b.addAudioTestSrcBin(); err != nil {
			return err
		}
	}
	if err := b.addMixer(); err != nil {
		return err
	}
	if b.conf.AudioTranscoding {
		if err := b.addEncoder(); err != nil {
			return err
		}
	}

	return nil
}

// addAudioAppSrcBin is the locked entry point for addAudioAppSrcBinLocked.
func (b *AudioBin) addAudioAppSrcBin(ts *config.TrackSource) error {
	b.mu.Lock()
	defer b.mu.Unlock()

	return b.addAudioAppSrcBinLocked(ts)
}

// addAudioAppSrcBinLocked builds the per-track chain: appsrc -> RTP depayload
// -> decoder -> audio converter. Caller must hold b.mu.
func (b *AudioBin) addAudioAppSrcBinLocked(ts *config.TrackSource) error {
	name := fmt.Sprintf("%s_%d", ts.TrackID, b.nextID)
	b.nextID++
	b.names[ts.TrackID] = name

	appSrcBin := b.bin.NewBin(name)
	appSrcBin.SetEOSFunc(func() bool {
		return false
	})
	ts.AppSrc.SetArg("format", "time")
	if err := ts.AppSrc.SetProperty("is-live", b.conf.Live); err != nil {
		return err
	}
	if !b.conf.Live {
		if err := ts.AppSrc.SetProperty("block", true); err != nil {
			return err
		}
	}
	if err := appSrcBin.AddElement(ts.AppSrc.Element); err != nil {
		return err
	}

	switch ts.MimeType {
	case types.MimeTypeOpus:
		if err := ts.AppSrc.SetProperty("caps", gst.NewCapsFromString(fmt.Sprintf(
			"application/x-rtp,media=audio,payload=%d,encoding-name=OPUS,clock-rate=%d",
			ts.PayloadType, ts.ClockRate,
		))); err != nil {
			return errors.ErrGstPipelineError(err)
		}

		rtpOpusDepay, err := gst.NewElement("rtpopusdepay")
		if err != nil {
			return errors.ErrGstPipelineError(err)
		}
		opusDec, err := gst.NewElement("opusdec")
		if err != nil {
			return errors.ErrGstPipelineError(err)
		}
		if err = appSrcBin.AddElements(rtpOpusDepay, opusDec); err != nil {
return err
		}

	case types.MimeTypePCMU:
		if err := ts.AppSrc.SetProperty("caps", gst.NewCapsFromString(fmt.Sprintf(
			"application/x-rtp,media=audio,payload=%d,encoding-name=PCMU,clock-rate=%d",
			ts.PayloadType, ts.ClockRate,
		))); err != nil {
			return errors.ErrGstPipelineError(err)
		}

		rtpPCMUDepay, err := gst.NewElement("rtppcmudepay")
		if err != nil {
			return errors.ErrGstPipelineError(err)
		}
		mulawDec, err := gst.NewElement("mulawdec")
		if err != nil {
			return errors.ErrGstPipelineError(err)
		}
		if err = appSrcBin.AddElements(rtpPCMUDepay, mulawDec); err != nil {
			return err
		}

	case types.MimeTypePCMA:
		if err := ts.AppSrc.SetProperty("caps", gst.NewCapsFromString(fmt.Sprintf(
			"application/x-rtp,media=audio,payload=%d,encoding-name=PCMA,clock-rate=%d",
			ts.PayloadType, ts.ClockRate,
		))); err != nil {
			return errors.ErrGstPipelineError(err)
		}

		rtpPCMADepay, err := gst.NewElement("rtppcmadepay")
		if err != nil {
			return errors.ErrGstPipelineError(err)
		}
		alawDec, err := gst.NewElement("alawdec")
		if err != nil {
			return errors.ErrGstPipelineError(err)
		}
		if err = appSrcBin.AddElements(rtpPCMADepay, alawDec); err != nil {
			return err
		}

	default:
		return errors.ErrNotSupported(string(ts.MimeType))
	}

	// use the pitch-capable converter chain when the tempo controller is enabled
	addAudioConvertFunc := addAudioConverter
	if b.conf.AudioTempoController.Enabled {
		addAudioConvertFunc = b.addAudioConvertWithPitch
	}

	if err := addAudioConvertFunc(appSrcBin, b.conf, b.getChannelLocked(ts), blockingQueue); err != nil {
		return err
	}
	if err := b.bin.AddSourceBin(appSrcBin); err != nil {
		return err
	}

	// wire drift detection into the pacer
	if ts.TempoController != nil {
		ts.TempoController.OnDriftDetectedCallback(func(drift time.Duration) {
			if b.audioPacer.pitch != nil {
				logger.Debugw("starting audio pacer to cover the drift", "drift", drift)
				b.audioPacer.start(drift)
			}
		})
		b.audioPacer.tc = ts.TempoController
	}

	return nil
}

// onSourceBinReset resets the source bin for audio tracks only.
func (b *AudioBin) onSourceBinReset(ts *config.TrackSource) error {
	if ts.TrackKind != lksdk.TrackKindAudio {
		return nil
	}
	return b.resetAudioAppSrcBin(ts)
}

func (b *AudioBin) resetAudioAppSrcBin(ts
// resetAudioAppSrcBin tears down the track's source bin and rebuilds it with a
// fresh appsrc element.
*config.TrackSource) error {
	b.mu.Lock()
	defer b.mu.Unlock()

	oldName, ok := b.names[ts.TrackID]
	if !ok {
		return errors.New("track already removed, cannot reset audio source bin")
	}
	if b.bin.GetState() > gstreamer.StateRunning {
		return errors.New("pipeline stopping, cannot reset audio source bin")
	}

	// Force-remove old bin (blocks on GLib main loop, safe to hold b.mu since
	// ForceRemoveSourceBin only acquires gstreamer.Bin's internal mutex)
	if err := b.bin.ForceRemoveSourceBin(oldName); err != nil {
		return fmt.Errorf("failed to force remove audio source bin: %w", err)
	}

	newElement, err := gst.NewElementWithName("appsrc", fmt.Sprintf("app_%s", ts.TrackID))
	if err != nil {
		return errors.ErrGstPipelineError(err)
	}
	ts.AppSrc = app.SrcFromElement(newElement)

	if err := b.addAudioAppSrcBinLocked(ts); err != nil {
		return fmt.Errorf("failed to add new audio source bin: %w", err)
	}

	logger.Infow("audio source bin reset complete", "trackID", ts.TrackID, "newBin", b.names[ts.TrackID])
	return nil
}

// getChannelLocked chooses the output channel for a track based on the mixing
// mode: explicit per-track channel, agent-vs-participant split, or alternating
// left/right. Caller must hold b.mu (mutates b.nextChannel).
func (b *AudioBin) getChannelLocked(ts *config.TrackSource) livekit.AudioChannel {
	if ts.AudioChannel != nil {
		return *ts.AudioChannel
	}

	switch b.conf.AudioMixing {
	case livekit.AudioMixing_DEFAULT_MIXING:
		return livekit.AudioChannel_AUDIO_CHANNEL_BOTH

	case livekit.AudioMixing_DUAL_CHANNEL_AGENT:
		if ts.ParticipantKind == lksdk.ParticipantAgent {
			return livekit.AudioChannel_AUDIO_CHANNEL_LEFT
		}
		return livekit.AudioChannel_AUDIO_CHANNEL_RIGHT

	case livekit.AudioMixing_DUAL_CHANNEL_ALTERNATE:
		if b.nextChannel == livekit.AudioChannel_AUDIO_CHANNEL_LEFT {
			b.nextChannel = livekit.AudioChannel_AUDIO_CHANNEL_RIGHT
		} else {
			b.nextChannel = livekit.AudioChannel_AUDIO_CHANNEL_LEFT
		}
		return b.nextChannel
	}

	return livekit.AudioChannel_AUDIO_CHANNEL_BOTH
}

// addAudioTestSrcBin adds a silent live source so the mixer keeps producing
// output when no tracks are active.
func (b *AudioBin) addAudioTestSrcBin() error {
	testSrcBin := b.bin.NewBin(fmt.Sprintf("%s_test_src", audioBinName))
	if err := b.bin.AddSourceBin(testSrcBin); err != nil {
		return err
	}

	audioTestSrc, err := gst.NewElement("audiotestsrc")
	if err !=
nil {
		return errors.ErrGstPipelineError(err)
	}
	// silent source: volume 0, live, timestamped
	if err = audioTestSrc.SetProperty("volume", 0.0); err != nil {
		return errors.ErrGstPipelineError(err)
	}
	if err = audioTestSrc.SetProperty("do-timestamp", true); err != nil {
		return errors.ErrGstPipelineError(err)
	}
	if err = audioTestSrc.SetProperty("is-live", true); err != nil {
		return errors.ErrGstPipelineError(err)
	}
	// 20 ms @ 48 kHz
	if err = audioTestSrc.SetProperty("samplesperbuffer", 960); err != nil {
		return errors.ErrGstPipelineError(err)
	}

	audioCaps, err := newAudioCapsFilter(b.conf, livekit.AudioChannel_AUDIO_CHANNEL_BOTH)
	if err != nil {
		return err
	}

	return testSrcBin.AddElements(audioTestSrc, audioCaps)
}

// addMixer appends an audiomixer (with configured latency and alignment
// threshold) plus a caps filter, and subscribes its pads for QoS messages.
func (b *AudioBin) addMixer() error {
	audioMixer, err := gst.NewElement("audiomixer")
	if err != nil {
		return errors.ErrGstPipelineError(err)
	}
	if err = audioMixer.SetProperty("latency", uint64(b.conf.Latency.AudioMixerLatency)); err != nil {
		return errors.ErrGstPipelineError(err)
	}
	if err = audioMixer.SetProperty("alignment-threshold", uint64(b.conf.Latency.PipelineLatency)); err != nil {
		return errors.ErrGstPipelineError(err)
	}

	mixedCaps, err := newAudioCapsFilter(b.conf, livekit.AudioChannel_AUDIO_CHANNEL_BOTH)
	if err != nil {
		return err
	}

	subscribeForQoS(audioMixer)

	return b.bin.AddElements(audioMixer, mixedCaps)
}

// addEncoder appends the encoder matching the configured output codec
// (opus/aac/mp3); raw audio needs no encoder.
func (b *AudioBin) addEncoder() error {
	switch b.conf.AudioOutCodec {
	case types.MimeTypeOpus:
		opusEnc, err := gst.NewElement("opusenc")
		if err != nil {
			return errors.ErrGstPipelineError(err)
		}
		if err = opusEnc.SetProperty("bitrate", int(b.conf.AudioBitrate*1000)); err != nil {
			return errors.ErrGstPipelineError(err)
		}
		return b.bin.AddElement(opusEnc)

	case types.MimeTypeAAC:
		faac, err := gst.NewElement("faac")
		if err != nil {
			return errors.ErrGstPipelineError(err)
		}
		if err = faac.SetProperty("bitrate", int(b.conf.AudioBitrate*1000)); err != nil {
			return errors.ErrGstPipelineError(err)
		}
		return b.bin.AddElement(faac)

	case types.MimeTypeMP3:
		mp3enc, err := gst.NewElement("lamemp3enc")
		if err !=
nil {
			return errors.ErrGstPipelineError(err)
		}
		// target=bitrate is required for cbr and bitrate to take effect;
		// without it lamemp3enc defaults to quality-based VBR.
		mp3enc.SetArg("target", "bitrate")
		if err = mp3enc.SetProperty("cbr", true); err != nil {
			return errors.ErrGstPipelineError(err)
		}
		if err = mp3enc.SetProperty("bitrate", int(b.conf.AudioBitrate)); err != nil {
			return errors.ErrGstPipelineError(err)
		}
		return b.bin.AddElement(mp3enc)

	case types.MimeTypeRawAudio:
		return nil

	default:
		return errors.ErrNotSupported(string(b.conf.AudioOutCodec))
	}
}

// addAudioConverter appends the standard conversion chain:
// audiorate -> queue -> audioconvert -> audioresample -> capsfilter.
func addAudioConverter(b *gstreamer.Bin, p *config.PipelineConfig, channel livekit.AudioChannel, isLeaky bool) error {
	rate, err := gstreamer.BuildAudioRate("audio_rate", audioRateTolerance)
	if err != nil {
		return err
	}

	audioQueue, err := gstreamer.BuildQueue(fmt.Sprintf("%s_input_queue", audioBinName), p.Latency.PipelineLatency, isLeaky)
	if err != nil {
		return err
	}

	audioConvert, err := gst.NewElement("audioconvert")
	if err != nil {
		return errors.ErrGstPipelineError(err)
	}

	audioResample, err := gst.NewElement("audioresample")
	if err != nil {
		return errors.ErrGstPipelineError(err)
	}

	capsFilter, err := newAudioCapsFilter(p, channel)
	if err != nil {
		return err
	}

	return b.AddElements(rate, audioQueue, audioConvert, audioResample, capsFilter)
}

// installPitchProbes attaches pad probes to the pitch element: the sink probe
// feeds processed buffer durations to the pacer; the src probe normalizes
// latency query results.
func (b *AudioBin) installPitchProbes() {
	if b.audioPacer.pitch == nil {
		return
	}

	if sinkPad := b.audioPacer.pitch.GetStaticPad("sink"); sinkPad != nil {
		sinkPad.AddProbe(gst.PadProbeTypeBuffer, func(_ *gst.Pad, info *gst.PadProbeInfo) gst.PadProbeReturn {
			if !b.audioPacer.active.Load() {
				return gst.PadProbeOK
			}
			if buf := info.GetBuffer(); buf != nil && buf.Duration() != gst.ClockTimeNone {
				b.audioPacer.observeProcessedDuration(*buf.Duration().AsDuration())
			}
			return gst.PadProbeOK
		})
	}

	if srcPad := b.audioPacer.pitch.GetStaticPad("src"); srcPad != nil {
		// pitch element min latency can go negative, so we need to normalize it
		// to workaround the obvious issue with the
// element latency query handling
		srcPad.AddProbe(gst.PadProbeTypeQueryUpstream|gst.PadProbeTypePull,
			func(_ *gst.Pad, info *gst.PadProbeInfo) gst.PadProbeReturn {
				q := info.GetQuery()
				if q == nil || q.Type() != gst.QueryLatency {
					return gst.PadProbeOK
				}

				live, minimum, maximum := q.ParseLatency()

				// Normalize: ensure min <= max
				if minimum > maximum {
					logger.Debugw("normalizing min latency to 0", "min", minimum)
					minimum = 0
				}

				q.SetLatency(live, minimum, maximum)
				return gst.PadProbeOK
			},
		)
	}
}

// addAudioConvertWithPitch builds the conversion chain with a `pitch` element
// inserted (in float format) so the pacer can adjust tempo to absorb drift.
func (b *AudioBin) addAudioConvertWithPitch(bin *gstreamer.Bin, p *config.PipelineConfig, channel livekit.AudioChannel, isLeaky bool) error {
	// add audio rate element to handle discontinuities or codec DTX
	rate, err := gstreamer.BuildAudioRate("audio_rate", audioRateTolerance)
	if err != nil {
		return err
	}

	q, err := gstreamer.BuildQueue(fmt.Sprintf("%s_input_queue", audioBinName), p.Latency.PipelineLatency, isLeaky)
	if err != nil {
		return err
	}

	ac1, err := gst.NewElement("audioconvert")
	if err != nil {
		return errors.ErrGstPipelineError(err)
	}

	ar1, err := gst.NewElement("audioresample")
	if err != nil {
		return errors.ErrGstPipelineError(err)
	}

	// go to float for pitch element
	f32caps, err := newAudioFloatCapsFilter(p, channel)
	if err != nil {
		return err
	}

	pitch, err := gst.NewElement("pitch")
	if err != nil {
		return errors.ErrGstPipelineError(err)
	}
	pitch.SetArg("tempo", fmt.Sprintf("%.1f", 1.0))

	ac2, err := gst.NewElement("audioconvert")
	if err != nil {
		return errors.ErrGstPipelineError(err)
	}

	// back to pipeline/native format
	s16caps, err := newAudioCapsFilter(p, channel)
	if err != nil {
		return err
	}

	// keep a handle for pacer control
	b.audioPacer = &audioPacer{
		pitch:               pitch,
		tempoAdjustmentRate: p.AudioTempoController.AdjustmentRate,
	}

	b.installPitchProbes()

	return bin.AddElements(rate, q, ac1, ar1, f32caps, pitch, ac2, s16caps)
}

// F32 caps used only around `pitch`
func newAudioFloatCapsFilter(p *config.PipelineConfig, channel livekit.AudioChannel) (*gst.Element, error) {
	var
channelCaps string
	if channel == livekit.AudioChannel_AUDIO_CHANNEL_BOTH {
		channelCaps = "channels=2"
	} else {
		channelCaps = fmt.Sprintf("channels=1,channel-mask=(bitmask)0x%d", channel)
	}

	rate := 48000
	if p.AudioOutCodec == types.MimeTypeAAC {
		rate = int(p.AudioFrequency)
	}

	caps := gst.NewCapsFromString(fmt.Sprintf("audio/x-raw,format=F32LE,layout=interleaved,rate=%d,%s", rate, channelCaps))

	cf, err := gst.NewElement("capsfilter")
	if err != nil {
		return nil, errors.ErrGstPipelineError(err)
	}
	if err = cf.SetProperty("caps", caps); err != nil {
		return nil, errors.ErrGstPipelineError(err)
	}
	return cf, nil
}

// newAudioCapsFilter returns a capsfilter for the pipeline's native S16LE
// format: 48 kHz for opus/raw, the configured frequency for aac/mp3.
func newAudioCapsFilter(p *config.PipelineConfig, channel livekit.AudioChannel) (*gst.Element, error) {
	var channelCaps string
	if channel == livekit.AudioChannel_AUDIO_CHANNEL_BOTH {
		channelCaps = "channels=2"
	} else {
		channelCaps = fmt.Sprintf("channels=1,channel-mask=(bitmask)0x%d", channel)
	}

	var caps *gst.Caps
	switch p.AudioOutCodec {
	case types.MimeTypeOpus, types.MimeTypeRawAudio:
		caps = gst.NewCapsFromString(fmt.Sprintf(
			"audio/x-raw,format=S16LE,layout=interleaved,rate=48000,%s", channelCaps,
		))
	case types.MimeTypeAAC, types.MimeTypeMP3:
		caps = gst.NewCapsFromString(fmt.Sprintf(
			"audio/x-raw,format=S16LE,layout=interleaved,rate=%d,%s", p.AudioFrequency, channelCaps,
		))
	default:
		return nil, errors.ErrNotSupported(string(p.AudioOutCodec))
	}

	capsFilter, err := gst.NewElement("capsfilter")
	if err != nil {
		return nil, errors.ErrGstPipelineError(err)
	}
	if err = capsFilter.SetProperty("caps", caps); err != nil {
		return nil, errors.ErrGstPipelineError(err)
	}
	return capsFilter, nil
}

// subscribeForQoS enables QoS messages on every pad added to the mixer.
func subscribeForQoS(mixer *gst.Element) {
	mixer.Connect("pad-added", func(_ *gst.Element, pad *gst.Pad) {
		if err := pad.SetProperty("qos-messages", true); err != nil {
			logger.Errorw("failed to set QoS messages on pad", err)
		}
	})
}

================================================
FILE: pkg/pipeline/builder/file.go
================================================
// Copyright 2023
LiveKit, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package builder import ( "github.com/go-gst/go-gst/gst" "github.com/livekit/egress/pkg/config" "github.com/livekit/egress/pkg/errors" "github.com/livekit/egress/pkg/gstreamer" "github.com/livekit/egress/pkg/types" ) func BuildFileBin(pipeline *gstreamer.Pipeline, p *config.PipelineConfig) (*gstreamer.Bin, error) { b := pipeline.NewBin("file") o := p.GetFileConfig() var mux muxer var err error switch o.OutputType { case types.OutputTypeOGG: mux, err = newMuxer("oggmux") case types.OutputTypeIVF: mux, err = newMuxer("avmux_ivf") case types.OutputTypeMP4: mux, err = newMuxer("mp4mux") case types.OutputTypeWebM: mux, err = newMuxer("webmmux") case types.OutputTypeMP3: mux, err = newMP3Muxer() default: return nil, errors.ErrInvalidInput("output type") } if err != nil { return nil, errors.ErrGstPipelineError(err) } sink, err := gst.NewElement("filesink") if err != nil { return nil, errors.ErrGstPipelineError(err) } if err = sink.SetProperty("location", o.LocalFilepath); err != nil { return nil, errors.ErrGstPipelineError(err) } if err = sink.SetProperty("sync", false); err != nil { return nil, errors.ErrGstPipelineError(err) } if !p.Live { if err = sink.SetProperty("async", false); err != nil { return nil, errors.ErrGstPipelineError(err) } } if err = b.AddElements(mux.GetElement(), sink); err != nil { return nil, err } b.SetGetSrcPad(func(name string) *gst.Pad { return mux.GetRequestPad(name + "_%u") }) return b, 
nil } ================================================ FILE: pkg/pipeline/builder/image.go ================================================ // Copyright 2023 LiveKit, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package builder import ( "fmt" "path" "time" "github.com/go-gst/go-gst/gst" "github.com/livekit/egress/pkg/config" "github.com/livekit/egress/pkg/errors" "github.com/livekit/egress/pkg/gstreamer" "github.com/livekit/egress/pkg/types" ) const ( imageQueueLatency = 200 * time.Millisecond ) func BuildImageBin(c *config.ImageConfig, pipeline *gstreamer.Pipeline, p *config.PipelineConfig) (*gstreamer.Bin, error) { b := pipeline.NewBin(fmt.Sprintf("image_%s", c.Id)) var err error var fakeAudio *gst.Element if p.AudioEnabled { fakeAudio, err = gst.NewElement("fakesink") if err != nil { return nil, err } } queue, err := gstreamer.BuildQueue(fmt.Sprintf("image_queue_%s", c.Id), imageQueueLatency, true) if err != nil { return nil, err } if err := b.AddElements(queue); err != nil { return nil, errors.ErrGstPipelineError(err) } b.SetGetSrcPad(func(name string) *gst.Pad { if name == audioBinName { return fakeAudio.GetStaticPad("sink") } return queue.GetStaticPad("sink") }) b.SetShouldLink(func(srcBin string) bool { return srcBin != audioBinName }) videoRate, err := gst.NewElement("videorate") if err != nil { return nil, errors.ErrGstPipelineError(err) } if err = videoRate.SetProperty("skip-to-first", true); err != nil { return nil, err } if err := 
b.AddElements(videoRate); err != nil { return nil, errors.ErrGstPipelineError(err) } videoScale, err := gst.NewElement("videoscale") if err != nil { return nil, errors.ErrGstPipelineError(err) } if err := b.AddElements(videoScale); err != nil { return nil, errors.ErrGstPipelineError(err) } caps, err := gst.NewElement("capsfilter") if err != nil { return nil, errors.ErrGstPipelineError(err) } capsString := fmt.Sprintf( "video/x-raw,framerate=1/%d,format=I420,colorimetry=bt709,chroma-site=mpeg2,pixel-aspect-ratio=1/1", c.CaptureInterval) if c.Width > 0 && c.Height > 0 { capsString = fmt.Sprintf("%s,width=%d,height=%d,", capsString, c.Width, c.Height) } err = caps.SetProperty("caps", gst.NewCapsFromString(capsString)) if err != nil { return nil, err } if err := b.AddElements(caps); err != nil { return nil, errors.ErrGstPipelineError(err) } switch c.ImageOutCodec { case types.MimeTypeJPEG: enc, err := gst.NewElement("jpegenc") if err != nil { return nil, errors.ErrGstPipelineError(err) } if err := b.AddElements(enc); err != nil { return nil, errors.ErrGstPipelineError(err) } default: return nil, errors.ErrNoCompatibleCodec } sink, err := gst.NewElementWithName("multifilesink", fmt.Sprintf("multifilesink_%s", c.Id)) if err != nil { return nil, err } err = sink.SetProperty("post-messages", true) if err != nil { return nil, err } // File will be renamed if the TS prefix is configured location := fmt.Sprintf("%s_%%05d%s", path.Join(c.LocalDir, c.ImagePrefix), types.FileExtensionForOutputType[c.OutputType]) err = sink.SetProperty("location", location) if err != nil { return nil, err } if err = b.AddElements(sink); err != nil { return nil, errors.ErrGstPipelineError(err) } return b, nil } ================================================ FILE: pkg/pipeline/builder/muxer.go ================================================ // Copyright 2025 LiveKit, Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package builder import ( "fmt" "strings" "github.com/go-gst/go-gst/gst" ) // muxer captures the minimal behavior builders need from a muxing element, allowing // us to swap between real gst muxers and light-weight shims (e.g. xingmux for MP3). type muxer interface { GetRequestPad(name string) *gst.Pad GetElement() *gst.Element } // muxerImpl wraps a concrete gst.Element so it satisfies the muxer interface. type muxerImpl struct { *gst.Element } // newMuxer constructs a wrapper around the named gst muxer element. func newMuxer(elementName string) (*muxerImpl, error) { element, err := gst.NewElement(elementName) if err != nil { return nil, err } if factory := element.GetFactory(); factory != nil { if klass := factory.GetMetadata("klass"); !strings.Contains(klass, "Muxer") { element.Unref() return nil, fmt.Errorf("element %s is not a muxer", elementName) } } return &muxerImpl{ Element: element, }, nil } func (m *muxerImpl) GetRequestPad(name string) *gst.Pad { return m.Element.GetRequestPad(name) } func (m *muxerImpl) GetElement() *gst.Element { return m.Element } // mp3Muxer wraps xingmux as a muxer so audio-only MP3 outputs // can reuse the same linking logic as containerised formats. type mp3Muxer struct { muxerImpl } // newMP3Muxer provides a muxer-compatible wrapper around gst xingmux. 
// xingmux inserts a Xing header containing total frame and byte counts, // allowing players to determine the file duration without scanning every frame. func newMP3Muxer() (*mp3Muxer, error) { xing, err := gst.NewElement("xingmux") if err != nil { return nil, err } return &mp3Muxer{ muxerImpl: muxerImpl{ Element: xing, }, }, nil } // GetRequestPad always returns the static sink pad to satisfy the muxer contract. func (m *mp3Muxer) GetRequestPad(_ string) *gst.Pad { return m.GetStaticPad("sink") } ================================================ FILE: pkg/pipeline/builder/muxer_test.go ================================================ package builder import ( "strings" "sync" "testing" "github.com/go-gst/go-gst/gst" "github.com/stretchr/testify/require" ) var gstInitOnce sync.Once func initGStreamer(t *testing.T) { t.Helper() gstInitOnce.Do(func() { gst.Init(nil) }) } func TestNewMuxer_KnownMuxers(t *testing.T) { initGStreamer(t) for _, name := range []string{"oggmux", "avmux_ivf", "mp4mux", "webmmux", "mpegtsmux"} { t.Run(name, func(t *testing.T) { m, err := newMuxer(name) require.NoError(t, err) require.NotNil(t, m) require.NotNil(t, m.GetElement()) }) } } func TestNewMuxer_InvalidMuxer(t *testing.T) { initGStreamer(t) _, err := newMuxer("identity") require.Error(t, err) require.True(t, strings.Contains(err.Error(), "not a muxer"), "unexpected error: %v", err) } func TestNewMP3Muxer(t *testing.T) { initGStreamer(t) m, err := newMP3Muxer() require.NoError(t, err) require.NotNil(t, m.GetRequestPad("unused")) } ================================================ FILE: pkg/pipeline/builder/pts_fixer.go ================================================ package builder import ( "github.com/go-gst/go-gst/gst" "github.com/livekit/egress/pkg/errors" "github.com/livekit/protocol/logger" ) // PTSFixer wraps a gst element and restores missing PTS values on its src pad // so downstream elements observe a monotonic timeline even when upstream elements // emit GST_CLOCK_TIME_NONE 
buffers (e.g. due to baseparse bugs). type ptsFixer struct { *gst.Element pad *gst.Pad probe uint64 last uint64 ptsSeen bool log logger.Logger } func newPTSFixer(elementName, context string) (*ptsFixer, error) { element, err := gst.NewElement(elementName) if err != nil { return nil, errors.ErrGstPipelineError(err) } pad := element.GetStaticPad("src") if pad == nil { element.Unref() return nil, errors.ErrGstPipelineError(newMissingPadError(elementName, "src")) } fixer := &ptsFixer{ Element: element, pad: pad, log: logger.GetLogger().WithValues("component", "pts_fixer", "context", context, "element", elementName), } fixer.probe = pad.AddProbe(gst.PadProbeTypeBuffer, fixer.onBuffer) return fixer, nil } func (f *ptsFixer) onBuffer(_ *gst.Pad, info *gst.PadProbeInfo) gst.PadProbeReturn { buf := info.GetBuffer() if buf == nil { return gst.PadProbeOK } pts := buf.PresentationTimestamp() if pts == gst.ClockTimeNone { if !f.ptsSeen { return gst.PadProbeOK } restored := gst.ClockTime(f.last) buf.SetPresentationTimestamp(restored) f.log.Debugw("restored missing pts from previous buffer", "pts", restored) return gst.PadProbeOK } f.last = uint64(pts) f.ptsSeen = true return gst.PadProbeOK } ================================================ FILE: pkg/pipeline/builder/segment.go ================================================ // Copyright 2023 LiveKit, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package builder

import (
	"fmt"
	"path"
	"time"

	"github.com/go-gst/go-gst/gst"

	"github.com/livekit/egress/pkg/config"
	"github.com/livekit/egress/pkg/errors"
	"github.com/livekit/egress/pkg/gstreamer"
	"github.com/livekit/protocol/livekit"
	"github.com/livekit/protocol/logger"
)

type FirstSampleMetadata struct {
	StartDate int64 // Real time date of the first media sample
}

// BuildSegmentBin constructs the HLS segment bin: an optional h264parse PTS fixer
// feeding a splitmuxsink that cuts mpegts segments every SegmentDuration seconds
// and names them via the "format-location-full" callback.
func BuildSegmentBin(pipeline *gstreamer.Pipeline, p *config.PipelineConfig) (*gstreamer.Bin, error) {
	b := pipeline.NewBin("segment")
	o := p.GetSegmentConfig()

	var h264ParseFixer *ptsFixer
	var err error
	if p.VideoEnabled {
		h264ParseFixer, err = newPTSFixer("h264parse", "segment:h264")
		if err != nil {
			return nil, err
		}
		if err = b.AddElements(h264ParseFixer.Element); err != nil {
			return nil, errors.ErrGstPipelineError(err)
		}
	}

	sink, err := gst.NewElement("splitmuxsink")
	if err != nil {
		return nil, errors.ErrGstPipelineError(err)
	}
	if err = sink.SetProperty("max-size-time", uint64(time.Duration(o.SegmentDuration)*time.Second)); err != nil {
		return nil, errors.ErrGstPipelineError(err)
	}
	if err = sink.SetProperty("send-keyframe-requests", true); err != nil {
		return nil, errors.ErrGstPipelineError(err)
	}
	if err = sink.SetProperty("muxer-factory", "mpegtsmux"); err != nil {
		return nil, errors.ErrGstPipelineError(err)
	}

	var startDate time.Time
	_, err = sink.Connect("format-location-full", func(_ *gst.Element, fragmentId uint, firstSample *gst.Sample) string {
		var pts time.Duration
		if firstSample != nil && firstSample.GetBuffer() != nil {
			// FIX: AsDuration returns nil when the buffer's PTS is
			// GST_CLOCK_TIME_NONE; the previous unconditional dereference
			// could panic inside the callback. Fall back to 0 pts instead.
			if d := firstSample.GetBuffer().PresentationTimestamp().AsDuration(); d != nil {
				pts = *d
			} else {
				logger.Infow("sample buffer has no pts in 'format-location-full' event handler, assuming 0 pts")
			}
		} else {
			logger.Infow("nil sample passed into 'format-location-full' event handler, assuming 0 pts")
		}

		if startDate.IsZero() {
			now := time.Now()
			// anchor wall-clock time of pts 0 off the first sample
			startDate = now.Add(-pts)

			mdata := FirstSampleMetadata{
				StartDate: now.UnixNano(),
			}
			str := gst.MarshalStructure(mdata)
			msg := gst.NewElementMessage(sink, str)
			sink.GetBus().Post(msg)
		}

		var segmentName string
		switch o.SegmentSuffix {
		case livekit.SegmentedFileSuffix_TIMESTAMP:
			ts := startDate.Add(pts)
			segmentName = fmt.Sprintf("%s_%s%03d.ts", o.SegmentPrefix, ts.Format("20060102150405"), ts.UnixMilli()%1000)
		default:
			segmentName = fmt.Sprintf("%s_%05d.ts", o.SegmentPrefix, fragmentId)
		}
		return path.Join(o.LocalDir, segmentName)
	})
	if err != nil {
		return nil, errors.ErrGstPipelineError(err)
	}

	if err = b.AddElements(sink); err != nil {
		return nil, errors.ErrGstPipelineError(err)
	}

	b.SetGetSrcPad(func(name string) *gst.Pad {
		if name == audioBinName {
			return sink.GetRequestPad("audio_%u")
		} else if h264ParseFixer != nil {
			return h264ParseFixer.GetStaticPad("sink")
		}
		// Should never happen
		return nil
	})

	return b, nil
}

================================================
FILE: pkg/pipeline/builder/stream.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package builder

import (
	"fmt"
	"time"

	"github.com/go-gst/go-gst/gst"
	"go.uber.org/atomic"

	"github.com/livekit/egress/pkg/config"
	"github.com/livekit/egress/pkg/errors"
	"github.com/livekit/egress/pkg/gstreamer"
	"github.com/livekit/egress/pkg/logging"
	"github.com/livekit/egress/pkg/types"
	"github.com/livekit/protocol/logger"
	"github.com/livekit/protocol/utils"
)

// StreamBin is the shared mux+tee head of the stream output; individual Stream
// branches are attached to it via BuildStream.
type StreamBin struct {
	Bin        *gstreamer.Bin   // parent bin containing mux and tee
	OutputType types.OutputType // RTMP or SRT
	latency    time.Duration    // pipeline latency, reused for per-stream queues
}

// Stream is one output branch (queue -> sink) under a StreamBin's tee.
type Stream struct {
	Conf           *config.Stream
	Bin            *gstreamer.Bin
	outputType     types.OutputType
	sink           *gst.Element  // rtmp2sink or srtsink
	keyframes      atomic.Uint64 // keyframes pushed (RTMP only, counted in the proxy pad)
	reconnections  atomic.Int32  // reconnect attempts since last successful send
	disconnectedAt atomic.Time   // start of the current disconnection window
	failed         atomic.Bool   // set when an SRT push returns FlowError; swallows further pushes
}

// BuildStreamBin creates the muxer (flvmux for RTMP, mpegtsmux for SRT) and a
// tee with allow-not-linked, so stream branches can be added/removed at runtime.
func BuildStreamBin(pipeline *gstreamer.Pipeline, p *config.PipelineConfig, o *config.StreamConfig) (*StreamBin, error) {
	b := pipeline.NewBin("stream")

	var mux *gst.Element
	var err error
	switch o.OutputType {
	case types.OutputTypeRTMP:
		mux, err = gst.NewElement("flvmux")
		if err != nil {
			return nil, errors.ErrGstPipelineError(err)
		}
		if err = mux.SetProperty("streamable", true); err != nil {
			return nil, errors.ErrGstPipelineError(err)
		}
		if err = mux.SetProperty("skip-backwards-streams", true); err != nil {
			return nil, errors.ErrGstPipelineError(err)
		}
		// add latency to give time for flvmux to receive and order packets from both streams
		if err = mux.SetProperty("latency", uint64(p.Latency.PipelineLatency)); err != nil {
			return nil, errors.ErrGstPipelineError(err)
		}
		// flvmux pads are requested by name ("audio"/"video") directly
		b.SetGetSrcPad(func(name string) *gst.Pad {
			return mux.GetRequestPad(name)
		})
	case types.OutputTypeSRT:
		mux, err = gst.NewElement("mpegtsmux")
		if err != nil {
			return nil, errors.ErrGstPipelineError(err)
		}
		if err = mux.SetProperty("latency", uint64(p.Latency.PipelineLatency)); err != nil {
			return nil, errors.ErrGstPipelineError(err)
		}
	default:
		err = errors.ErrInvalidInput("output type")
	}
	if err != nil {
		return nil, err
	}

	tee, err := gst.NewElement("tee")
	if err != nil {
		return nil, errors.ErrGstPipelineError(err)
	}
	// allow-not-linked keeps the pipeline flowing when no branch is attached
	if err = tee.SetProperty("allow-not-linked", true); err != nil {
		return nil, errors.ErrGstPipelineError(err)
	}

	if err = b.AddElements(mux, tee); err != nil {
		return nil, err
	}

	sb := &StreamBin{
		Bin:        b,
		OutputType: o.OutputType,
		latency:    p.Latency.PipelineLatency,
	}

	return sb, nil
}

// BuildStream creates one output branch for the given stream config: a leaky
// queue feeding an rtmp2sink or srtsink, connected through a ghost "proxy" pad
// whose chain function absorbs sink errors so they don't propagate upstream.
// NOTE(review): the RTMP branch computes 1e9/framerate — assumes framerate > 0;
// confirm callers never pass 0.
func (sb *StreamBin) BuildStream(stream *config.Stream, framerate int32) (*Stream, error) {
	stream.Name = utils.NewGuid("")
	b := sb.Bin.NewBin(stream.Name)

	queue, err := gstreamer.BuildQueue(fmt.Sprintf("queue_%s", stream.Name), sb.latency, true)
	if err != nil {
		return nil, errors.ErrGstPipelineError(err)
	}

	ss := &Stream{
		Conf:       stream,
		Bin:        b,
		outputType: sb.OutputType,
	}

	var sink *gst.Element
	switch sb.OutputType {
	case types.OutputTypeRTMP:
		sink, err = gst.NewElementWithName("rtmp2sink", fmt.Sprintf("rtmp2sink_%s", stream.Name))
		if err != nil {
			return nil, errors.ErrGstPipelineError(err)
		}
		if err = sink.Set("location", stream.ParsedUrl); err != nil {
			return nil, errors.ErrGstPipelineError(err)
		}
		if err = sink.SetProperty("async-connect", false); err != nil {
			return nil, errors.ErrGstPipelineError(err)
		}
	case types.OutputTypeSRT:
		sink, err = gst.NewElementWithName("srtsink", fmt.Sprintf("srtsink_%s", stream.Name))
		if err != nil {
			return nil, errors.ErrGstPipelineError(err)
		}
		if err = sink.SetProperty("uri", stream.ParsedUrl); err != nil {
			return nil, errors.ErrGstPipelineError(err)
		}
		if err = sink.SetProperty("wait-for-connection", false); err != nil {
			return nil, errors.ErrGstPipelineError(err)
		}
	default:
		return nil, errors.ErrInvalidInput("output type")
	}

	// GstBaseSink properties
	if err = sink.SetProperty("async", false); err != nil {
		return nil, errors.ErrGstPipelineError(err)
	}
	if err = sink.SetProperty("sync", false); err != nil {
		return nil, errors.ErrGstPipelineError(err)
	}

	if err = b.AddElements(queue, sink); err != nil {
		return nil, err
	}
	ss.sink = sink

	// add a proxy pad between the queue and sink to prevent errors from propagating upstream
	b.SetLinkFunc(func(_ []*gst.Element) error {
		proxy := gst.NewGhostPad(fmt.Sprintf("proxy_%s", stream.Name), sink.GetStaticPad("sink"))
		proxy.Ref()
		proxy.ActivateMode(gst.PadModePush, true)

		switch sb.OutputType {
		case types.OutputTypeRTMP:
			videoFrameDuration := uint64(1000000000 / framerate)
			proxy.SetChainFunction(func(self *gst.Pad, _ *gst.Object, buffer *gst.Buffer) gst.FlowReturn {
				buffer.Ref()
				// NOTE(review): unsigned subtraction — this is only < 2 when
				// buffer duration is within +1ns of the video frame duration
				// (shorter durations wrap to a huge value); used here as a
				// heuristic to tell video buffers from audio. Confirm intended.
				if uint64(buffer.Duration())-videoFrameDuration < 2 && !buffer.HasFlags(gst.BufferFlagDeltaUnit) {
					// non-delta video frame
					ss.keyframes.Inc()
				}
				links, _ := self.GetInternalLinks()
				switch {
				case len(links) != 1:
					return gst.FlowNotLinked
				case links[0].Push(buffer) == gst.FlowEOS:
					return gst.FlowEOS
				default:
					// any non-EOS result (including errors) is reported as OK
					// so sink failures don't tear down the shared pipeline
					return gst.FlowOK
				}
			})
		case types.OutputTypeSRT:
			proxy.SetChainListFunction(func(self *gst.Pad, _ *gst.Object, list *gst.BufferList) gst.FlowReturn {
				list.Ref()
				// once failed, silently drop data instead of erroring upstream
				if ss.failed.Load() {
					return gst.FlowOK
				}
				links, _ := self.GetInternalLinks()
				if len(links) != 1 {
					return gst.FlowNotLinked
				}
				switch links[0].PushList(list) {
				case gst.FlowEOS:
					return gst.FlowEOS
				case gst.FlowError:
					// latch failure; subsequent pushes are swallowed above
					ss.failed.Store(true)
					return gst.FlowOK
				default:
					return gst.FlowOK
				}
			})
		}

		// link queue to sink
		if padReturn := queue.GetStaticPad("src").Link(proxy.Pad); padReturn != gst.PadLinkOK {
			return errors.ErrPadLinkFailed(queue.GetName(), "proxy", padReturn.String())
		}
		return nil
	})

	return ss, nil
}

// Reset attempts to recover a disconnected stream by cycling its bin through
// NULL and back to PLAYING. Returns (false, nil) when the stream should be
// abandoned instead: never connected (no bytes ever acked), or disconnected
// for more than 30s without progress.
func (s *Stream) Reset(streamErr error) (bool, error) {
	var outBytes uint64
	if stats, ok := s.Stats(); ok {
		outBytes = stats.OutBytesAcked
	}

	if s.reconnections.Load() == 0 && outBytes == 0 {
		// unable to connect, probably a bad stream key or url
		return false, nil
	}
	if outBytes > 0 {
		// first disconnection
		s.disconnectedAt.Store(time.Now())
		s.reconnections.Store(0)
	} else if time.Since(s.disconnectedAt.Load()) > time.Second*30 {
		// still no progress after 30s of retries — give up
		return false, nil
	}
	s.reconnections.Inc()

	logger.Warnw("resetting stream", streamErr, "url", s.Conf.RedactedUrl)
	if err := s.Bin.SetState(gst.StateNull); err != nil {
		return false, err
	}
	if err := s.Bin.SetState(gst.StatePlaying); err != nil {
		return false, err
	}
	return true, nil
}

// property keys inside the sink's "stats" structure
const (
	outBytesTotal = "out-bytes-total"
	outBytesAcked = "out-bytes-acked"
	inBytesTotal  = "in-bytes-total"
	inBytesAcked  = "in-bytes-acked"
	srtBytesSent  = "bytes-sent-total"
)

// Stats reads the sink's "stats" property and maps it into a StreamStats
// snapshot. Returns false when the property is unavailable or the output type
// is unsupported.
func (s *Stream) Stats() (*logging.StreamStats, bool) {
	structure, err := s.sink.GetProperty("stats")
	if err != nil || structure == nil {
		return nil, false
	}

	stats := structure.(*gst.Structure).Values()
	if stats == nil {
		return nil, false
	}

	streamStats := &logging.StreamStats{
		Timestamp: time.Now().Format(time.DateTime),
	}
	switch s.outputType {
	case types.OutputTypeRTMP:
		streamStats.Keyframes = s.keyframes.Load()
		streamStats.OutBytesTotal = tryUInt64(stats, outBytesTotal)
		streamStats.OutBytesAcked = tryUInt64(stats, outBytesAcked)
		streamStats.InBytesTotal = tryUInt64(stats, inBytesTotal)
		streamStats.InBytesAcked = tryUInt64(stats, inBytesAcked)
	case types.OutputTypeSRT:
		streamStats.OutBytesTotal = tryUInt64(stats, srtBytesSent)
	default:
		return nil, false
	}

	return streamStats, true
}

// sink stats sometimes returns strings instead of uint64
func tryUInt64(stats map[string]interface{}, key string) uint64 {
	switch val := stats[key].(type) {
	case uint64:
		return val
	default:
		logger.Infow(fmt.Sprintf("unexpected type for %s", key),
			"type", fmt.Sprintf("%T", val),
			"value", val,
		)
		return 0
	}
}

================================================
FILE: pkg/pipeline/builder/video.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package builder

import (
	"fmt"
	"strings"
	"time"

	"github.com/go-gst/go-gst/gst"
	"github.com/go-gst/go-gst/gst/app"
	"github.com/linkdata/deadlock"

	"github.com/livekit/egress/pkg/config"
	"github.com/livekit/egress/pkg/errors"
	"github.com/livekit/egress/pkg/gstreamer"
	"github.com/livekit/egress/pkg/types"
	"github.com/livekit/protocol/logger"
	lksdk "github.com/livekit/server-sdk-go/v2"
)

const (
	videoTestSrcName = "video_test_src"
)

// VideoBin manages the video half of the pipeline: source bins (web capture or
// per-track SDK appsrcs), an input-selector for mute/track switching, and the
// shared encoder sink. All mutable maps/fields below are guarded by mu.
type VideoBin struct {
	bin  *gstreamer.Bin
	conf *config.PipelineConfig

	mu          deadlock.Mutex
	nextID      int               // monotonically increasing suffix for source bin names
	selectedPad string            // name of the pad currently chosen on the selector
	lastPTS     uint64
	pads        map[string]*gst.Pad // bin name -> selector request pad
	names       map[string]string   // track ID -> bin name

	selector    *gst.Element
	rawVideoTee *gst.Element
}

// buildVideoQueue creates a queue for the video pipeline. For live sources the
// queue is leaky (drops old buffers when full) to handle real-time overrun. For
// non-live replay the queue is blocking so backpressure throttles the source.
func (b *VideoBin) buildVideoQueue(name string) (*gst.Element, error) {
	queue, err := gstreamer.BuildQueue(name, b.conf.Latency.PipelineLatency, b.conf.Live)
	if err != nil {
		return nil, errors.ErrGstPipelineError(err)
	}
	return queue, nil
}

// BuildVideoBin assembles the video bin for the configured source type, wires
// SDK track callbacks when applicable, and installs the sink-pad getter used by
// downstream output bins (tee when multiple encoded outputs, queue otherwise).
func BuildVideoBin(pipeline *gstreamer.Pipeline, p *config.PipelineConfig) error {
	b := &VideoBin{
		bin:  pipeline.NewBin("video"),
		conf: p,
	}

	switch p.SourceType {
	case types.SourceTypeWeb:
		if err := b.buildWebInput(); err != nil {
			return err
		}
	case types.SourceTypeSDK:
		if err := b.buildSDKInput(); err != nil {
			return err
		}

		pipeline.AddOnTrackAdded(b.onTrackAdded)
		pipeline.AddOnTrackRemoved(b.onTrackRemoved)
		pipeline.AddOnTrackMuted(b.onTrackMuted)
		pipeline.AddOnTrackUnmuted(b.onTrackUnmuted)
		pipeline.AddOnSourceBinReset(b.onSourceBinReset)
	}

	var getPad func() *gst.Pad
	if len(p.GetEncodedOutputs()) > 1 {
		// multiple outputs share the encoded stream through a tee
		tee, err := gst.NewElementWithName("tee", "video_tee")
		if err != nil {
			return errors.ErrGstPipelineError(err)
		}
		if err = b.bin.AddElement(tee); err != nil {
			return err
		}
		getPad = func() *gst.Pad {
			return tee.GetRequestPad("src_%u")
		}
	} else if len(p.GetEncodedOutputs()) > 0 {
		queue, err := b.buildVideoQueue("video_queue")
		if err != nil {
			return err
		}
		if err = b.bin.AddElement(queue); err != nil {
			return err
		}
		getPad = func() *gst.Pad {
			return queue.GetStaticPad("src")
		}
	}

	b.bin.SetGetSinkPad(func(name string) *gst.Pad {
		// image outputs tap the raw (pre-encode) video via rawVideoTee
		if strings.HasPrefix(name, "image") {
			return b.rawVideoTee.GetRequestPad("src_%u")
		} else if getPad != nil {
			return getPad()
		}
		return nil
	})

	return pipeline.AddSourceBin(b.bin)
}

// onTrackAdded attaches an appsrc bin for a newly published video track.
// No-op once the pipeline is past the running state.
func (b *VideoBin) onTrackAdded(ts *config.TrackSource) {
	if b.bin.GetState() > gstreamer.StateRunning {
		return
	}

	if ts.TrackKind == lksdk.TrackKindVideo {
		logger.Debugw("adding video app src bin", "trackID", ts.TrackID)
		if err := b.addAppSrcBin(ts); err != nil {
			logger.Errorw("failed to add video app src bin", err, "trackID", ts.TrackID)
			b.bin.OnError(err)
		}
	}
}

// onTrackRemoved tears down the track's source bin, switching the selector to
// the black test source first if the removed track was selected.
func (b *VideoBin) onTrackRemoved(trackID string) {
	if b.bin.GetState() > gstreamer.StateRunning {
		return
	}

	b.mu.Lock()
	name, ok := b.names[trackID]
	if !ok {
		b.mu.Unlock()
		return
	}
	delete(b.names, trackID)
	delete(b.pads, name)
	if b.selectedPad == name {
		if err := b.setSelectorPadLocked(videoTestSrcName); err != nil {
			b.mu.Unlock()
			b.bin.OnError(err)
			return
		}
	}
	b.mu.Unlock()

	// remove outside the lock; RemoveSourceBin may block
	if err := b.bin.RemoveSourceBin(name); err != nil {
		b.bin.OnError(err)
	}
}

// onTrackMuted switches the selector to the test source while the selected
// track is muted; other tracks are unaffected.
func (b *VideoBin) onTrackMuted(trackID string) {
	if b.bin.GetState() > gstreamer.StateRunning {
		return
	}

	b.mu.Lock()
	if name, ok := b.names[trackID]; ok && b.selectedPad == name {
		if err := b.setSelectorPadLocked(videoTestSrcName); err != nil {
			b.mu.Unlock()
			b.bin.OnError(err)
			return
		}
	}
	b.mu.Unlock()
}

// onTrackUnmuted switches the selector back to the track's pad.
func (b *VideoBin) onTrackUnmuted(trackID string) {
	if b.bin.GetState() > gstreamer.StateRunning {
		return
	}

	b.mu.Lock()
	if name, ok := b.names[trackID]; ok {
		if err := b.setSelectorPadLocked(name); err != nil {
			b.mu.Unlock()
			b.bin.OnError(err)
			return
		}
	}
	b.mu.Unlock()
}

// onSourceBinReset rebuilds a stuck video source bin; audio tracks are ignored.
func (b *VideoBin) onSourceBinReset(ts *config.TrackSource) error {
	if ts.TrackKind != lksdk.TrackKindVideo {
		return nil
	}
	return b.resetVideoAppSrcBin(ts)
}

// resetVideoAppSrcBin force-removes the track's existing source bin and builds
// a fresh one with a new appsrc, re-selecting it on the input-selector when
// decoding. Called with the intent that ts.AppSrc is replaced in place.
func (b *VideoBin) resetVideoAppSrcBin(ts *config.TrackSource) error {
	b.mu.Lock()
	defer b.mu.Unlock()

	oldName, ok := b.names[ts.TrackID]
	if !ok {
		return errors.New("track already removed, cannot reset video source bin")
	}
	if b.bin.GetState() > gstreamer.StateRunning {
		return errors.New("pipeline stopping, cannot reset video source bin")
	}

	// If the stuck bin is the currently selected pad, switch to test src first
	if b.conf.VideoDecoding && b.selectedPad == oldName {
		if err := b.setSelectorPadLocked(videoTestSrcName); err != nil {
			return err
		}
	}

	// Clean up old pad reference before force-remove
	delete(b.pads, oldName)

	// Force-remove old bin (blocks on GLib main loop, safe to hold b.mu since
	// ForceRemoveSourceBin only acquires gstreamer.Bin's internal mutex)
	if err := b.bin.ForceRemoveSourceBin(oldName); err != nil {
		return fmt.Errorf("failed to force remove video source bin: %w", err)
	}

	// Create new appsrc element (reuse the same element name so watch.go works)
	newElement, err := gst.NewElementWithName("appsrc", fmt.Sprintf("app_%s", ts.TrackID))
	if err != nil {
		return errors.ErrGstPipelineError(err)
	}
	ts.AppSrc = app.SrcFromElement(newElement)

	name := fmt.Sprintf("%s_%d", ts.TrackID, b.nextID)
	b.nextID++
	appSrcBin, err := b.buildAppSrcBin(ts, name)
	if err != nil {
		return fmt.Errorf("failed to build new video source bin: %w", err)
	}
	if b.conf.VideoDecoding {
		b.createSrcPadLocked(ts.TrackID, name)
	}
	if err = b.bin.AddSourceBin(appSrcBin); err != nil {
		return fmt.Errorf("failed to add new video source bin: %w", err)
	}
	if b.conf.VideoDecoding {
		if err := b.setSelectorPadLocked(name); err != nil {
			return err
		}
	}

	logger.Infow("video source bin reset complete", "trackID", ts.TrackID, "newBin", name)
	return nil
}

// buildWebInput captures the X display via ximagesrc and normalizes it to the
// configured framerate before handing off to the decoded video sink chain.
func (b *VideoBin) buildWebInput() error {
	xImageSrc, err := gst.NewElement("ximagesrc")
	if err != nil {
		return errors.ErrGstPipelineError(err)
	}
	if err = xImageSrc.SetProperty("display-name", b.conf.Display); err != nil {
		return errors.ErrGstPipelineError(err)
	}
	// use-damage off: always grab full frames rather than damaged regions
	if err = xImageSrc.SetProperty("use-damage", false); err != nil {
		return errors.ErrGstPipelineError(err)
	}
	if err = xImageSrc.SetProperty("show-pointer", false); err != nil {
		return errors.ErrGstPipelineError(err)
	}

	videoQueue, err := b.buildVideoQueue("video_input_queue")
	if err != nil {
		return err
	}

	videoConvert, err := gst.NewElement("videoconvert")
	if err != nil {
		return errors.ErrGstPipelineError(err)
	}

	videoRate, err := gst.NewElement("videorate")
	if err != nil {
		return errors.ErrGstPipelineError(err)
	}
	if err = videoRate.SetProperty("skip-to-first", true); err != nil {
		return errors.ErrGstPipelineError(err)
	}

	caps, err := gst.NewElement("capsfilter")
	if err != nil {
		return errors.ErrGstPipelineError(err)
	}
	if err = caps.SetProperty("caps", gst.NewCapsFromString(
		fmt.Sprintf(
			"video/x-raw,framerate=%d/1",
			b.conf.Framerate,
		),
	)); err != nil {
		return errors.ErrGstPipelineError(err)
	}

	if err = b.bin.AddElements(xImageSrc, videoQueue, videoConvert, videoRate, caps); err != nil {
		return err
	}

	return b.addDecodedVideoSink()
}

// buildSDKInput wires up SDK-sourced video: an input-selector (when decoding),
// the initial track's appsrc bin if present, and a black test source fallback.
func (b *VideoBin) buildSDKInput() error {
	b.pads = make(map[string]*gst.Pad)
	b.names = make(map[string]string)

	// add selector first so pads can be created
	if b.conf.VideoDecoding {
		if err := b.addSelector(); err != nil {
			return err
		}
	}

	if b.conf.VideoTrack != nil {
		if err := b.addAppSrcBin(b.conf.VideoTrack); err != nil {
			return err
		}
	}

	if b.conf.VideoDecoding {
		b.bin.SetGetSrcPad(b.getSrcPad)
		if err := b.addVideoTestSrcBin(); err != nil {
			return err
		}
		if b.conf.VideoTrack == nil {
			// no track yet — start on the test source
			if err := b.setSelectorPad(videoTestSrcName); err != nil {
				return err
			}
		}
		if err := b.addDecodedVideoSink(); err != nil {
			return err
		}
	}

	return nil
}

// addAppSrcBin builds and attaches a source bin for the track, creating a
// selector pad and selecting it when decoding.
func (b *VideoBin) addAppSrcBin(ts *config.TrackSource) error {
	name := fmt.Sprintf("%s_%d", ts.TrackID, b.nextID)
	b.nextID++
	appSrcBin, err := b.buildAppSrcBin(ts, name)
	if err != nil {
		return err
	}
	if b.conf.VideoDecoding {
		b.createSrcPad(ts.TrackID, name)
	}
	if err = b.bin.AddSourceBin(appSrcBin); err != nil {
		return err
	}
	if b.conf.VideoDecoding {
		return b.setSelectorPad(name)
	}
	return nil
}

// buildAppSrcBin constructs appsrc -> depayloader (-> decoder or parse) for the
// track's codec. In passthrough mode (no VideoDecoding) the bin ends after the
// depay/parse stage; otherwise a decoder plus converter chain is appended.
func (b *VideoBin) buildAppSrcBin(ts *config.TrackSource, name string) (*gstreamer.Bin, error) {
	appSrcBin := b.bin.NewBin(name)
	// source bins never emit EOS themselves; the pipeline controls shutdown
	appSrcBin.SetEOSFunc(func() bool {
		return false
	})

	ts.AppSrc.SetArg("format", "time")
	if err := ts.AppSrc.SetProperty("is-live", b.conf.Live); err != nil {
		return nil, errors.ErrGstPipelineError(err)
	}
	if !b.conf.Live {
		// non-live: block pushes so the source is throttled by the pipeline
		if err := ts.AppSrc.SetProperty("block", true); err != nil {
			return nil, errors.ErrGstPipelineError(err)
		}
	}
	if err := appSrcBin.AddElement(ts.AppSrc.Element); err != nil {
		return nil, err
	}

	switch ts.MimeType {
	case types.MimeTypeH264:
		if err := ts.AppSrc.SetProperty("caps", gst.NewCapsFromString(fmt.Sprintf(
			"application/x-rtp,media=video,payload=%d,encoding-name=H264,clock-rate=%d",
			ts.PayloadType, ts.ClockRate,
		))); err != nil {
			return nil, errors.ErrGstPipelineError(err)
		}

		rtpH264Depay, err := gst.NewElement("rtph264depay")
		if err != nil {
			return nil, errors.ErrGstPipelineError(err)
		}

		caps, err := gst.NewElement("capsfilter")
		if err != nil {
			return nil, errors.ErrGstPipelineError(err)
		}
		if err = caps.SetProperty("caps", gst.NewCapsFromString(
			"video/x-h264,stream-format=byte-stream",
		)); err != nil {
			return nil, errors.ErrGstPipelineError(err)
		}
		if err = appSrcBin.AddElements(rtpH264Depay, caps); err != nil {
			return nil, err
		}

		if !b.conf.VideoDecoding {
			// passthrough: h264parse with PTS restoration, no decoder
			h264ParseFixer, err := newPTSFixer("h264parse", fmt.Sprintf("track:%s", ts.TrackID))
			if err != nil {
				return nil, err
			}
			if err = appSrcBin.AddElement(h264ParseFixer.Element); err != nil {
				return nil, err
			}
			return appSrcBin, nil
		}

		avDecH264, err := gst.NewElement("avdec_h264")
		if err != nil {
			return nil, errors.ErrGstPipelineError(err)
		}
		if err = appSrcBin.AddElement(avDecH264); err != nil {
			return nil, err
		}

	case types.MimeTypeVP8:
		if err := ts.AppSrc.SetProperty("caps", gst.NewCapsFromString(fmt.Sprintf(
			"application/x-rtp,media=video,payload=%d,encoding-name=VP8,clock-rate=%d",
			ts.PayloadType, ts.ClockRate,
		))); err != nil {
			return nil, errors.ErrGstPipelineError(err)
		}

		rtpVP8Depay, err := gst.NewElement("rtpvp8depay")
		if err != nil {
			return nil, errors.ErrGstPipelineError(err)
		}
		if err = appSrcBin.AddElement(rtpVP8Depay); err != nil {
			return nil, err
		}

		if !b.conf.VideoDecoding {
			return appSrcBin, nil
		}

		vp8Dec, err := gst.NewElement("vp8dec")
		if err != nil {
			return nil, errors.ErrGstPipelineError(err)
		}
		if err = appSrcBin.AddElement(vp8Dec); err != nil {
			return nil, err
		}

	case types.MimeTypeVP9:
		if err := ts.AppSrc.SetProperty("caps", gst.NewCapsFromString(fmt.Sprintf(
			"application/x-rtp,media=video,payload=%d,encoding-name=VP9,clock-rate=%d",
			ts.PayloadType, ts.ClockRate,
		))); err != nil {
			return nil, errors.ErrGstPipelineError(err)
		}

		rtpVP9Depay, err := gst.NewElement("rtpvp9depay")
		if err != nil {
			return nil, errors.ErrGstPipelineError(err)
		}
		if err = appSrcBin.AddElement(rtpVP9Depay); err != nil {
			return nil, err
		}

		if !b.conf.VideoDecoding {
			// passthrough: vp9parse with PTS restoration, plus a sanity caps
			// filter constraining width/height to valid ranges
			vp9ParseFixer, err := newPTSFixer("vp9parse", fmt.Sprintf("track:%s", ts.TrackID))
			if err != nil {
				return nil, err
			}
			vp9Parse := vp9ParseFixer.Element
			vp9Caps, err := gst.NewElement("capsfilter")
			if err != nil {
				return nil, errors.ErrGstPipelineError(err)
			}
			if err = vp9Caps.SetProperty("caps", gst.NewCapsFromString(
				"video/x-vp9,width=[16,2147483647],height=[16,2147483647]",
			)); err != nil {
				return nil, errors.ErrGstPipelineError(err)
			}
			if err = appSrcBin.AddElements(vp9Parse, vp9Caps); err != nil {
				return nil, err
			}
			return appSrcBin, nil
		}

		vp9Dec, err := gst.NewElement("vp9dec")
		if err != nil {
			return nil, errors.ErrGstPipelineError(err)
		}
		if err = appSrcBin.AddElement(vp9Dec); err != nil {
			return nil, err
		}

	default:
		return nil, errors.ErrNotSupported(string(ts.MimeType))
	}

	if err := b.addVideoConverter(appSrcBin); err != nil {
		return nil, err
	}
	return appSrcBin, nil
}

// addVideoTestSrcBin adds a live black videotestsrc used as the selector's
// fallback when no track is available, muted, or being reset.
func (b *VideoBin) addVideoTestSrcBin() error {
	testSrcBin := b.bin.NewBin(videoTestSrcName)
	if err := b.bin.AddSourceBin(testSrcBin); err != nil {
		return err
	}

	videoTestSrc, err := gst.NewElement("videotestsrc")
	if err != nil {
		return errors.ErrGstPipelineError(err)
	}
	if err = videoTestSrc.SetProperty("is-live", true); err != nil {
		return errors.ErrGstPipelineError(err)
	}
	videoTestSrc.SetArg("pattern", "black")

	queue, err := gstreamer.BuildQueue("video_test_src_queue", b.conf.Latency.PipelineLatency, false)
	if err != nil {
		return err
	}
	// hold back 2s so real track data takes priority when present
	if err = queue.SetProperty("min-threshold-time", uint64(2e9)); err != nil {
		return errors.ErrGstPipelineError(err)
	}

	caps, err := b.newVideoCapsFilter(true)
	if err != nil {
		return errors.ErrGstPipelineError(err)
	}

	if err = testSrcBin.AddElements(videoTestSrc, queue, caps); err != nil {
		return err
	}

	b.createTestSrcPad()
	return nil
}

func (b *VideoBin) addSelector() error {
	inputSelector, err := gst.NewElement("input-selector")
	if err
!= nil { return errors.ErrGstPipelineError(err) } videoRate, err := gst.NewElement("videorate") if err != nil { return errors.ErrGstPipelineError(err) } if err = videoRate.SetProperty("skip-to-first", true); err != nil { return errors.ErrGstPipelineError(err) } caps, err := b.newVideoCapsFilter(true) if err != nil { return errors.ErrGstPipelineError(err) } if err = b.bin.AddElements(inputSelector, videoRate, caps); err != nil { return err } b.selector = inputSelector return nil } func (b *VideoBin) addEncoder() error { videoQueue, err := gstreamer.BuildQueue("video_encoder_queue", b.conf.Latency.PipelineLatency, false) if err != nil { return errors.ErrGstPipelineError(err) } if err = b.bin.AddElement(videoQueue); err != nil { return err } switch b.conf.VideoOutCodec { // we only encode h264, the rest are too slow case types.MimeTypeH264: x264Enc, err := gst.NewElement("x264enc") if err != nil { return errors.ErrGstPipelineError(err) } x264Enc.SetArg("speed-preset", "veryfast") var options []string disabledSceneCut := false // Streaming outputs always set KeyFrameInterval, so this effectively disables scenecut for RTMP/SRT. 
if b.conf.KeyFrameInterval != 0 { keyframeInterval := uint(b.conf.KeyFrameInterval * float64(b.conf.Framerate)) if err = x264Enc.SetProperty("key-int-max", keyframeInterval); err != nil { return errors.ErrGstPipelineError(err) } options = append(options, "scenecut=0") disabledSceneCut = true } bufCapacity := uint(2000) // 2s if b.conf.GetSegmentConfig() != nil { // avoid key frames other than at segments boundaries as splitmuxsink can become inconsistent otherwise if !disabledSceneCut { options = append(options, "scenecut=0") disabledSceneCut = true } bufCapacity = uint(time.Duration(b.conf.GetSegmentConfig().SegmentDuration) * (time.Second / time.Millisecond)) } if bufCapacity > 10000 { // Max value allowed by gstreamer bufCapacity = 10000 } if err = x264Enc.SetProperty("vbv-buf-capacity", bufCapacity); err != nil { return errors.ErrGstPipelineError(err) } if err = x264Enc.SetProperty("bitrate", uint(b.conf.VideoBitrate)); err != nil { return errors.ErrGstPipelineError(err) } if sc := b.conf.GetStreamConfig(); sc != nil && sc.OutputType == types.OutputTypeRTMP { options = append(options, "nal-hrd=cbr") } if len(options) > 0 { optionString := strings.Join(options, ":") if err = x264Enc.SetProperty("option-string", optionString); err != nil { return errors.ErrGstPipelineError(err) } } caps, err := gst.NewElement("capsfilter") if err != nil { return errors.ErrGstPipelineError(err) } if err = caps.SetProperty("caps", gst.NewCapsFromString(fmt.Sprintf( "video/x-h264,profile=%s,multiview-mode=mono,multiview-flags=(GstVideoMultiviewFlagsSet)0:ffffffff:/right-view-first/left-flipped/left-flopped/right-flipped/right-flopped/half-aspect/mixed-mono", b.conf.VideoProfile, ))); err != nil { return errors.ErrGstPipelineError(err) } if err = b.bin.AddElements(x264Enc, caps); err != nil { return err } return nil case types.MimeTypeVP9: vp9Enc, err := gst.NewElement("vp9enc") if err != nil { return errors.ErrGstPipelineError(err) } if err = vp9Enc.SetProperty("deadline", 
int64(1)); err != nil { return errors.ErrGstPipelineError(err) } if err = vp9Enc.SetProperty("row-mt", true); err != nil { return errors.ErrGstPipelineError(err) } if err = vp9Enc.SetProperty("tile-columns", 3); err != nil { return errors.ErrGstPipelineError(err) } if err = vp9Enc.SetProperty("tile-rows", 1); err != nil { return errors.ErrGstPipelineError(err) } if err = vp9Enc.SetProperty("frame-parallel", true); err != nil { return errors.ErrGstPipelineError(err) } if err = vp9Enc.SetProperty("max-quantizer", 52); err != nil { return errors.ErrGstPipelineError(err) } if err = vp9Enc.SetProperty("min-quantizer", 2); err != nil { return errors.ErrGstPipelineError(err) } if err = b.bin.AddElement(vp9Enc); err != nil { return err } fallthrough default: return errors.ErrNotSupported(fmt.Sprintf("%s encoding", b.conf.VideoOutCodec)) } } func (b *VideoBin) addDecodedVideoSink() error { var err error b.rawVideoTee, err = gst.NewElement("tee") if err != nil { return errors.ErrGstPipelineError(err) } if err = b.bin.AddElement(b.rawVideoTee); err != nil { return err } if b.conf.VideoEncoding { err = b.addEncoder() if err != nil { return err } } return nil } func (b *VideoBin) addVideoConverter(bin *gstreamer.Bin) error { videoQueue, err := b.buildVideoQueue("video_input_queue") if err != nil { return err } videoConvert, err := gst.NewElement("videoconvert") if err != nil { return errors.ErrGstPipelineError(err) } videoScale, err := gst.NewElement("videoscale") if err != nil { return errors.ErrGstPipelineError(err) } elements := []*gst.Element{videoQueue, videoConvert, videoScale} if !b.conf.VideoDecoding { videoRate, err := gst.NewElement("videorate") if err != nil { return errors.ErrGstPipelineError(err) } if err = videoRate.SetProperty("skip-to-first", true); err != nil { return errors.ErrGstPipelineError(err) } elements = append(elements, videoRate) } caps, err := b.newVideoCapsFilter(!b.conf.VideoDecoding) if err != nil { return errors.ErrGstPipelineError(err) } 
elements = append(elements, caps) return bin.AddElements(elements...) } func (b *VideoBin) newVideoCapsFilter(includeFramerate bool) (*gst.Element, error) { caps, err := gst.NewElement("capsfilter") if err != nil { return nil, errors.ErrGstPipelineError(err) } if includeFramerate { err = caps.SetProperty("caps", gst.NewCapsFromString(fmt.Sprintf( "video/x-raw,framerate=%d/1,format=I420,width=%d,height=%d,colorimetry=bt709,chroma-site=mpeg2,pixel-aspect-ratio=1/1", b.conf.Framerate, b.conf.Width, b.conf.Height, ))) } else { err = caps.SetProperty("caps", gst.NewCapsFromString(fmt.Sprintf( "video/x-raw,format=I420,width=%d,height=%d,colorimetry=bt709,chroma-site=mpeg2,pixel-aspect-ratio=1/1", b.conf.Width, b.conf.Height, ))) } if err != nil { return nil, errors.ErrGstPipelineError(err) } return caps, nil } func (b *VideoBin) getSrcPad(name string) *gst.Pad { b.mu.Lock() defer b.mu.Unlock() return b.pads[name] } func (b *VideoBin) createSrcPad(trackID, name string) { b.mu.Lock() defer b.mu.Unlock() b.createSrcPadLocked(trackID, name) } func (b *VideoBin) createSrcPadLocked(trackID, name string) { b.names[trackID] = name pad := b.selector.GetRequestPad("sink_%u") pad.AddProbe(gst.PadProbeTypeBuffer, func(_ *gst.Pad, info *gst.PadProbeInfo) gst.PadProbeReturn { pts := uint64(info.GetBuffer().PresentationTimestamp()) b.mu.Lock() if pts < b.lastPTS || (b.selectedPad != videoTestSrcName && b.selectedPad != name) { b.mu.Unlock() return gst.PadProbeDrop } b.lastPTS = pts b.mu.Unlock() return gst.PadProbeOK }) b.pads[name] = pad } func (b *VideoBin) createTestSrcPad() { b.mu.Lock() defer b.mu.Unlock() pad := b.selector.GetRequestPad("sink_%u") pad.AddProbe(gst.PadProbeTypeBuffer, func(_ *gst.Pad, info *gst.PadProbeInfo) gst.PadProbeReturn { pts := uint64(info.GetBuffer().PresentationTimestamp()) b.mu.Lock() if pts < b.lastPTS || (b.selectedPad != videoTestSrcName) { b.mu.Unlock() return gst.PadProbeDrop } b.lastPTS = pts b.mu.Unlock() return gst.PadProbeOK }) 
b.pads[videoTestSrcName] = pad } func (b *VideoBin) setSelectorPad(name string) error { b.mu.Lock() defer b.mu.Unlock() return b.setSelectorPadLocked(name) } func (b *VideoBin) setSelectorPadLocked(name string) error { pad := b.pads[name] // drop until the next keyframe pad.AddProbe(gst.PadProbeTypeBuffer, func(_ *gst.Pad, info *gst.PadProbeInfo) gst.PadProbeReturn { buffer := info.GetBuffer() if buffer.HasFlags(gst.BufferFlagDeltaUnit) { return gst.PadProbeDrop } logger.Debugw("active pad changed", "name", name) return gst.PadProbeRemove }) if err := b.selector.SetProperty("active-pad", pad); err != nil { return errors.ErrGstPipelineError(err) } b.selectedPad = name return nil } ================================================ FILE: pkg/pipeline/builder/vp9_probe.go ================================================ package builder import ( "fmt" "sync/atomic" "time" "github.com/go-gst/go-gst/gst" "github.com/linkdata/deadlock" "github.com/livekit/egress/pkg/errors" "github.com/livekit/protocol/logger" ) const ( keyframeHistorySize = 10 keyframeRequestInterval = 200 * time.Millisecond ) // vp9ParseProbe inspects buffers around vp9parse to detect and signal missing // PTS and capture timing diagnostics. It never mutates the media flow; state // such as lastSinkPTS is tracked solely for logging and debugging. 
// vp9ParseProbe state. Atomics are used for values touched from pad-probe
// callbacks (gstreamer streaming threads); keyframeMu guards the keyframe
// interval history used purely for diagnostics.
type vp9ParseProbe struct {
	trackID string

	srcPad      *gst.Pad // vp9parse src pad holding srcProbeID
	sinkPad     *gst.Pad // vp9parse sink pad holding sinkProbeID
	srcProbeID  uint64
	sinkProbeID uint64
	onSignal    func() // invoked (rate-limited) to request a keyframe upstream; may be nil
	logger      logger.Logger

	// last valid PTS observed on the src pad, stored as uint64 nanoseconds
	lastSrcPTS   atomic.Uint64
	lastSrcValid atomic.Bool
	// latched while PTS-less buffers are being seen, so the warning logs once per episode
	missingPTS atomic.Bool

	// sink-side PTS tracking; logging/debugging only (see type comment above)
	lastSinkPTS   atomic.Uint64
	lastSinkValid atomic.Bool

	keyframeMu       deadlock.Mutex
	keyframePTS      []time.Duration // sliding window of recent keyframe PTS (max keyframeHistorySize)
	totalIntervalSum time.Duration   // running sum of positive inter-keyframe deltas (not windowed)
	totalIntervals   int

	lastKeyframeRequestNS atomic.Int64 // unix nanos of the last onSignal call, for rate limiting
	keyframePending       atomic.Bool  // true after missing PTS until the next keyframe arrives
}

// newVP9ParseProbe attaches buffer probes to both pads of the given vp9parse
// element. On success the caller owns the probe and must call Close to detach
// probes and release the pad refs.
func newVP9ParseProbe(trackID string, parse *gst.Element, onSignal func()) (*vp9ParseProbe, error) {
	srcPad := parse.GetStaticPad("src")
	if srcPad == nil {
		return nil, errors.ErrGstPipelineError(newMissingPadError("vp9parse", "src"))
	}
	sinkPad := parse.GetStaticPad("sink")
	if sinkPad == nil {
		// release the ref already taken on the src pad before bailing
		srcPad.Unref()
		return nil, errors.ErrGstPipelineError(newMissingPadError("vp9parse", "sink"))
	}
	p := &vp9ParseProbe{
		trackID:     trackID,
		srcPad:      srcPad,
		sinkPad:     sinkPad,
		onSignal:    onSignal,
		logger:      logger.GetLogger().WithValues("trackID", trackID, "component", "vp9_probe"),
		keyframePTS: make([]time.Duration, 0, keyframeHistorySize),
	}
	p.srcProbeID = srcPad.AddProbe(gst.PadProbeTypeBuffer, p.onSrcBuffer)
	p.sinkProbeID = sinkPad.AddProbe(gst.PadProbeTypeBuffer, p.onSinkBuffer)
	return p, nil
}

// Close logs the final keyframe history, removes both pad probes, and drops
// the pad references. Safe to call once; pads are nil'd to avoid double-unref.
func (p *vp9ParseProbe) Close() {
	p.logKeyframeHistory("probe_closed")
	if p.srcPad != nil {
		p.srcPad.RemoveProbe(p.srcProbeID)
		p.srcPad.Unref()
		p.srcPad = nil
	}
	if p.sinkPad != nil {
		p.sinkPad.RemoveProbe(p.sinkProbeID)
		p.sinkPad.Unref()
		p.sinkPad = nil
	}
}

// onSrcBuffer runs on every buffer leaving vp9parse. Buffers with no PTS are
// dropped (and a keyframe is requested); valid buffers are also dropped while
// a keyframe is still pending, so downstream only resumes on a keyframe.
func (p *vp9ParseProbe) onSrcBuffer(_ *gst.Pad, info *gst.PadProbeInfo) gst.PadProbeReturn {
	buffer := info.GetBuffer()
	if buffer == nil {
		return gst.PadProbeOK
	}
	pts, ok := clockTimeToDuration(buffer.PresentationTimestamp())
	if !ok {
		p.handleMissingPTS()
		return gst.PadProbeDrop
	}
	p.handleValidPTS(buffer, pts)
	if p.keyframePending.Load() {
		return gst.PadProbeDrop
	}
	return gst.PadProbeOK
}

// just for logging purposes
// onSinkBuffer watches buffers entering vp9parse and warns (with history)
// when the sink-side PTS moves backwards. It never modifies or drops buffers.
func (p *vp9ParseProbe) onSinkBuffer(_ *gst.Pad, info *gst.PadProbeInfo) gst.PadProbeReturn {
	buffer := info.GetBuffer()
	if buffer == nil {
		return gst.PadProbeOK
	}
	pts, ok := clockTimeToDuration(buffer.PresentationTimestamp())
	if !ok {
		return gst.PadProbeOK
	}
	if !p.lastSinkValid.Load() {
		// first valid sink PTS; establish the baseline
		p.lastSinkPTS.Store(uint64(pts))
		p.lastSinkValid.Store(true)
		return gst.PadProbeOK
	}
	prev := time.Duration(p.lastSinkPTS.Load())
	delta := pts - prev
	if delta < 0 {
		p.logger.Warnw("vp9parse sink pts moved backwards", nil, "delta", delta)
		p.logKeyframeHistory("backward_pts")
	}
	p.lastSinkPTS.Store(uint64(pts))
	return gst.PadProbeOK
}

// handleMissingPTS marks a keyframe as pending, requests one (rate-limited),
// and logs a warning once per missing-PTS episode (the CompareAndSwap gates
// repeat logging until a valid PTS resets missingPTS).
func (p *vp9ParseProbe) handleMissingPTS() {
	p.keyframePending.Store(true)
	p.requestKeyframeIfDue()
	if !p.missingPTS.CompareAndSwap(false, true) {
		return
	}
	fields := []any{}
	if p.lastSrcValid.Load() {
		last := time.Duration(p.lastSrcPTS.Load())
		fields = append(fields, "lastValidPTS", last)
	}
	if avg, count, ok := p.keyframeStats(); ok {
		fields = append(fields, "avgKeyframeInterval", avg, "keyframesTracked", count)
	}
	p.logger.Warnw("vp9parse buffer missing PTS", nil, fields...)
	p.logKeyframeHistory("missing_pts")
}

// handleValidPTS records the PTS and, on a keyframe that ends a pending
// episode, flags the buffer DISCONT so downstream elements resync.
func (p *vp9ParseProbe) handleValidPTS(buffer *gst.Buffer, pts time.Duration) {
	p.lastSrcPTS.Store(uint64(pts))
	p.lastSrcValid.Store(true)
	p.missingPTS.Store(false)
	if buffer.GetFlags()&gst.BufferFlagDeltaUnit == 0 {
		// keyframe (non-delta unit)
		wasPending := p.keyframePending.Swap(false)
		if wasPending {
			p.logger.Debugw("keyframe pending, got one")
			buffer.SetFlags(buffer.GetFlags() | gst.BufferFlagDiscont)
		}
		p.trackKeyframe(pts)
	} else {
		// delta frame while possibly still waiting: keep nudging for a keyframe
		p.requestKeyframeIfDue()
	}
}

// trackKeyframe appends pts to the history window and accumulates the
// inter-keyframe interval (positive deltas only) for average reporting.
func (p *vp9ParseProbe) trackKeyframe(pts time.Duration) {
	p.keyframeMu.Lock()
	defer p.keyframeMu.Unlock()
	if count := len(p.keyframePTS); count > 0 {
		delta := pts - p.keyframePTS[count-1]
		if delta > 0 {
			p.totalIntervalSum += delta
			p.totalIntervals++
		}
	}
	p.keyframePTS = append(p.keyframePTS, pts)
	if len(p.keyframePTS) > keyframeHistorySize {
		p.keyframePTS = p.keyframePTS[1:] // sliding window only keeps the most recent timestamps for debugging logs
	}
}

// requestKeyframeIfDue invokes onSignal at most once per
// keyframeRequestInterval, and only while a keyframe is actually pending.
func (p *vp9ParseProbe) requestKeyframeIfDue() {
	if p.onSignal == nil {
		return
	}
	if !p.keyframePending.Load() {
		return
	}
	now := time.Now().UnixNano()
	last := p.lastKeyframeRequestNS.Load()
	if last != 0 && time.Duration(now-last) < keyframeRequestInterval {
		return
	}
	p.onSignal()
	p.lastKeyframeRequestNS.Store(now)
}

// clockTimeToDuration converts a gst.ClockTime (nanoseconds) to a
// time.Duration; ok is false for the ClockTimeNone sentinel (no timestamp).
func clockTimeToDuration(ct gst.ClockTime) (time.Duration, bool) {
	if ct == gst.ClockTimeNone {
		return 0, false
	}
	return time.Duration(uint64(ct)), true
}

// keyframeStats returns the average inter-keyframe interval and a keyframe
// count (intervals + 1); ok is false until at least two keyframes were seen.
func (p *vp9ParseProbe) keyframeStats() (time.Duration, int, bool) {
	p.keyframeMu.Lock()
	defer p.keyframeMu.Unlock()
	if p.totalIntervals == 0 {
		return 0, len(p.keyframePTS), false
	}
	avg := p.totalIntervalSum / time.Duration(p.totalIntervals)
	return avg, p.totalIntervals + 1, true
}

// logKeyframeHistory emits the recent keyframe PTS window plus average
// interval at debug level. The history is copied so logging happens outside
// the lock.
func (p *vp9ParseProbe) logKeyframeHistory(reason string) {
	p.keyframeMu.Lock()
	if len(p.keyframePTS) == 0 {
		p.keyframeMu.Unlock()
		return
	}
	history := make([]time.Duration, len(p.keyframePTS))
	copy(history, p.keyframePTS)
	avg := time.Duration(0)
	count := 0
	if p.totalIntervals > 0 {
		avg = p.totalIntervalSum / time.Duration(p.totalIntervals)
		count = p.totalIntervals + 1
	}
	p.keyframeMu.Unlock()
	p.logger.Debugw("vp9 keyframe history",
		"reason", reason,
		"history", history,
		"avgKeyframeInterval", avg,
		"keyframesTracked", count)
}

// missingPadError reports a gst element missing an expected static pad.
type missingPadError struct {
	element string
	pad     string
}

// newMissingPadError builds a missingPadError for the given element/pad pair.
func newMissingPadError(element, pad string) error {
	return missingPadError{element: element, pad: pad}
}

// Error implements the error interface.
func (e missingPadError) Error() string {
	return fmt.Sprintf("missing %s pad on %s", e.pad, e.element)
}



================================================
FILE: pkg/pipeline/builder/websocket.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package builder

import (
	"github.com/go-gst/go-gst/gst"
	"github.com/go-gst/go-gst/gst/app"

	"github.com/livekit/egress/pkg/errors"
	"github.com/livekit/egress/pkg/gstreamer"
)

// BuildWebsocketBin creates a "websocket" bin whose only element is an
// appsink driven by the provided callbacks; the sink's static "sink" pad is
// exposed as the bin's ghost-pad target so upstream elements can link to it.
func BuildWebsocketBin(pipeline *gstreamer.Pipeline, appSinkCallbacks *app.SinkCallbacks) (*gstreamer.Bin, error) {
	b := pipeline.NewBin("websocket")

	appSink, err := app.NewAppSink()
	if err != nil {
		return nil, errors.ErrGstPipelineError(err)
	}
	appSink.SetCallbacks(appSinkCallbacks)
	if err = b.AddElement(appSink.Element); err != nil {
		return nil, err
	}

	// the bin's incoming link target is the appsink's own sink pad,
	// regardless of the requested pad name
	b.SetGetSrcPad(func(_ string) *gst.Pad {
		return appSink.GetStaticPad("sink")
	})

	return b, nil
}



================================================
FILE: pkg/pipeline/controller.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pipeline

import (
	"context"
	"fmt"
	"os"
	"path"
	"path/filepath"
	"sort"
	"sync"
	"time"

	"github.com/frostbyte73/core"
	"github.com/go-gst/go-gst/gst"
	"github.com/linkdata/deadlock"
	"go.uber.org/atomic"
	"go.uber.org/zap"

	"github.com/livekit/egress/pkg/config"
	"github.com/livekit/egress/pkg/errors"
	"github.com/livekit/egress/pkg/gstreamer"
	"github.com/livekit/egress/pkg/ipc"
	"github.com/livekit/egress/pkg/pipeline/builder"
	"github.com/livekit/egress/pkg/pipeline/sink"
	"github.com/livekit/egress/pkg/pipeline/source"
	"github.com/livekit/egress/pkg/stats"
	"github.com/livekit/egress/pkg/types"
	"github.com/livekit/protocol/livekit"
	"github.com/livekit/protocol/logger"
	"github.com/livekit/psrpc"
	"go.opentelemetry.io/otel"
)

const (
	pipelineName = "pipeline"
	// how long to wait after sending EOS before force-stopping the pipeline
	eosTimeout                = time.Second * 30
	streamRetryUpdateInterval = time.Minute
)

// Controller owns a single egress: it wires the source, gstreamer pipeline,
// and sinks together, and drives the egress lifecycle (start, EOS, close).
type Controller struct {
	*config.PipelineConfig
	ipcServiceClient ipc.EgressServiceClient

	// gstreamer
	gstLogger *zap.SugaredLogger
	src       source.Source
	callbacks *gstreamer.Callbacks
	p         *gstreamer.Pipeline
	sinks     map[types.EgressType][]sink.Sink

	// replay timing
	replayStartAt  int64 // wallclock unix nanos
	replayDuration int64 // milliseconds

	// internal
	mu                   deadlock.Mutex
	monitor              *stats.HandlerMonitor
	limitTimer           *time.Timer
	storageMonitorCancel context.CancelFunc
	paused               core.Fuse
	playing              core.Fuse
	eosSent              core.Fuse
	eosTimer             *time.Timer
	eosReceived          core.Fuse
	stopped              core.Fuse
	storageLimitOnce     sync.Once
	pipelineEndedAt      int64
	stats                controllerStats
	pipelineCreatedAt    time.Time
}

// controllerStats aggregates dropped-buffer counters reported at shutdown.
// The per-queue maps are keyed by queue name.
type controllerStats struct {
	mixerDroppedAudioBuffers   atomic.Uint64
	droppedVideoBuffers        atomic.Uint64
	mixerDroppedAudioDuration  atomic.Duration
	queuesDroppedAudioBuffers  atomic.Uint64
	droppedAudioBuffersByQueue map[string]uint64
	droppedVideoBuffersByQueue map[string]uint64
}

// SourceBuilder constructs a pipeline source. It receives the controller's
// callbacks so the source can synchronize on GstReady; custom sources that
// don't need gst synchronization can ignore the argument.
type SourceBuilder func(callbacks *gstreamer.Callbacks) (source.Source, error)

var (
	tracer = otel.Tracer("github.com/livekit/egress/pkg/pipeline")
)

// New creates a Controller with the standard source (source.New) for the
// given config. It is a convenience wrapper around NewWithSource.
func New(ctx context.Context, conf *config.PipelineConfig, ipcServiceClient ipc.EgressServiceClient) (*Controller, error) {
	ctx, span := tracer.Start(ctx, "Pipeline.New")
	defer span.End()

	return NewWithSource(ctx, conf, ipcServiceClient, func(callbacks *gstreamer.Callbacks) (source.Source, error) {
		return source.New(ctx, conf, callbacks)
	})
}

// NewWithSource creates a Controller using the given SourceBuilder. The builder
// runs after the controller has been constructed and receives the controller's
// Callbacks, so the source can share GstReady with the pipeline. Use this when
// the source isn't the standard source.New (testfeeder, replay export, etc.).
func NewWithSource(
	ctx context.Context,
	conf *config.PipelineConfig,
	ipcServiceClient ipc.EgressServiceClient,
	srcBuilder SourceBuilder,
) (*Controller, error) {
	c := newController(conf, ipcServiceClient)

	// initialize gst
	// runs concurrently with source construction; GstReady is closed once
	// gst.Init has completed, and is awaited below before building the pipeline
	go func() {
		_, span := tracer.Start(ctx, "gst.Init")
		defer span.End()

		gst.Init(nil)
		gst.SetLogFunction(c.gstLog)
		close(c.callbacks.GstReady)
	}()

	src, err := srcBuilder(c.callbacks)
	if err != nil {
		return nil, err
	}
	c.src = src

	// create pipeline
	<-c.callbacks.GstReady
	if err := c.BuildPipeline(); err != nil {
		c.src.Close()
		return nil, err
	}

	return c, nil
}

// Callbacks returns the pipeline callbacks. Sources that need to wait for
// GstReady before creating appsrc elements can use this.
func (c *Controller) Callbacks() *gstreamer.Callbacks { return c.callbacks } func newController(conf *config.PipelineConfig, ipcServiceClient ipc.EgressServiceClient) *Controller { c := &Controller{ PipelineConfig: conf, ipcServiceClient: ipcServiceClient, gstLogger: logger.GetLogger().(logger.ZapLogger).ToZap().WithOptions(zap.WithCaller(false)), callbacks: &gstreamer.Callbacks{ GstReady: make(chan struct{}), BuildReady: make(chan struct{}), }, sinks: make(map[types.EgressType][]sink.Sink), monitor: stats.NewHandlerMonitor(conf.NodeID, conf.ClusterID, conf.Info.EgressId), stats: controllerStats{ droppedVideoBuffersByQueue: make(map[string]uint64), droppedAudioBuffersByQueue: make(map[string]uint64), }, } c.callbacks.SetOnError(c.OnError) c.callbacks.SetOnEOSSent(c.onEOSSent) c.callbacks.SetOnDebugDotRequest(func(reason string) { if !c.Debug.EnableProfiling { return } logger.Debugw("debug dot requested", "reason", reason) c.generateDotFile(reason) }) return c } func (c *Controller) BuildPipeline() error { p, err := gstreamer.NewPipeline(pipelineName, c.Latency.PipelineLatency, c.callbacks) if err != nil { return errors.ErrGstPipelineError(err) } c.pipelineCreatedAt = time.Now() p.SetWatch(c.messageWatch) p.AddOnStop(func() error { c.stopped.Break() return nil }) if sdkSrc, ok := c.src.(*source.SDKSource); ok { p.SetEOSFunc(func() bool { sdkSrc.CloseWriters() return true }) } if c.AudioEnabled { if err = builder.BuildAudioBin(p, c.PipelineConfig); err != nil { return err } } if c.VideoEnabled { if err = builder.BuildVideoBin(p, c.PipelineConfig); err != nil { return err } } for egressType, outputs := range c.Outputs { for _, o := range outputs { s, err := sink.NewSink(p, c.PipelineConfig, egressType, o, c.callbacks, c.monitor) if err != nil { return err } c.sinks[egressType] = append(c.sinks[egressType], s) } } if err = p.Link(); err != nil { return err } // initial graph is fully wired; from now on, dynamic additions must be linked immediately 
p.UpgradeState(gstreamer.StateStarted) c.p = p if timeAware, ok := c.src.(source.TimeAware); ok { timeAware.SetTimeProvider(p) } close(c.callbacks.BuildReady) return nil } func (c *Controller) SetReplayTiming(startAt, durationMs int64) { c.replayStartAt = startAt c.replayDuration = durationMs } func (c *Controller) Run(ctx context.Context) *livekit.EgressInfo { ctx, span := tracer.Start(ctx, "Pipeline.Run") defer span.End() defer c.Close() defer func() { if c.VideoEnabled { logger.Infow( "video input queue stats", "videoBuffersDropped", c.stats.droppedVideoBuffers.Load(), "requestType", c.RequestType, "sourceType", c.SourceType, "droppedByQueue", c.stats.droppedVideoBuffersByQueue, ) } if c.SourceType == types.SourceTypeSDK { logger.Infow( "audio qos stats", "audioBuffersDropped", c.stats.mixerDroppedAudioBuffers.Load(), "totalAudioDurationDropped", c.stats.mixerDroppedAudioDuration.Load(), "queueDroppedAudioBuffers", c.stats.queuesDroppedAudioBuffers.Load(), "droppedByQueue", c.stats.droppedAudioBuffersByQueue, "requestType", c.RequestType, ) } }() // session limit timer c.startSessionLimitTimer(ctx) // close when room ends go func() { <-c.src.EndRecording() c.SendEOS(ctx, livekit.EndReasonSrcClosed) }() // wait until room is ready start := c.src.StartRecording() if start != nil { logger.Debugw("waiting for start signal") select { case <-c.stopped.Watch(): c.src.Close() c.Info.SetAborted(livekit.MsgStartNotReceived) return c.Info case <-start: // continue } } // Replay timing gate: wait until start_at if c.replayStartAt > 0 { waitDuration := time.Until(time.Unix(0, c.replayStartAt)) if waitDuration > 0 { logger.Debugw("waiting for replay start time", "waitDuration", waitDuration) select { case <-c.stopped.Watch(): c.src.Close() c.Info.SetAborted(livekit.MsgStartNotReceived) return c.Info case <-time.After(waitDuration): // continue } } } for _, si := range c.sinks { for _, s := range si { if err := s.Start(); err != nil { c.src.Close() c.Info.SetFailed(err) return 
c.Info } } } c.startOutputSizeMonitor() // Replay duration timer if c.replayDuration > 0 { time.AfterFunc(time.Duration(c.replayDuration)*time.Millisecond, func() { c.SendEOS(ctx, livekit.EndReasonSrcClosed) }) } err := c.p.Run() if err != nil { c.src.Close() c.Info.SetFailed(err) return c.Info } logger.Debugw("closing source") c.src.Close() if c.playing.IsBroken() { logger.Debugw("closing sinks") for _, si := range c.sinks { for _, s := range si { if c.eosReceived.IsBroken() || s.EOSReceived() { if err := s.Close(); err != nil && c.Info.Status != livekit.EgressStatus_EGRESS_FAILED { c.Info.SetFailed(err) } } } } } return c.Info } func (c *Controller) UpdateStream(ctx context.Context, req *livekit.UpdateStreamRequest) error { ctx, span := tracer.Start(ctx, "Pipeline.UpdateStream") defer span.End() o := c.GetStreamConfig() if o == nil { return errors.ErrNonStreamingPipeline } errs := errors.ErrArray{} // add stream outputs first for _, rawUrl := range req.AddOutputUrls { // validate and redact url stream, err := o.AddStream(rawUrl, o.OutputType) if err != nil { errs.AppendErr(err) continue } // add stream info to results c.mu.Lock() c.Info.StreamResults = append(c.Info.StreamResults, stream.StreamInfo) if list := c.Info.GetStream(); list != nil { //nolint:staticcheck // keep deprecated field for older clients list.Info = append(list.Info, stream.StreamInfo) } c.mu.Unlock() // add stream if err = c.getStreamSink().AddStream(stream); err != nil { stream.StreamInfo.Status = livekit.StreamInfo_FAILED stream.StreamInfo.Error = err.Error() stream.UpdateEndTime(time.Now().UnixNano()) errs.AppendErr(err) continue } c.OutputCount.Inc() } // remove stream outputs for _, rawUrl := range req.RemoveOutputUrls { stream, err := o.GetStream(rawUrl) if err != nil { errs.AppendErr(err) continue } if err = c.streamFinished(ctx, stream); err != nil { errs.AppendErr(err) } } c.streamUpdated(ctx) return errs.ToError() } func (c *Controller) UpdateEgress(ctx context.Context, req 
*livekit.UpdateEgressRequest) error { ctx, span := tracer.Start(ctx, "Pipeline.UpdateEgress") defer span.End() errs := errors.ErrArray{} // update stream targets if len(req.AddStreamUrls) > 0 || len(req.RemoveStreamUrls) > 0 { streamReq := &livekit.UpdateStreamRequest{ EgressId: req.EgressId, AddOutputUrls: req.AddStreamUrls, RemoveOutputUrls: req.RemoveStreamUrls, } if err := c.UpdateStream(ctx, streamReq); err != nil { errs.AppendErr(err) } } // update layout — not yet supported if req.Layout != "" { errs.AppendErr(errors.ErrFeatureDisabled("layout update")) } // update URL — not yet supported if req.Url != "" { errs.AppendErr(errors.ErrFeatureDisabled("url update")) } return errs.ToError() } func (c *Controller) streamFinished(ctx context.Context, stream *config.Stream) error { stream.StreamInfo.Status = livekit.StreamInfo_FINISHED stream.UpdateEndTime(time.Now().UnixNano()) // remove output o := c.GetStreamConfig() o.Streams.Delete(stream.ParsedUrl) c.OutputCount.Dec() // end egress if no outputs remaining if c.OutputCount.Load() == 0 { c.SendEOS(ctx, livekit.EndReasonStreamsStopped) return nil } logger.Infow("stream finished", "url", stream.RedactedUrl, "status", stream.StreamInfo.Status, "duration", stream.StreamInfo.Duration, ) return c.getStreamSink().RemoveStream(stream) } func (c *Controller) streamFailed(ctx context.Context, stream *config.Stream, streamErr error) error { stream.StreamInfo.Status = livekit.StreamInfo_FAILED stream.StreamInfo.Error = streamErr.Error() stream.UpdateEndTime(time.Now().UnixNano()) // remove output o := c.GetStreamConfig() o.Streams.Delete(stream.ParsedUrl) c.OutputCount.Dec() // fail egress if no outputs remaining if c.OutputCount.Load() == 0 { return psrpc.NewError(psrpc.Unavailable, streamErr) } logger.Infow("stream failed", "url", stream.RedactedUrl, "status", stream.StreamInfo.Status, "duration", stream.StreamInfo.Duration, "error", streamErr) c.streamUpdated(ctx) return c.getStreamSink().RemoveStream(stream) } func (c 
*Controller) trackStreamRetry(ctx context.Context, stream *config.Stream) {
	now := time.Now()
	stream.StreamInfo.LastRetryAt = now.UnixNano()
	stream.StreamInfo.Retries++
	// rate-limit retry updates so a stream failing in a tight loop doesn't flood the service
	if !stream.ShouldSendRetryUpdate(now, streamRetryUpdateInterval) {
		return
	}
	logger.Infow("retrying stream update",
		"url", stream.RedactedUrl,
		"retries", stream.StreamInfo.Retries,
	)
	c.streamUpdated(ctx)
}

// onEOSSent ensures shutdown state is consistent when EOS finishes before endRecording.
func (c *Controller) onEOSSent() {
	// for video-only track/track composite, EOS might have already
	// made it through the pipeline by the time endRecording is closed
	if (c.RequestType == types.RequestTypeTrack || c.RequestType == types.RequestTypeTrackComposite) && !c.AudioEnabled {
		// this will not actually send a second EOS, but will make sure everything is in the correct state
		c.SendEOS(context.Background(), livekit.EndReasonSrcClosed)
	}
}

// onStorageLimitReached marks the egress as limit-reached and begins shutdown.
// storageLimitOnce guarantees this runs at most once.
func (c *Controller) onStorageLimitReached() {
	c.storageLimitOnce.Do(func() {
		c.Info.SetLimitReached()
		c.SendEOS(context.Background(), livekit.EndReasonLimitReached)
	})
}

// SendEOS begins a graceful pipeline shutdown for the given reason.
// Safe to call multiple times; only the first call takes effect (guarded by c.eosSent).
func (c *Controller) SendEOS(ctx context.Context, reason string) {
	ctx, span := tracer.Start(ctx, "Pipeline.SendEOS")
	defer span.End()

	c.eosSent.Once(func() {
		if c.limitTimer != nil {
			c.limitTimer.Stop()
		}
		c.Info.SetEndReason(reason)
		logger.Debugw("stopping pipeline", "reason", reason)

		switch c.Info.Status {
		case livekit.EgressStatus_EGRESS_STARTING:
			c.Info.SetAborted(livekit.MsgStoppedBeforeStarted)
			c.p.Stop()

		case livekit.EgressStatus_EGRESS_ABORTED,
			livekit.EgressStatus_EGRESS_FAILED:
			c.p.Stop()

		case livekit.EgressStatus_EGRESS_ACTIVE:
			c.Info.UpdateStatus(livekit.EgressStatus_EGRESS_ENDING)
			c.sendHandlerUpdate(ctx, c.Info)
			c.sendEOS()

		case livekit.EgressStatus_EGRESS_ENDING:
			c.sendHandlerUpdate(ctx, c.Info)
			c.sendEOS()

		case livekit.EgressStatus_EGRESS_LIMIT_REACHED:
			c.sendEOS()
		}

		if c.SourceType == types.SourceTypeWeb {
			// web source uses the current time
			c.updateEndTime()
		}
	})
}

// sendEOS arms the frozen-pipeline watchdog, then pushes EOS through the pipeline
// asynchronously. If finalizing sinks haven't seen EOS when the timer fires, the
// pipeline is treated as frozen.
func (c *Controller) sendEOS() {
	for _, sinks := range c.sinks {
		for _, s := range sinks {
			s.AddEOSProbe()
		}
	}

	c.eosTimer = time.AfterFunc(eosTimeout, func() {
		logger.Debugw("eos timer firing")
		for egressType, si := range c.sinks {
			switch egressType {
			case types.EgressTypeFile, types.EgressTypeSegments, types.EgressTypeImages:
				for _, s := range si {
					if !s.EOSReceived() {
						c.OnError(errors.ErrPipelineFrozen)
						return
					}
				}
			default:
				// finalization not required
			}
		}
		c.p.Stop()
	})

	go func() {
		c.p.SendEOS()
		logger.Debugw("eos sent")
	}()
}

// OnError records a pipeline failure and stops the pipeline. Frozen-pipeline
// errors additionally dump debug artifacts when profiling is enabled.
func (c *Controller) OnError(err error) {
	logger.Errorw("controller onError invoked", err)
	if errors.Is(err, errors.ErrPipelineFrozen) && c.Debug.EnableProfiling {
		c.generateDotFile("error")
		c.generatePProf()
	}

	// errors after EOS are only fatal if finalization (uploads etc.) is still required
	if c.Info.Status != livekit.EgressStatus_EGRESS_FAILED && (!c.eosSent.IsBroken() || c.FinalizationRequired) {
		c.Info.SetFailed(err)
	}

	go c.p.Stop()
}

// Close finalizes the egress: stops monitoring, settles end time and status,
// uploads the manifest for terminal success states, and uploads debug files.
func (c *Controller) Close() {
	const closeSlowThreshold = 1 * time.Hour

	closeStart := time.Now()
	closeDone := make(chan struct{})
	defer close(closeDone)
	// watchdog: warn (once) if Close is stuck, e.g. on a slow upload
	go func() {
		select {
		case <-closeDone:
			return
		case <-time.After(closeSlowThreshold):
			logger.Warnw("Close() taking longer than expected", nil,
				"threshold", closeSlowThreshold,
				"elapsed", time.Since(closeStart),
				"egressID", c.Info.EgressId,
				"sourceType", c.SourceType,
			)
		}
	}()

	c.stopOutputSizeMonitor()

	if c.SourceType == types.SourceTypeSDK || !c.eosSent.IsBroken() {
		// sdk source will use the timestamp of the last packet pushed to the pipeline
		c.updateEndTime()
	}

	// update status
	if c.Info.Status == livekit.EgressStatus_EGRESS_FAILED {
		if o := c.GetStreamConfig(); o != nil {
			o.Streams.Range(func(_, stream any) bool {
				stream.(*config.Stream).StreamInfo.Status = livekit.StreamInfo_FAILED
				return true
			})
		}
	}

	// ensure egress ends with a final state
	switch c.Info.Status {
	case livekit.EgressStatus_EGRESS_STARTING:
		c.Info.SetAborted(livekit.MsgStoppedBeforeStarted)

	case livekit.EgressStatus_EGRESS_ACTIVE,
		livekit.EgressStatus_EGRESS_ENDING:
		c.Info.SetComplete()
		fallthrough

	case livekit.EgressStatus_EGRESS_LIMIT_REACHED,
		livekit.EgressStatus_EGRESS_COMPLETE:
		// upload manifest and add location to egress info
		c.uploadManifest()
	}

	// upload debug files
	c.uploadDebugFiles()
}

// startSessionLimitTimer arms a timer for the smallest configured per-type max
// duration across all outputs. No timer is armed if no limits are configured.
func (c *Controller) startSessionLimitTimer(ctx context.Context) {
	var timeout time.Duration
	for egressType := range c.Outputs {
		var t time.Duration
		switch egressType {
		case types.EgressTypeFile:
			t = c.FileOutputMaxDuration
		case types.EgressTypeStream, types.EgressTypeWebsocket:
			t = c.StreamOutputMaxDuration
		case types.EgressTypeSegments:
			t = c.SegmentOutputMaxDuration
		case types.EgressTypeImages:
			t = c.ImageOutputMaxDuration
		}
		if t > 0 && (timeout == 0 || t < timeout) {
			timeout = t
		}
	}

	if timeout > 0 {
		c.limitTimer = time.AfterFunc(timeout, func() {
			switch c.Info.Status {
			case livekit.EgressStatus_EGRESS_STARTING:
				c.Info.SetAborted(livekit.MsgLimitReachedWithoutStart)
			case livekit.EgressStatus_EGRESS_ACTIVE:
				c.Info.SetLimitReached()
			}
			if c.playing.IsBroken() {
				c.SendEOS(ctx, livekit.EndReasonLimitReached)
			} else {
				// nothing has played yet; no EOS needed
				c.p.Stop()
			}
		})
	}
}

// startOutputSizeMonitor begins polling the tmp dir size; it is cancelled on
// pipeline stop or by stopOutputSizeMonitor.
func (c *Controller) startOutputSizeMonitor() {
	ctx, cancel := context.WithCancel(context.Background())
	c.storageMonitorCancel = cancel
	c.p.AddOnStop(func() error {
		cancel()
		return nil
	})
	go c.monitorOutputDirSize(ctx)
}

// stopOutputSizeMonitor cancels the output size monitor, if running.
func (c *Controller) stopOutputSizeMonitor() {
	if c.storageMonitorCancel != nil {
		c.storageMonitorCancel()
		c.storageMonitorCancel = nil
	}
}

// monitorOutputDirSize polls the output directory every 15s, logging when size
// thresholds are crossed and ending the egress if FileOutputMaxSize is reached.
func (c *Controller) monitorOutputDirSize(ctx context.Context) {
	thresholds := []int64{
		1 << 30,  // 1GB
		3 << 30,  // 3GB
		5 << 30,  // 5GB
		10 << 30, // 10GB
		20 << 30, // 20GB
		50 << 30, // 50GB
	}

	ticker := time.NewTicker(15 * time.Second)
	defer ticker.Stop()

	nextThreshold := 0
	statErrorLogged := false
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
		}

		size, files, err := c.getOutputDirStats()
		if err != nil {
			// log stat failures only once until a stat succeeds again
			if !statErrorLogged {
				logger.Debugw("failed to stat output directory", err, "dir", c.TmpDir)
				statErrorLogged = true
			}
			continue
		}
		statErrorLogged = false

		if c.FileOutputMaxSize > 0 && size >= c.FileOutputMaxSize {
			c.logOutputFileSizes(files, 10)
			logger.Warnw(
				"output storage limit reached", nil,
				"dir", c.TmpDir,
				"bytesWritten", size,
				"limitBytes", c.FileOutputMaxSize,
			)
			c.onStorageLimitReached()
			return
		}

		// each threshold logs once; a large jump may cross several at a time
		thresholdTriggered := false
		for nextThreshold < len(thresholds) && size >= thresholds[nextThreshold] {
			logger.Debugw(
				"output size threshold exceeded",
				"dir", c.TmpDir,
				"bytesWritten", size,
				"thresholdBytes", thresholds[nextThreshold],
			)
			thresholdTriggered = true
			nextThreshold++
		}
		if thresholdTriggered {
			c.logOutputFileSizes(files, 10)
		}
	}
}

// outputFileStat is a tmp-dir file path (relative to TmpDir) and its size in bytes.
type outputFileStat struct {
	path string
	size int64
}

// getOutputDirStats walks TmpDir and returns the total size plus per-file stats,
// sorted largest first. Files deleted mid-walk are skipped.
func (c *Controller) getOutputDirStats() (int64, []outputFileStat, error) {
	if c.TmpDir == "" {
		return 0, nil, nil
	}

	var files []outputFileStat
	var total int64
	err := filepath.Walk(c.TmpDir, func(p string, info os.FileInfo, err error) error {
		if err != nil {
			if os.IsNotExist(err) {
				// file removed while walking (e.g. uploaded segment); ignore
				return nil
			}
			return err
		}
		if info.IsDir() {
			return nil
		}
		total += info.Size()
		rel, relErr := filepath.Rel(c.TmpDir, p)
		if relErr != nil {
			rel = p
		}
		files = append(files, outputFileStat{
			path: rel,
			size: info.Size(),
		})
		return nil
	})
	if err != nil {
		return 0, nil, err
	}

	sort.Slice(files, func(i, j int) bool {
		return files[i].size > files[j].size
	})
	return total, files, nil
}

// logOutputFileSizes logs up to limit file sizes (already sorted largest first).
// limit <= 0 logs all files.
func (c *Controller) logOutputFileSizes(files []outputFileStat, limit int) {
	if files == nil {
		return
	}
	if limit > 0 && len(files) > limit {
		files = files[:limit]
	}
	for _, f := range files {
		logger.Infow("output file size", "file", f.path, "bytes", f.size)
	}
}

// updateStartTime records startedAt on every output's info and transitions the
// egress from STARTING to ACTIVE.
func (c *Controller) updateStartTime(startedAt int64) {
	for egressType, o := range c.Outputs {
		if len(o) == 0 {
			continue
		}
		switch egressType {
		case types.EgressTypeStream, types.EgressTypeWebsocket:
			streamConfig := o[0].(*config.StreamConfig)
			if streamConfig.OutputType == types.OutputTypeRTMP {
				// rtmp has special start time handling
				continue
			}
			streamConfig.Streams.Range(func(_, stream any) bool {
				stream.(*config.Stream).StreamInfo.StartedAt = startedAt
				return true
			})

		case types.EgressTypeFile:
			o[0].(*config.FileConfig).FileInfo.StartedAt = startedAt

		case types.EgressTypeSegments:
			o[0].(*config.SegmentConfig).SegmentsInfo.StartedAt = startedAt

		case types.EgressTypeImages:
			// renamed from "c" to avoid shadowing the receiver
			for _, out := range o {
				out.(*config.ImageConfig).ImagesInfo.StartedAt = startedAt
			}
		}
	}

	if c.Info.Status == livekit.EgressStatus_EGRESS_STARTING {
		c.Info.UpdateStatus(livekit.EgressStatus_EGRESS_ACTIVE)
		c.sendHandlerUpdate(context.Background(), c.Info)
	}
}

// updateStreamStartTime stamps the start time on the stream matching streamID
// (first time only) and pushes a handler update.
func (c *Controller) updateStreamStartTime(streamID string) {
	if o := c.GetStreamConfig(); o != nil {
		o.Streams.Range(func(_, s any) bool {
			if stream := s.(*config.Stream); stream.StreamID == streamID && stream.StreamInfo.StartedAt == 0 {
				logger.Debugw("stream started", "url", stream.RedactedUrl)
				stream.StreamInfo.StartedAt = time.Now().UnixNano()

				c.Info.UpdatedAt = time.Now().UnixNano()
				c.streamUpdated(context.Background())
				return false
			}
			return true
		})
	}
}

// streamUpdated sends a handler update unless some newly added stream has not
// yet started or failed, in which case the update is deferred.
func (c *Controller) streamUpdated(ctx context.Context) {
	c.Info.UpdatedAt = time.Now().UnixNano()

	if o := c.GetStreamConfig(); o != nil {
		skipUpdate := false
		// when adding streams, wait until they've all either started or failed before sending the update
		o.Streams.Range(func(_, stream any) bool {
			streamInfo := stream.(*config.Stream).StreamInfo
			if streamInfo.Status == livekit.StreamInfo_ACTIVE && streamInfo.StartedAt == 0 {
				skipUpdate = true
				return false
			}
			return true
		})
		if skipUpdate {
			return
		}
	}

	c.sendHandlerUpdate(ctx, c.Info)
}

// sendHandlerUpdate forwards the egress info to the service over IPC; errors
// are intentionally ignored (best effort).
func (c *Controller) sendHandlerUpdate(ctx context.Context, info *livekit.EgressInfo) {
	if c.ipcServiceClient != nil {
		_, _ = c.ipcServiceClient.HandlerUpdate(ctx, info)
	}
}

// updateEndTime stamps end times (and derived durations) on all output infos,
// using the later of the source's end time and the pipeline's end time.
func (c *Controller) updateEndTime() {
	endedAt := c.src.GetEndedAt()
	if c.pipelineEndedAt > endedAt {
		endedAt = c.pipelineEndedAt
	}

	for egressType, o := range c.Outputs {
		if len(o) == 0 {
			continue
		}
		switch egressType {
		case types.EgressTypeStream, types.EgressTypeWebsocket:
			streamConfig := o[0].(*config.StreamConfig)
			streamConfig.Streams.Range(func(_, s any) bool {
				stream := s.(*config.Stream)
				stream.StreamInfo.Status = livekit.StreamInfo_FINISHED
				stream.UpdateEndTime(endedAt)
				return true
			})

		case types.EgressTypeFile:
			fileInfo := o[0].(*config.FileConfig).FileInfo
			if fileInfo.StartedAt == 0 {
				fileInfo.StartedAt = endedAt
			}
			fileInfo.EndedAt = endedAt
			fileInfo.Duration = endedAt - fileInfo.StartedAt

		case types.EgressTypeSegments:
			segmentsInfo := o[0].(*config.SegmentConfig).SegmentsInfo
			if segmentsInfo.StartedAt == 0 {
				segmentsInfo.StartedAt = endedAt
			}
			segmentsInfo.EndedAt = endedAt
			segmentsInfo.Duration = endedAt - segmentsInfo.StartedAt

		case types.EgressTypeImages:
			// renamed from "c" to avoid shadowing the receiver
			for _, out := range o {
				imageInfo := out.(*config.ImageConfig).ImagesInfo
				if imageInfo.StartedAt == 0 {
					imageInfo.StartedAt = endedAt
				}
				imageInfo.EndedAt = endedAt
			}
		}
	}
}

// uploadManifest happens last, after all sinks have finished
func (c *Controller) uploadManifest() {
	if c.Manifest == nil {
		return
	}

	b, err := c.Manifest.Close(c.Info.EndedAt)
	if err != nil {
		logger.Errorw("failed to close manifest", err)
		return
	}

	manifestPath := path.Join(c.TmpDir, fmt.Sprintf("%s.json", c.Info.EgressId))
	f, err := os.Create(manifestPath)
	if err != nil {
		logger.Errorw("failed to create manifest file", err)
		return
	}
	_, err = f.Write(b)
	// always close the file, even on write failure (previously leaked the fd on
	// the error path), and surface close errors as write failures
	if closeErr := f.Close(); err == nil {
		err = closeErr
	}
	if err != nil {
		logger.Errorw("failed to write to manifest file", err)
		return
	}

	infoUpdated := false
	for _, si := range c.sinks {
		for _, s := range si {
			location, uploaded, err := s.UploadManifest(manifestPath)
			if err != nil {
				if c.Info.BackupStorageUsed {
					logger.Errorw("failed to upload manifest", err)
				} else {
					logger.Warnw("failed to upload manifest", err)
				}
				continue
			}
			// record the first successfully uploaded location only
			if !infoUpdated && uploaded {
				c.Info.ManifestLocation = location
				infoUpdated = true
			}
		}
	}
}

// getStreamSink returns the stream sink, or nil if there is none.
func (c *Controller) getStreamSink() *sink.StreamSink {
	s := c.sinks[types.EgressTypeStream]
	if len(s) == 0 {
		return nil
	}
	return s[0].(*sink.StreamSink)
}

// getSegmentSink returns the segment sink, or nil if there is none.
func (c *Controller) getSegmentSink() *sink.SegmentSink {
	s := c.sinks[types.EgressTypeSegments]
	if len(s) == 0 {
		return nil
	}
	return s[0].(*sink.SegmentSink)
}

// getImageSink resolves a gst multifilesink element name ("multifilesink_<id>")
// to its ImageSink, or nil if not found.
func (c *Controller) getImageSink(name string) *sink.ImageSink {
	const prefix = "multifilesink_"
	// guard against unexpected element names (previously sliced unconditionally,
	// which could panic on a short or non-matching name)
	if len(name) < len(prefix) || name[:len(prefix)] != prefix {
		return nil
	}
	id := name[len(prefix):]

	s := c.sinks[types.EgressTypeImages]
	if len(s) == 0 {
		return nil
	}

	// Use a map here?
	for _, si := range s {
		if i := si.(*sink.ImageSink); i.Id == id {
			return i
		}
	}
	return nil
}



================================================
FILE: pkg/pipeline/debug.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pipeline

import (
	"context"
	"fmt"
	"os"
	"path"
	"strings"
	"time"

	"github.com/go-gst/go-gst/gst"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"

	"github.com/livekit/egress/pkg/pipeline/sink/uploader"
	"github.com/livekit/egress/pkg/types"
	"github.com/livekit/protocol/logger"
	"github.com/livekit/protocol/pprof"
)

// GetGstPipelineDebugDot renders the pipeline graph as a GraphViz dot string,
// giving up after 3 seconds if the pipeline is unresponsive.
func (c *Controller) GetGstPipelineDebugDot() (string, error) {
	result := make(chan string, 1)
	go func() {
		result <- c.p.DebugBinToDotData(gst.DebugGraphShowAll)
	}()

	select {
	case d := <-result:
		return d, nil
	case <-time.After(3 * time.Second):
		return "", status.New(codes.DeadlineExceeded, "timed out requesting pipeline debug info").Err()
	}
}

// sanitizeDebugFilenameComponent maps every character outside [a-zA-Z0-9_-]
// to '_' and strips leading/trailing underscores.
func sanitizeDebugFilenameComponent(s string) string {
	var out strings.Builder
	for _, r := range s {
		switch {
		case r >= 'a' && r <= 'z',
			r >= 'A' && r <= 'Z',
			r >= '0' && r <= '9',
			r == '-', r == '_':
			out.WriteRune(r)
		default:
			out.WriteRune('_')
		}
	}
	return strings.Trim(out.String(), "_")
}

// writeDotFile best-effort writes a dot capture into the tmp dir; failures are silent.
func (c *Controller) writeDotFile(filename, contents string) {
	f, err := os.Create(path.Join(c.TmpDir, filename))
	if err != nil {
		return
	}
	defer f.Close()
	_, _ = f.WriteString(contents)
}

// generateDotFile writes the pipeline graph under the canonical
// "<egressID>.dot" name and, when a reason is supplied, under a
// reason+timestamp-suffixed name as well.
func (c *Controller) generateDotFile(reason string) {
	dot, err := c.GetGstPipelineDebugDot()
	if err != nil {
		logger.Errorw("failed to get gst pipeline debug dot", err)
		return
	}

	// always write the canonical file name for easy discovery
	c.writeDotFile(fmt.Sprintf("%s.dot", c.Info.EgressId), dot)

	if reason == "" {
		logger.Errorw("failed to get gst pipeline debug dot, reason is empty", nil)
		return
	}

	// make sure all dot captures for the egressID are written with timestamp suffix
	suffix := time.Now().UTC().Format("20060102T150405Z")
	if ext := sanitizeDebugFilenameComponent(reason); ext != "" {
		suffix = ext + "_" + suffix
	}
	c.writeDotFile(fmt.Sprintf("%s_%s.dot", c.Info.EgressId, suffix), dot)
}

// generatePProf dumps a goroutine profile to "<egressID>.prof" in the tmp dir.
func (c *Controller) generatePProf() {
	data, err := pprof.GetProfileData(context.Background(), "goroutine", 0, 0)
	if err != nil {
		logger.Errorw("failed to get profile data", err)
		return
	}

	f, err := os.Create(path.Join(c.TmpDir, fmt.Sprintf("%s.prof", c.Info.EgressId)))
	if err != nil {
		return
	}
	defer f.Close()
	_, _ = f.Write(data)
}

// debugFileDataTypes maps debug file extensions to the content type used for upload.
var debugFileDataTypes = map[string]types.OutputType{
	"csv":  "text/csv",
	"dot":  types.OutputTypeBlob,
	"prof": types.OutputTypeBlob,
	"log":  "text/plain",
}

// uploadDebugFiles uploads every non-empty debug artifact from the tmp dir to
// the configured debug storage. The uploader is created lazily, and the first
// upload failure aborts the remaining files.
func (c *Controller) uploadDebugFiles() {
	entries, err := os.ReadDir(c.TmpDir)
	if err != nil {
		logger.Errorw("failed to read tmp dir", err)
		return
	}

	var u *uploader.Uploader
	for _, entry := range entries {
		info, err := entry.Info()
		if err != nil || info.Size() == 0 {
			continue
		}

		parts := strings.Split(entry.Name(), ".")
		outputType, ok := debugFileDataTypes[parts[len(parts)-1]]
		if !ok {
			continue
		}

		if u == nil {
			u, err = uploader.New(&c.Debug.StorageConfig, nil, c.monitor, nil, nil)
			if err != nil {
				logger.Errorw("failed to create uploader", err)
				return
			}
		}

		localPath := path.Join(c.TmpDir, entry.Name())
		storagePath := path.Join(c.Info.EgressId, entry.Name())
		if _, _, err = u.Upload(localPath, storagePath, outputType, false); err != nil {
			logger.Errorw("failed to upload debug file", err, "filename", localPath)
			return
		}
	}
}



================================================
FILE: pkg/pipeline/sink/file.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sink import ( "path" "time" "github.com/livekit/egress/pkg/config" "github.com/livekit/egress/pkg/gstreamer" "github.com/livekit/egress/pkg/pipeline/builder" "github.com/livekit/egress/pkg/pipeline/sink/uploader" "github.com/livekit/egress/pkg/stats" "github.com/livekit/egress/pkg/types" "github.com/livekit/protocol/logger" ) type FileSink struct { *base *config.FileConfig *uploader.Uploader conf *config.PipelineConfig } func newFileSink( p *gstreamer.Pipeline, conf *config.PipelineConfig, o *config.FileConfig, monitor *stats.HandlerMonitor, ) (*FileSink, error) { u, err := uploader.New(o.StorageConfig, conf.BackupConfig, monitor, conf.StorageObserver, conf.Info) if err != nil { return nil, err } fileBin, err := builder.BuildFileBin(p, conf) if err != nil { return nil, err } if err = p.AddSinkBin(fileBin); err != nil { return nil, err } return &FileSink{ base: &base{bin: fileBin}, FileConfig: o, Uploader: u, conf: conf, }, nil } func (s *FileSink) Start() error { return nil } func (s *FileSink) UploadManifest(filepath string) (string, bool, error) { if s.DisableManifest && !s.conf.Info.BackupStorageUsed { return "", false, nil } storagePath := path.Join(path.Dir(s.StorageFilepath), path.Base(filepath)) location, _, err := s.Upload(filepath, storagePath, types.OutputTypeJSON, false) if err != nil { return "", false, err } return location, true, nil } func (s *FileSink) Close() error { start := time.Now() location, size, err := s.Upload(s.LocalFilepath, s.StorageFilepath, s.OutputType, false) if err != nil { logger.Debugw("file upload failed", err) return err } s.FileInfo.Location = location s.FileInfo.Size = size logger.Debugw("file upload completed", "bytes", size, "duration", time.Since(start)) if s.conf.Manifest != nil { s.conf.Manifest.AddFile(s.StorageFilepath, location) } return nil } ================================================ FILE: pkg/pipeline/sink/image.go ================================================ // Copyright 2023 LiveKit, Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package sink import ( "fmt" "path" "strings" "time" "github.com/frostbyte73/core" "github.com/livekit/egress/pkg/config" "github.com/livekit/egress/pkg/errors" "github.com/livekit/egress/pkg/gstreamer" "github.com/livekit/egress/pkg/pipeline/builder" "github.com/livekit/egress/pkg/pipeline/sink/uploader" "github.com/livekit/egress/pkg/stats" "github.com/livekit/egress/pkg/types" "github.com/livekit/protocol/livekit" "github.com/livekit/protocol/logger" ) type ImageSink struct { *base *config.ImageConfig *uploader.Uploader conf *config.PipelineConfig callbacks *gstreamer.Callbacks initialized bool startTime time.Time startRunningTime uint64 createdImages chan *imageUpdate done core.Fuse } type imageUpdate struct { timestamp uint64 filename string } func newImageSink( p *gstreamer.Pipeline, conf *config.PipelineConfig, o *config.ImageConfig, callbacks *gstreamer.Callbacks, monitor *stats.HandlerMonitor, ) (*ImageSink, error) { u, err := uploader.New(o.StorageConfig, conf.BackupConfig, monitor, conf.StorageObserver, conf.Info) if err != nil { return nil, err } imageBin, err := builder.BuildImageBin(o, p, conf) if err != nil { return nil, err } if err = p.AddSinkBin(imageBin); err != nil { return nil, err } maxPendingUploads := (conf.MaxUploadQueue * 60) / int(o.CaptureInterval) return &ImageSink{ base: &base{ bin: imageBin, }, ImageConfig: o, Uploader: u, conf: conf, callbacks: callbacks, createdImages: make(chan 
*imageUpdate, maxPendingUploads), }, nil } func (s *ImageSink) Start() error { go func() { var err error defer func() { if err != nil { s.callbacks.OnError(err) } s.done.Break() }() for update := range s.createdImages { err = s.handleNewImage(update) if err != nil { logger.Errorw("new image handling failed", err) return } } }() return nil } func (s *ImageSink) handleNewImage(update *imageUpdate) error { s.ImagesInfo.ImageCount++ filename := update.filename ts := s.getImageTime(update.timestamp) imageLocalPath := path.Join(s.LocalDir, filename) if s.ImageSuffix != livekit.ImageFileSuffix_IMAGE_SUFFIX_INDEX { var newFilename string switch s.ImageSuffix { case livekit.ImageFileSuffix_IMAGE_SUFFIX_TIMESTAMP: newFilename = fmt.Sprintf("%s_%s%03d%s", s.ImagePrefix, ts.Format("20060102150405"), ts.UnixMilli()%1000, types.FileExtensionForOutputType[s.OutputType]) case livekit.ImageFileSuffix_IMAGE_SUFFIX_NONE_OVERWRITE: newFilename = fmt.Sprintf("%s%s", s.ImagePrefix, types.FileExtensionForOutputType[s.OutputType]) default: return errors.ErrNotSupported(s.ImageSuffix.String()) } filename = newFilename } imageStoragePath := path.Join(s.StorageDir, filename) location, _, err := s.Upload(imageLocalPath, imageStoragePath, s.OutputType, true) if err != nil { return err } if s.conf.Manifest != nil { s.conf.Manifest.AddImage(imageStoragePath, ts, location) } return nil } func (s *ImageSink) getImageTime(pts uint64) time.Time { if !s.initialized { s.startTime = time.Now() s.startRunningTime = pts s.initialized = true } return s.startTime.Add(time.Duration(pts - s.startRunningTime)) } func (s *ImageSink) NewImage(filepath string, ts uint64) error { if !strings.HasPrefix(filepath, s.LocalDir) { return fmt.Errorf("invalid filepath") } filename := filepath[len(s.LocalDir)+1:] s.createdImages <- &imageUpdate{ filename: filename, timestamp: ts, } return nil } func (s *ImageSink) UploadManifest(filepath string) (string, bool, error) { if s.DisableManifest && 
!s.conf.Info.BackupStorageUsed { return "", false, nil } storagePath := path.Join(s.StorageDir, path.Base(filepath)) location, _, err := s.Upload(filepath, storagePath, types.OutputTypeJSON, false) if err != nil { return "", false, err } return location, true, nil } func (s *ImageSink) Close() error { close(s.createdImages) <-s.done.Watch() return nil } ================================================ FILE: pkg/pipeline/sink/m3u8/writer.go ================================================ // Copyright 2023 LiveKit, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package m3u8

import (
	"container/list"
	"fmt"
	"io/fs"
	"os"
	"strconv"
	"strings"
	"time"
)

// PlaylistType selects the EXT-X-PLAYLIST-TYPE behavior of a writer.
type PlaylistType string

const (
	PlaylistTypeLive  PlaylistType = ""
	PlaylistTypeEvent PlaylistType = "EVENT"
)

// PlaylistWriter appends segment entries to an HLS playlist and finalizes it on Close.
type PlaylistWriter interface {
	Append(dateTime time.Time, duration float64, filename string) error
	Close() error
}

// basePlaylistWriter holds state shared by both playlist flavors.
type basePlaylistWriter struct {
	filename       string
	targetDuration int
}

// eventPlaylistWriter appends entries to an ever-growing EVENT playlist file.
type eventPlaylistWriter struct {
	basePlaylistWriter
}

// livePlaylistWriter rewrites a sliding-window live playlist on every append.
type livePlaylistWriter struct {
	basePlaylistWriter

	windowSize           int
	mediaSeq             int
	livePlaylistHeader   string
	livePlaylistSegments *list.List
}

// createHeader renders the playlist preamble; live playlists omit
// EXT-X-PLAYLIST-TYPE and the fixed media sequence line.
func (p *basePlaylistWriter) createHeader(plType PlaylistType) string {
	var b strings.Builder
	b.WriteString("#EXTM3U\n")
	b.WriteString("#EXT-X-VERSION:4\n")
	if plType != PlaylistTypeLive {
		fmt.Fprintf(&b, "#EXT-X-PLAYLIST-TYPE:%s\n", plType)
	}
	b.WriteString("#EXT-X-ALLOW-CACHE:NO\n")
	fmt.Fprintf(&b, "#EXT-X-TARGETDURATION:%d\n", p.targetDuration)
	if plType != PlaylistTypeLive {
		b.WriteString("#EXT-X-MEDIA-SEQUENCE:0\n")
	}
	return b.String()
}

// createSegmentEntry renders one segment's PROGRAM-DATE-TIME/EXTINF pair.
func (p *basePlaylistWriter) createSegmentEntry(dateTime time.Time, duration float64, filename string) string {
	var b strings.Builder
	b.WriteString("#EXT-X-PROGRAM-DATE-TIME:")
	b.WriteString(dateTime.UTC().Format("2006-01-02T15:04:05.999Z07:00"))
	b.WriteString("\n#EXTINF:")
	b.WriteString(strconv.FormatFloat(duration, 'f', 3, 32))
	b.WriteString(",\n")
	b.WriteString(filename)
	b.WriteString("\n")
	return b.String()
}

// NewEventPlaylistWriter creates the playlist file and writes the EVENT header.
func NewEventPlaylistWriter(filename string, targetDuration int) (PlaylistWriter, error) {
	w := &eventPlaylistWriter{
		basePlaylistWriter: basePlaylistWriter{
			filename:       filename,
			targetDuration: targetDuration,
		},
	}

	f, err := os.Create(w.filename)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	if _, err = f.WriteString(w.createHeader(PlaylistTypeEvent)); err != nil {
		return nil, err
	}
	return w, nil
}

// Append adds one segment entry to the end of the playlist file.
func (p *eventPlaylistWriter) Append(dateTime time.Time, duration float64, filename string) error {
	f, err := os.OpenFile(p.filename, os.O_WRONLY|os.O_APPEND, fs.ModeAppend)
	if err != nil {
		return err
	}
	defer f.Close()

	_, err = f.WriteString(p.createSegmentEntry(dateTime, duration, filename))
	return err
}

// Close sliding playlist and make them fixed.
func (p *eventPlaylistWriter) Close() error {
	f, err := os.OpenFile(p.filename, os.O_WRONLY|os.O_APPEND, fs.ModeAppend)
	if err != nil {
		return err
	}
	defer f.Close()

	_, err = f.WriteString("#EXT-X-ENDLIST\n")
	return err
}

// NewLivePlaylistWriter prepares a sliding-window live playlist writer; the
// file itself is written on the first Append.
func NewLivePlaylistWriter(filename string, targetDuration int, windowSize int) (PlaylistWriter, error) {
	w := &livePlaylistWriter{
		basePlaylistWriter: basePlaylistWriter{
			filename:       filename,
			targetDuration: targetDuration,
		},
		windowSize:           windowSize,
		livePlaylistSegments: list.New(),
	}
	w.livePlaylistHeader = w.createHeader(PlaylistTypeLive)
	return w, nil
}

// Append adds a segment to the window, evicting the oldest entries beyond
// windowSize, and rewrites the playlist file from scratch.
func (p *livePlaylistWriter) Append(dateTime time.Time, duration float64, filename string) error {
	f, err := os.Create(p.filename)
	if err != nil {
		return err
	}
	defer f.Close()

	p.livePlaylistSegments.PushBack(p.createSegmentEntry(dateTime, duration, filename))
	for p.livePlaylistSegments.Len() > p.windowSize {
		p.livePlaylistSegments.Remove(p.livePlaylistSegments.Front())
		p.mediaSeq++
	}

	_, err = f.WriteString(p.generatePlaylist())
	return err
}

// Close rewrites the playlist one final time and appends EXT-X-ENDLIST.
func (p *livePlaylistWriter) Close() error {
	f, err := os.Create(p.filename)
	if err != nil {
		return err
	}
	defer f.Close()

	if _, err = f.WriteString(p.generatePlaylist()); err != nil {
		return err
	}
	_, err = f.WriteString("#EXT-X-ENDLIST\n")
	return err
}

// generatePlaylist renders the header, current media sequence, and every
// segment still inside the window.
func (p *livePlaylistWriter) generatePlaylist() string {
	var b strings.Builder
	b.WriteString(p.livePlaylistHeader)
	fmt.Fprintf(&b, "#EXT-X-MEDIA-SEQUENCE:%d\n", p.mediaSeq)
	for elem := p.livePlaylistSegments.Front(); elem != nil; elem = elem.Next() {
		b.WriteString(elem.Value.(string))
	}
	return b.String()
}



================================================
FILE: pkg/pipeline/sink/m3u8/writer_test.go
================================================ // Copyright 2023 LiveKit, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package m3u8 import ( "fmt" "os" "testing" "time" "github.com/stretchr/testify/require" ) func TestEventPlaylistWriter(t *testing.T) { playlistName := "playlist.m3u8" w, err := NewEventPlaylistWriter(playlistName, 6) require.NoError(t, err) t.Cleanup(func() { _ = os.Remove(playlistName) }) now := time.Unix(0, 1683154504814142000) duration := 5.994 for i := 0; i < 3; i++ { require.NoError(t, w.Append(now, duration, fmt.Sprintf("playlist_0000%d.ts", i))) now = now.Add(time.Millisecond * 5994) } require.NoError(t, w.Close()) b, err := os.ReadFile(playlistName) require.NoError(t, err) expected := "#EXTM3U\n#EXT-X-VERSION:4\n#EXT-X-PLAYLIST-TYPE:EVENT\n#EXT-X-ALLOW-CACHE:NO\n#EXT-X-TARGETDURATION:6\n#EXT-X-MEDIA-SEQUENCE:0\n#EXT-X-PROGRAM-DATE-TIME:2023-05-03T22:55:04.814Z\n#EXTINF:5.994,\nplaylist_00000.ts\n#EXT-X-PROGRAM-DATE-TIME:2023-05-03T22:55:10.808Z\n#EXTINF:5.994,\nplaylist_00001.ts\n#EXT-X-PROGRAM-DATE-TIME:2023-05-03T22:55:16.802Z\n#EXTINF:5.994,\nplaylist_00002.ts\n#EXT-X-ENDLIST\n" require.Equal(t, expected, string(b)) } func TestLivePlaylistWriter(t *testing.T) { playlistName := "playlist.m3u8" w, err := NewLivePlaylistWriter(playlistName, 6, 3) require.NoError(t, err) t.Cleanup(func() { _ = os.Remove(playlistName) }) now := time.Unix(0, 1683154504814142000) duration := 5.994 for i := 0; i < 2; i++ { require.NoError(t, w.Append(now, 
duration, fmt.Sprintf("playlist_0000%d.ts", i))) now = now.Add(time.Millisecond * 5994) } b, err := os.ReadFile(playlistName) require.NoError(t, err) expected := "#EXTM3U\n#EXT-X-VERSION:4\n#EXT-X-ALLOW-CACHE:NO\n#EXT-X-TARGETDURATION:6\n#EXT-X-MEDIA-SEQUENCE:0\n#EXT-X-PROGRAM-DATE-TIME:2023-05-03T22:55:04.814Z\n#EXTINF:5.994,\nplaylist_00000.ts\n#EXT-X-PROGRAM-DATE-TIME:2023-05-03T22:55:10.808Z\n#EXTINF:5.994,\nplaylist_00001.ts\n" require.Equal(t, expected, string(b)) for i := 2; i < 4; i++ { require.NoError(t, w.Append(now, duration, fmt.Sprintf("playlist_0000%d.ts", i))) now = now.Add(time.Millisecond * 5994) } require.NoError(t, w.Close()) b, err = os.ReadFile(playlistName) require.NoError(t, err) expected = "#EXTM3U\n#EXT-X-VERSION:4\n#EXT-X-ALLOW-CACHE:NO\n#EXT-X-TARGETDURATION:6\n#EXT-X-MEDIA-SEQUENCE:1\n#EXT-X-PROGRAM-DATE-TIME:2023-05-03T22:55:04.814Z\n#EXTINF:5.994,\nplaylist_00001.ts\n#EXT-X-PROGRAM-DATE-TIME:2023-05-03T22:55:16.802Z\n#EXTINF:5.994,\nplaylist_00002.ts\n#EXT-X-PROGRAM-DATE-TIME:2023-05-03T22:55:22.796Z\n#EXTINF:5.994,\nplaylist_00003.ts\n#EXT-X-ENDLIST\n" require.Equal(t, expected, string(b)) } ================================================ FILE: pkg/pipeline/sink/segments.go ================================================ // Copyright 2023 LiveKit, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package sink import ( "fmt" "path" "strings" "time" "github.com/frostbyte73/core" "github.com/linkdata/deadlock" "github.com/livekit/egress/pkg/config" "github.com/livekit/egress/pkg/errors" "github.com/livekit/egress/pkg/gstreamer" "github.com/livekit/egress/pkg/pipeline/builder" "github.com/livekit/egress/pkg/pipeline/sink/m3u8" "github.com/livekit/egress/pkg/pipeline/sink/uploader" "github.com/livekit/egress/pkg/stats" "github.com/livekit/egress/pkg/types" "github.com/livekit/protocol/logger" ) const ( defaultLivePlaylistWindow = 5 ) type SegmentSink struct { *base *uploader.Uploader *config.SegmentConfig conf *config.PipelineConfig manifestPlaylist *config.Playlist callbacks *gstreamer.Callbacks segmentCount int playlist m3u8.PlaylistWriter livePlaylist m3u8.PlaylistWriter segmentLock deadlock.Mutex infoLock deadlock.Mutex playlistLock deadlock.Mutex initialized bool startTime time.Time lastUpload time.Time outputType types.OutputType startRunningTime uint64 openSegmentsStartTime map[string]uint64 closedSegments chan SegmentUpdate playlistUpdates chan SegmentUpdate done core.Fuse } type SegmentUpdate struct { endTime uint64 filename string uploadComplete chan struct{} } func newSegmentSink( p *gstreamer.Pipeline, conf *config.PipelineConfig, o *config.SegmentConfig, callbacks *gstreamer.Callbacks, monitor *stats.HandlerMonitor, ) (*SegmentSink, error) { u, err := uploader.New(o.StorageConfig, conf.BackupConfig, monitor, conf.StorageObserver, conf.Info) if err != nil { return nil, err } playlistName := path.Join(o.LocalDir, o.PlaylistFilename) playlist, err := m3u8.NewEventPlaylistWriter(playlistName, o.SegmentDuration) if err != nil { return nil, err } var livePlaylist m3u8.PlaylistWriter if o.LivePlaylistFilename != "" { playlistName = path.Join(o.LocalDir, o.LivePlaylistFilename) livePlaylist, err = m3u8.NewLivePlaylistWriter(playlistName, o.SegmentDuration, defaultLivePlaylistWindow) if err != nil { return nil, err } } outputType := o.OutputType if 
outputType == types.OutputTypeHLS { outputType = types.OutputTypeTS } segmentBin, err := builder.BuildSegmentBin(p, conf) if err != nil { return nil, err } if err = p.AddSinkBin(segmentBin); err != nil { return nil, err } maxPendingUploads := (conf.MaxUploadQueue * 60) / o.SegmentDuration segmentSink := &SegmentSink{ base: &base{ bin: segmentBin, }, Uploader: u, SegmentConfig: o, conf: conf, callbacks: callbacks, playlist: playlist, livePlaylist: livePlaylist, outputType: outputType, openSegmentsStartTime: make(map[string]uint64), closedSegments: make(chan SegmentUpdate, maxPendingUploads), playlistUpdates: make(chan SegmentUpdate, maxPendingUploads), } if conf.Manifest != nil { segmentSink.manifestPlaylist = conf.Manifest.AddPlaylist() } // Register gauges that track the number of segments and playlist updates pending upload monitor.RegisterPlaylistChannelSizeGauge(segmentSink.conf.NodeID, segmentSink.conf.ClusterID, segmentSink.conf.Info.EgressId, func() float64 { return float64(len(segmentSink.playlistUpdates)) }) monitor.RegisterSegmentsChannelSizeGauge(segmentSink.conf.NodeID, segmentSink.conf.ClusterID, segmentSink.conf.Info.EgressId, func() float64 { return float64(len(segmentSink.closedSegments)) }) return segmentSink, nil } func (s *SegmentSink) Start() error { go func() { defer close(s.playlistUpdates) for update := range s.closedSegments { s.handleClosedSegment(update) } }() go func() { defer s.done.Break() for update := range s.playlistUpdates { if err := s.handlePlaylistUpdates(update); err != nil { s.callbacks.OnError(err) return } } }() return nil } func (s *SegmentSink) handleClosedSegment(update SegmentUpdate) { // keep playlist updates in order s.playlistUpdates <- update segmentLocalPath := path.Join(s.LocalDir, update.filename) segmentStoragePath := path.Join(s.StorageDir, update.filename) // upload in parallel go func() { defer close(update.uploadComplete) location, size, err := s.Upload(segmentLocalPath, segmentStoragePath, s.outputType, true) 
if err != nil { s.callbacks.OnError(err) return } // lock segment info updates s.infoLock.Lock() s.SegmentsInfo.SegmentCount++ s.SegmentsInfo.Size += size if s.manifestPlaylist != nil { s.manifestPlaylist.AddSegment(segmentStoragePath, location) } s.infoLock.Unlock() }() } func (s *SegmentSink) handlePlaylistUpdates(update SegmentUpdate) error { s.segmentLock.Lock() t, ok := s.openSegmentsStartTime[update.filename] if !ok { s.segmentLock.Unlock() return fmt.Errorf("no open segment with the name %s", update.filename) } delete(s.openSegmentsStartTime, update.filename) s.segmentLock.Unlock() duration := float64(time.Duration(update.endTime-t)) / float64(time.Second) segmentStartTime := s.startTime.Add(time.Duration(t - s.startRunningTime)) // do not update playlist until upload is complete <-update.uploadComplete s.playlistLock.Lock() defer s.playlistLock.Unlock() if err := s.playlist.Append(segmentStartTime, duration, update.filename); err != nil { return err } s.segmentCount++ if s.shouldUploadPlaylist() { // ignore playlist upload failures until close _ = s.uploadPlaylist() } if s.livePlaylist != nil { if err := s.livePlaylist.Append(segmentStartTime, duration, update.filename); err != nil { return err } // ignore playlist upload failures until close _ = s.uploadLivePlaylist() } return nil } // Each segment adds about 100 bytes in the playlist, and long playlists can get very large. 
// Uploads every N segments, where N is the number of hours, with a minimum frequency of once per minute
func (s *SegmentSink) shouldUploadPlaylist() bool {
	// upload when nothing has been uploaded yet, when the segment count lands
	// on the hour-scaled interval, or when more than a minute has passed since
	// the last playlist upload
	return s.lastUpload.IsZero() ||
		s.segmentCount%(int(time.Since(s.startTime)/time.Hour)+1) == 0 ||
		time.Since(s.lastUpload) > time.Minute
}

// uploadPlaylist uploads the main playlist file and records its storage
// location on SegmentsInfo (and the manifest playlist, if present).
func (s *SegmentSink) uploadPlaylist() error {
	playlistLocalPath := path.Join(s.LocalDir, s.PlaylistFilename)
	playlistStoragePath := path.Join(s.StorageDir, s.PlaylistFilename)
	playlistLocation, _, err := s.Upload(playlistLocalPath, playlistStoragePath, s.OutputType, false)
	if err != nil {
		return err
	}
	s.lastUpload = time.Now()
	s.SegmentsInfo.PlaylistLocation = playlistLocation
	if s.manifestPlaylist != nil {
		s.manifestPlaylist.Location = playlistLocation
	}
	return nil
}

// uploadLivePlaylist uploads the live playlist file, recording its location
// only on success.
func (s *SegmentSink) uploadLivePlaylist() error {
	liveLocalPath := path.Join(s.LocalDir, s.LivePlaylistFilename)
	liveStoragePath := path.Join(s.StorageDir, s.LivePlaylistFilename)
	livePlaylistLocation, _, err := s.Upload(liveLocalPath, liveStoragePath, s.OutputType, false)
	if err == nil {
		s.SegmentsInfo.LivePlaylistLocation = livePlaylistLocation
	}
	return err
}

// UpdateStartDate sets the wall-clock start time used to compute per-segment
// program dates.
func (s *SegmentSink) UpdateStartDate(t time.Time) {
	s.segmentLock.Lock()
	defer s.segmentLock.Unlock()
	s.startTime = t
}

// FragmentOpened records the running-time start of a newly opened segment.
// The first fragment also captures startRunningTime as the timeline origin.
func (s *SegmentSink) FragmentOpened(filepath string, startTime uint64) error {
	if !strings.HasPrefix(filepath, s.LocalDir) {
		return fmt.Errorf("invalid filepath")
	}
	// strip the LocalDir prefix plus the path separator
	filename := filepath[len(s.LocalDir)+1:]

	s.segmentLock.Lock()
	defer s.segmentLock.Unlock()
	if !s.initialized {
		s.initialized = true
		s.startRunningTime = startTime
	}
	if _, ok := s.openSegmentsStartTime[filename]; ok {
		return fmt.Errorf("segment with this name already started")
	}
	s.openSegmentsStartTime[filename] = startTime
	return nil
}

// FragmentClosed queues a finished segment for upload and playlist update,
// failing fast instead of blocking when the upload queue is full.
func (s *SegmentSink) FragmentClosed(filepath string, endTime uint64) error {
	if !strings.HasPrefix(filepath, s.LocalDir) {
		return fmt.Errorf("invalid filepath")
	}
	filename := filepath[len(s.LocalDir)+1:]

	select {
	case s.closedSegments <-
SegmentUpdate{
		filename:       filename,
		endTime:        endTime,
		uploadComplete: make(chan struct{}),
	}:
		return nil
	default:
		// queue full: drop this segment rather than stall the pipeline
		err := errors.New("segment upload job queue is full")
		logger.Infow("failed to upload segment", "error", err)
		return errors.ErrUploadFailed(filename, err)
	}
}

// UploadManifest uploads the manifest JSON unless manifests are disabled
// (backup-storage use overrides the disable flag). Returns the storage
// location and whether an upload was performed.
func (s *SegmentSink) UploadManifest(filepath string) (string, bool, error) {
	if s.DisableManifest && !s.conf.Info.BackupStorageUsed {
		return "", false, nil
	}

	storagePath := path.Join(s.StorageDir, path.Base(filepath))
	location, _, err := s.Upload(filepath, storagePath, types.OutputTypeJSON, false)
	if err != nil {
		return "", false, err
	}
	return location, true, nil
}

// Close drains the upload/playlist goroutines (closing closedSegments cascades
// through playlistUpdates to the done fuse), then finalizes and uploads the
// playlist files.
func (s *SegmentSink) Close() error {
	// wait for pending jobs to finish
	close(s.closedSegments)
	<-s.done.Watch()

	s.playlistLock.Lock()
	defer s.playlistLock.Unlock()

	if err := s.playlist.Close(); err != nil {
		return err
	}
	if err := s.uploadPlaylist(); err != nil {
		return err
	}

	if s.livePlaylist != nil {
		if err := s.livePlaylist.Close(); err != nil {
			return err
		}
		if err := s.uploadLivePlaylist(); err != nil {
			return err
		}
	}

	return nil
}

================================================
FILE: pkg/pipeline/sink/sink.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sink import ( "go.uber.org/atomic" "github.com/livekit/egress/pkg/config" "github.com/livekit/egress/pkg/errors" "github.com/livekit/egress/pkg/gstreamer" "github.com/livekit/egress/pkg/stats" "github.com/livekit/egress/pkg/types" "github.com/livekit/protocol/logger" ) type Sink interface { Start() error AddEOSProbe() EOSReceived() bool Close() error UploadManifest(string) (string, bool, error) } type base struct { bin *gstreamer.Bin eosReceived atomic.Bool } func NewSink( p *gstreamer.Pipeline, conf *config.PipelineConfig, egressType types.EgressType, o config.OutputConfig, callbacks *gstreamer.Callbacks, monitor *stats.HandlerMonitor, ) (Sink, error) { switch egressType { case types.EgressTypeFile: return newFileSink(p, conf, o.(*config.FileConfig), monitor) case types.EgressTypeSegments: return newSegmentSink(p, conf, o.(*config.SegmentConfig), callbacks, monitor) case types.EgressTypeStream: return newStreamSink(p, conf, o.(*config.StreamConfig)) case types.EgressTypeWebsocket: return newWebsocketSink(p, o.(*config.StreamConfig), types.MimeTypeRawAudio, callbacks) case types.EgressTypeImages: return newImageSink(p, conf, o.(*config.ImageConfig), callbacks, monitor) default: return nil, errors.ErrInvalidInput("output type") } } func (s *base) AddEOSProbe() { if err := s.bin.AddOnEOSReceived(func() { logger.Debugw("eos received", "sink", s.bin.GetName()) s.eosReceived.Store(true) }); err != nil { logger.Errorw("failed to add EOS probe", err) } } func (s *base) EOSReceived() bool { return s.eosReceived.Load() } ================================================ FILE: pkg/pipeline/sink/stream.go ================================================ // Copyright 2025 LiveKit, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package sink import ( "time" "github.com/frostbyte73/core" "github.com/linkdata/deadlock" "github.com/livekit/egress/pkg/config" "github.com/livekit/egress/pkg/errors" "github.com/livekit/egress/pkg/gstreamer" "github.com/livekit/egress/pkg/logging" "github.com/livekit/egress/pkg/pipeline/builder" "github.com/livekit/egress/pkg/types" "github.com/livekit/protocol/logger" ) type StreamSink struct { *base conf *config.PipelineConfig bin *builder.StreamBin closed core.Fuse mu deadlock.RWMutex streams map[string]*builder.Stream loggers map[string]*logging.CSVLogger[logging.StreamStats] } func newStreamSink(p *gstreamer.Pipeline, conf *config.PipelineConfig, o *config.StreamConfig) (*StreamSink, error) { streamBin, err := builder.BuildStreamBin(p, conf, o) if err != nil { return nil, err } ss := &StreamSink{ base: &base{ bin: streamBin.Bin, }, conf: conf, bin: streamBin, streams: make(map[string]*builder.Stream), loggers: make(map[string]*logging.CSVLogger[logging.StreamStats]), } o.Streams.Range(func(_, stream any) bool { err = ss.AddStream(stream.(*config.Stream)) return err == nil }) if err != nil { return nil, err } if err = p.AddSinkBin(streamBin.Bin); err != nil { return nil, err } return ss, nil } func (s *StreamSink) Start() error { if s.conf.Debug.EnableStreamLogging { go func() { closed := s.closed.Watch() ticker := time.NewTicker(time.Second * 10) defer ticker.Stop() for { select { case <-closed: return case <-ticker.C: s.mu.RLock() for name, stream := range s.streams { if stats, ok := stream.Stats(); ok { if csvLogger, ok := s.loggers[name]; ok { 
csvLogger.Write(stats)
					}
				}
			}
			s.mu.RUnlock()
		}
	}
	}()
	}
	return nil
}

// AddStream builds a new output stream and attaches it to the stream bin.
// Per-stream CSV stats logging is enabled only in debug mode for RTMP outputs.
func (s *StreamSink) AddStream(stream *config.Stream) error {
	ss, err := s.bin.BuildStream(stream, s.conf.Framerate)
	if err != nil {
		return err
	}

	s.mu.Lock()
	s.streams[stream.Name] = ss
	if s.conf.Debug.EnableStreamLogging && s.bin.OutputType == types.OutputTypeRTMP {
		// logger creation failure is non-fatal; the stream still starts
		csvLogger, err := logging.NewCSVLogger[logging.StreamStats](stream.Name)
		if err != nil {
			logger.Errorw("failed to create stream logger", err)
		} else {
			s.loggers[stream.Name] = csvLogger
		}
	}
	s.mu.Unlock()

	return s.bin.Bin.AddSinkBin(ss.Bin)
}

// GetStream returns the config of the stream with the given name.
func (s *StreamSink) GetStream(name string) (*config.Stream, error) {
	s.mu.Lock()
	ss, ok := s.streams[name]
	s.mu.Unlock()
	if !ok {
		return nil, errors.ErrStreamNotFound(name)
	}
	return ss.Conf, nil
}

// ResetStream resets the named stream in response to streamErr, delegating to
// the stream's own Reset.
func (s *StreamSink) ResetStream(stream *config.Stream, streamErr error) (bool, error) {
	s.mu.Lock()
	ss, ok := s.streams[stream.Name]
	s.mu.Unlock()
	if !ok {
		return false, errors.ErrStreamNotFound(stream.RedactedUrl)
	}
	return ss.Reset(streamErr)
}

// RemoveStream detaches the named stream's bin and forgets the stream.
func (s *StreamSink) RemoveStream(stream *config.Stream) error {
	s.mu.Lock()
	_, ok := s.streams[stream.Name]
	if !ok {
		s.mu.Unlock()
		return errors.ErrStreamNotFound(stream.RedactedUrl)
	}
	delete(s.streams, stream.Name)
	s.mu.Unlock()

	return s.bin.Bin.RemoveSinkBin(stream.Name)
}

// UploadManifest is a no-op for stream egress.
func (s *StreamSink) UploadManifest(_ string) (string, bool, error) {
	return "", false, nil
}

// Close shuts down the debug stream loggers exactly once.
func (s *StreamSink) Close() error {
	s.closed.Once(func() {
		s.mu.Lock()
		defer s.mu.Unlock()
		for _, l := range s.loggers {
			l.Close()
		}
	})
	return nil
}

================================================
FILE: pkg/pipeline/sink/uploader/uploader.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package uploader import ( "os" "path" "time" "github.com/livekit/egress/pkg/config" "github.com/livekit/egress/pkg/errors" "github.com/livekit/egress/pkg/stats" "github.com/livekit/egress/pkg/types" "github.com/livekit/protocol/livekit" "github.com/livekit/protocol/logger" "github.com/livekit/protocol/observability/storageobs" "github.com/livekit/psrpc" "github.com/livekit/storage" ) const presignedExpiration = time.Hour * 24 * 7 // 7 days type Uploader struct { primary *store backup *store primaryFailed bool info *livekit.EgressInfo monitor *stats.HandlerMonitor storageObserver config.StorageObserver } type store struct { storage.Storage conf *config.StorageConfig name string } func New(primary, backup *config.StorageConfig, monitor *stats.HandlerMonitor, storageObserver config.StorageObserver, info *livekit.EgressInfo) (*Uploader, error) { p, err := getUploader(primary) if err != nil { return nil, err } u := &Uploader{ primary: p, info: info, monitor: monitor, storageObserver: storageObserver, } if backup != nil { b, err := getUploader(backup) if err != nil { logger.Errorw("failed to create backup uploader", err) } else { u.backup = b } } return u, nil } func getUploader(conf *config.StorageConfig) (*store, error) { if conf == nil { conf = &config.StorageConfig{} } var ( s storage.Storage err error name string ) switch { case conf.S3 != nil: s, err = storage.NewS3(conf.S3) name = "S3" case conf.GCP != nil: s, err = storage.NewGCP(conf.GCP) name = "GCP" case conf.Azure != nil: s, err = storage.NewAzure(conf.Azure) name = "Azure" case conf.AliOSS != nil: s, 
err = storage.NewAliOSS(conf.AliOSS)
		name = "AliOSS"
	default:
		// no cloud storage configured: fall back to the local filesystem
		s, err = storage.NewLocal(&storage.LocalConfig{})
		name = "Local"
	}
	if err != nil {
		return nil, err
	}
	return &store{
		Storage: s,
		conf:    conf,
		name:    name,
	}, nil
}

// Upload writes localFilepath to storageFilepath on the primary store, falling
// back to the backup store (when configured) if the primary write fails. It
// returns the final storage location and file size. When deleteAfterUpload is
// true, the local file is removed after a successful upload.
//
// Once a primary upload fails, primaryFailed is set and every subsequent
// upload goes straight to the backup for the lifetime of this Uploader.
func (u *Uploader) Upload(
	localFilepath, storageFilepath string,
	outputType types.OutputType,
	deleteAfterUpload bool,
) (string, int64, error) {
	var primaryErr error
	if !u.primaryFailed {
		start := time.Now()
		location, size, err := u.upload(localFilepath, storageFilepath, outputType, true)
		elapsed := time.Since(start)
		if err == nil {
			if u.monitor != nil {
				// success latency recorded in milliseconds
				u.monitor.IncUploadCountSuccess(string(outputType), float64(elapsed.Milliseconds()))
			}
			if deleteAfterUpload {
				_ = os.Remove(localFilepath)
			}
			return location, size, nil
		}
		if u.monitor != nil {
			u.monitor.IncUploadCountFailure(string(outputType), float64(elapsed.Milliseconds()))
		}
		// stick to the backup store from now on
		u.primaryFailed = true
		primaryErr = err
	}
	if u.backup != nil {
		location, size, backupErr := u.upload(localFilepath, storageFilepath, outputType, false)
		if backupErr == nil {
			if u.info != nil {
				u.info.SetBackupUsed()
			}
			if u.monitor != nil {
				u.monitor.IncBackupStorageWrites(string(outputType))
			}
			if deleteAfterUpload {
				_ = os.Remove(localFilepath)
			}
			return location, size, nil
		}
		// both stores failed: surface both errors when available
		if primaryErr != nil {
			return "", 0, psrpc.NewErrorf(psrpc.InvalidArgument, "primary: %s\nbackup: %s", primaryErr.Error(), backupErr.Error())
		}
		return "", 0, psrpc.NewError(psrpc.InvalidArgument, backupErr)
	}
	return "", 0, primaryErr
}

// upload performs one write to either the primary or the backup store,
// prefixing the storage path with the store's configured prefix and optionally
// replacing the location with a presigned URL.
func (u *Uploader) upload(localFilepath string, storageFilepath string, outputType types.OutputType, primary bool) (location string, size int64, err error) {
	var s *store
	if primary {
		s = u.primary
	} else {
		s = u.backup
	}

	storageFilepath = path.Join(s.conf.Prefix, storageFilepath)
	location, size, err = s.UploadFile(localFilepath, storageFilepath, string(outputType))
	if err != nil {
		return "", 0, errors.ErrUploadFailed(s.name, err)
	}
	// NOTE(review): u.info is dereferenced unguarded here, while Upload
	// nil-checks it — assumes storageObserver is only set with a non-nil info;
	// confirm against the constructor's callers.
	if !primary && u.storageObserver != nil {
		u.storageObserver.OnStorageEvent(u.info.EgressId, string(storageobs.EventOperationUpload), location, size, int64(presignedExpiration/time.Hour/24))
	}

	if s.conf.GeneratePresignedUrl {
		location, err = s.GeneratePresignedUrl(storageFilepath, presignedExpiration)
		if err != nil {
			return "", 0, errors.ErrUploadFailed(s.name, err)
		}
		if !primary && u.storageObserver != nil {
			u.storageObserver.OnStorageEvent(u.info.EgressId, string(storageobs.EventOperationDownload), location, size, 0)
		}
	}

	return location, size, nil
}

================================================
FILE: pkg/pipeline/sink/uploader/uploader_test.go
================================================
package uploader

import (
	"io"
	"net/http"
	"os"
	"strings"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/livekit/egress/pkg/config"
	"github.com/livekit/protocol/livekit"
	"github.com/livekit/storage"
)

// TestUploader exercises primary-to-backup failover: the primary S3 config is
// intentionally invalid, so the upload must land on the backup bucket.
// NOTE(review): requires AWS_ACCESS_KEY / AWS_SECRET / AWS_REGION / AWS_BUCKET
// to be set and reachable; otherwise the backup upload fails as well.
func TestUploader(t *testing.T) {
	key := os.Getenv("AWS_ACCESS_KEY")
	secret := os.Getenv("AWS_SECRET")
	region := os.Getenv("AWS_REGION")
	bucket := os.Getenv("AWS_BUCKET")

	// deliberately misconfigured to force failover
	primary := &config.StorageConfig{
		S3: &storage.S3Config{
			AccessKey: "nonsense",
			Secret:    "public",
			Region:    "us-east-1",
			Bucket:    "fake-bucket",
		},
	}
	backup := &config.StorageConfig{
		Prefix: "testProject",
		S3: &storage.S3Config{
			AccessKey: key,
			Secret:    secret,
			Region:    region,
			Bucket:    bucket,
		},
		GeneratePresignedUrl: true,
	}

	info := &livekit.EgressInfo{}
	u, err := New(primary, backup, nil, nil, info)
	require.NoError(t, err)

	// upload this test file itself
	filepath := "uploader_test.go"
	storagePath := "uploader_test.go"
	location, size, err := u.Upload(filepath, storagePath, "text/plain", false)
	require.NoError(t, err)
	require.NotZero(t, size)
	require.NotEmpty(t, location)
	require.True(t, info.BackupStorageUsed)

	// the presigned URL should be directly fetchable
	response, err := http.Get(location)
	require.NoError(t, err)
	defer response.Body.Close()
	require.Equal(t, http.StatusOK, response.StatusCode)
	b, err := io.ReadAll(response.Body)
	require.NoError(t, err)
	require.True(t, strings.HasPrefix(string(b), "package uploader"))
}
================================================ FILE: pkg/pipeline/sink/websocket.go ================================================ // Copyright 2023 LiveKit, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package sink import ( "encoding/json" "io" "net/http" "strings" "time" "github.com/go-gst/go-gst/gst" "github.com/go-gst/go-gst/gst/app" "github.com/gorilla/websocket" "github.com/linkdata/deadlock" "go.uber.org/atomic" "github.com/livekit/egress/pkg/config" "github.com/livekit/egress/pkg/errors" "github.com/livekit/egress/pkg/gstreamer" "github.com/livekit/egress/pkg/pipeline/builder" "github.com/livekit/egress/pkg/types" "github.com/livekit/protocol/logger" "github.com/livekit/psrpc" ) const pingPeriod = time.Second * 30 type WebsocketSink struct { *base mu deadlock.Mutex conn *websocket.Conn sinkCallbacks *app.SinkCallbacks closed atomic.Bool } func newWebsocketSink( p *gstreamer.Pipeline, o *config.StreamConfig, mimeType types.MimeType, callbacks *gstreamer.Callbacks, ) (*WebsocketSink, error) { // set Content-Type header header := http.Header{} header.Set("Content-Type", string(mimeType)) var wsUrl string o.Streams.Range(func(url, _ any) bool { wsUrl = url.(string) return false }) conn, _, err := websocket.DefaultDialer.Dial(wsUrl, header) if err != nil { return nil, psrpc.NewError(psrpc.InvalidArgument, err) } websocketSink := &WebsocketSink{ base: &base{}, conn: conn, } websocketSink.sinkCallbacks = &app.SinkCallbacks{ EOSFunc: func(_ *app.Sink) { _ = 
websocketSink.Close()
		},
		NewSampleFunc: func(appSink *app.Sink) gst.FlowReturn {
			// pull the sample that triggered this callback
			sample := appSink.PullSample()
			if sample == nil {
				return gst.FlowOK
			}

			// retrieve the buffer from the sample
			buffer := sample.GetBuffer()
			if buffer == nil {
				return gst.FlowOK
			}

			// map the buffer to READ operation
			samples := buffer.Map(gst.MapRead).Bytes()

			// send to writer; io.EOF means the sink was closed
			_, err = websocketSink.Write(samples)
			if err != nil {
				if err == io.EOF {
					return gst.FlowEOS
				}
				callbacks.OnError(psrpc.NewError(psrpc.Unavailable, err))
			}
			return gst.FlowOK
		},
	}

	callbacks.AddOnTrackMuted(websocketSink.OnTrackMuted)
	callbacks.AddOnTrackUnmuted(websocketSink.OnTrackUnmuted)

	websocketSink.bin, err = builder.BuildWebsocketBin(p, websocketSink.sinkCallbacks)
	if err != nil {
		return nil, err
	}
	if err = p.AddSinkBin(websocketSink.bin); err != nil {
		return nil, err
	}

	return websocketSink, nil
}

// Start installs a locking pong responder and launches the read and ping
// loops that keep the websocket connection alive.
func (s *WebsocketSink) Start() error {
	// override default ping handler to include locking
	s.conn.SetPingHandler(func(_ string) error {
		s.mu.Lock()
		defer s.mu.Unlock()
		_ = s.conn.WriteMessage(websocket.PongMessage, []byte("pong"))
		return nil
	})

	// read loop is required for the ping handler to receive pings
	go func() {
		errCount := 0
		for {
			_, _, err := s.conn.ReadMessage()
			if s.closed.Load() {
				return
			}
			if err != nil {
				// expected shutdown conditions: close frame, EOF, closed socket
				var closeError *websocket.CloseError
				if errors.As(err, &closeError) ||
					errors.Is(err, io.EOF) ||
					strings.HasSuffix(err.Error(), "use of closed network connection") {
					return
				}
				errCount++
			}
			// reads will panic after 1000 errors, break loop before that happens
			if errCount > 100 {
				logger.Errorw("closing websocket reader", err)
				return
			}
		}
	}()

	// write loop for sending pings
	go func() {
		ticker := time.NewTicker(pingPeriod)
		defer ticker.Stop()
		for {
			<-ticker.C
			s.mu.Lock()
			if s.closed.Load() {
				s.mu.Unlock()
				return
			}
			_ = s.conn.WriteMessage(websocket.PingMessage, []byte("ping"))
			s.mu.Unlock()
		}
	}()

	return nil
}

// Write sends p as one binary websocket message, returning io.EOF once closed.
func (s *WebsocketSink) Write(p []byte) (int, error) {
s.mu.Lock() defer s.mu.Unlock() if s.closed.Load() { return 0, io.EOF } return len(p), s.conn.WriteMessage(websocket.BinaryMessage, p) } func (s *WebsocketSink) OnTrackMuted(_ string) { if err := s.writeMutedMessage(true); err != nil { logger.Errorw("failed to write mute message", err) } } func (s *WebsocketSink) OnTrackUnmuted(_ string) { if err := s.writeMutedMessage(false); err != nil { logger.Errorw("failed to write unmute message", err) } } type textMessagePayload struct { Muted bool `json:"muted"` } func (s *WebsocketSink) writeMutedMessage(muted bool) error { data, err := json.Marshal(&textMessagePayload{ Muted: muted, }) if err != nil { return err } s.mu.Lock() defer s.mu.Unlock() if s.closed.Load() { return nil } return s.conn.WriteMessage(websocket.TextMessage, data) } func (s *WebsocketSink) UploadManifest(_ string) (string, bool, error) { return "", false, nil } func (s *WebsocketSink) Close() error { s.mu.Lock() defer s.mu.Unlock() if !s.closed.Swap(true) { logger.Debugw("closing websocket connection") // write close message for graceful disconnection _ = s.conn.WriteMessage(websocket.CloseMessage, nil) // terminate connection and close the `closed` channel _ = s.conn.Close() } return nil } ================================================ FILE: pkg/pipeline/source/pulse/pactl.go ================================================ package pulse import ( "bytes" "encoding/json" "os/exec" "github.com/livekit/egress/pkg/errors" ) func Clients() (int, error) { info, err := List() if err != nil { return 0, err } return len(info.Clients), nil } func List() (*PulseInfo, error) { cmd := exec.Command("pactl", "--format", "json", "list") var b, e bytes.Buffer cmd.Stdout = &b cmd.Stderr = &e if cmd.Run() != nil { return nil, errors.New(e.String()) } info := &PulseInfo{} return info, json.Unmarshal(b.Bytes(), info) } type PulseInfo struct { Modules []Module `json:"modules"` Sinks []Device `json:"sinks"` Sources []Device `json:"sources"` SinkInputs []SinkInput 
`json:"sink_inputs"` SourceOutputs []SourceOutput `json:"source_outputs"` Clients []Client `json:"clients"` Samples []interface{} `json:"samples"` Cards []interface{} `json:"cards"` } type Module struct { Name string `json:"name"` Argument string `json:"argument"` UsageCounter string `json:"usage_counter"` Properties map[string]interface{} `json:"properties"` } type Device struct { Index int `json:"index"` State string `json:"state"` Name string `json:"name"` Description string `json:"description"` Driver string `json:"driver"` SampleSpecification string `json:"sample_specification"` ChannelMap string `json:"channel_map"` OwnerModule int `json:"owner_module"` Mute bool `json:"mute"` Volume map[string]Volume `json:"volume"` Balance float64 `json:"balance"` BaseVolume Volume `json:"base_volume"` MonitorSource string `json:"monitor_source"` Latency Latency `json:"latency"` Flags []string `json:"flags"` Properties map[string]interface{} `json:"properties"` Ports []interface{} `json:"ports"` ActivePort interface{} `json:"active_port"` Formats []string `json:"formats"` } type IOBase struct { Index int `json:"index"` Driver string `json:"driver"` OwnerModule string `json:"owner_module"` Client string `json:"client"` SampleSpecification string `json:"sample_specification"` ChannelMap string `json:"channel_map"` Format string `json:"format"` Corked bool `json:"corked"` Mute bool `json:"mute"` Volume map[string]Volume `json:"volume"` Balance float64 `json:"balance"` BufferLatencyUSec float64 `json:"buffer_latency_usec"` SinkLatencyUSec float64 `json:"sink_latency_usec"` ResampleMethod string `json:"resample_method"` Properties map[string]interface{} `json:"properties"` } type SinkInput struct { IOBase `json:",inline"` Sink int `json:"sink"` } type SourceOutput struct { IOBase `json:",inline"` Source int `json:"source"` } type Client struct { Index int `json:"index"` Driver string `json:"driver"` OwnerModule string `json:"owner_module"` Properties map[string]interface{} 
`json:"properties"` } type Volume struct { Value int `json:"value"` ValuePercent string `json:"value_percent"` Db string `json:"db"` } type Latency struct { Actual float64 `json:"actual"` Configured float64 `json:"configured"` } type EgressInfo struct { EgressID string SinkInputs int SourceOutputs int } func (info *PulseInfo) GetEgressInfo() map[int]*EgressInfo { egressMap := make(map[int]*EgressInfo) for _, sink := range info.Sinks { egressMap[sink.Index] = &EgressInfo{ EgressID: sink.Name, } } for _, sinkInput := range info.SinkInputs { egressMap[sinkInput.Sink].SinkInputs++ } for _, sourceOutput := range info.SourceOutputs { egressMap[sourceOutput.Source].SourceOutputs++ } return egressMap } ================================================ FILE: pkg/pipeline/source/sdk/appwriter.go ================================================ // Copyright 2023 LiveKit, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package sdk import ( "io" "net" "sync" "time" "github.com/frostbyte73/core" "github.com/go-gst/go-gst/gst" "github.com/go-gst/go-gst/gst/app" "github.com/linkdata/deadlock" "github.com/pion/rtp" "github.com/pion/rtp/codecs" "github.com/pion/webrtc/v4" "go.uber.org/atomic" "github.com/livekit/egress/pkg/config" "github.com/livekit/egress/pkg/errors" "github.com/livekit/egress/pkg/gstreamer" "github.com/livekit/egress/pkg/logging" "github.com/livekit/egress/pkg/types" "github.com/livekit/media-sdk/jitter" "github.com/livekit/protocol/logger" "github.com/livekit/protocol/utils" lksdk "github.com/livekit/server-sdk-go/v2" "github.com/livekit/server-sdk-go/v2/pkg/synchronizer" ) const ( errBufferTooSmall = "buffer too small" discontinuityTolerance = 500 * time.Millisecond pipelineCheckInterval = 5 * time.Second cSamplesQueueDepth = 100 drainingTimeout = time.Second * 3 unsubscribedGracePeriod = time.Second * 2 // FlowFlushing recovery: threshold of consecutive FlowFlushing returns before // triggering source bin reset. ~2 seconds of 20ms audio packets. flushingThreshold = 100 // Maximum number of source bin resets per writer lifetime. 
maxSrcResets = 2 ) var errFlowFlushingThreshold = errors.New("persistent FlowFlushing detected") type sampleItem struct { sample []jitter.ExtPacket next *sampleItem } type AppWriter struct { conf *config.PipelineConfig logger logger.Logger csvLogger *logging.CSVLogger[logging.TrackStats] drift atomic.Duration maxDrift atomic.Duration pub lksdk.TrackPublication track *webrtc.TrackRemote codec types.MimeType src *app.Source startTime time.Time trackSource *config.TrackSource buffer *jitter.Buffer samplesHead *sampleItem samplesTail *sampleItem samplesLen int samplesLock deadlock.Mutex samplesCond *sync.Cond translator Translator callbacks *gstreamer.Callbacks sendPLI func() pliThrottle core.Throttle // a/v sync synchronizer *synchronizer.Synchronizer *synchronizer.TrackSynchronizer driftHandler DriftHandler lastPTS time.Duration lastDrift time.Duration lastPipelineCheckPTS time.Duration initialized bool // state buildReady core.Fuse active atomic.Bool lastReceived atomic.Time lastPushed atomic.Time playing core.Fuse draining core.Fuse unsubscribed core.Fuse endStreamSignaled core.Fuse endStreamSourceProcessed core.Fuse endStreamProcessed core.Fuse finished core.Fuse stats appWriterStats // FlowFlushing recovery flushingCount int // consecutive FlowFlushing returns from PushBuffer srcResetCount int // number of source bin resets performed // diagnostics, set on unexpected flushing when pushing packets to the pipeline flushDotRequested atomic.Bool // ensure selector/bin removal is only triggered once on terminal read errors removalRequested atomic.Bool tpLock deadlock.RWMutex timeProvider gstreamer.TimeProvider } type appWriterStats struct { packetsDropped atomic.Uint64 } type DriftHandler interface { EnqueueDrift(t time.Duration) Processed() time.Duration } func NewAppWriter( conf *config.PipelineConfig, track *webrtc.TrackRemote, pub lksdk.TrackPublication, rp *lksdk.RemoteParticipant, ts *config.TrackSource, synchronizer *synchronizer.Synchronizer, driftHandler 
DriftHandler, callbacks *gstreamer.Callbacks, ) (*AppWriter, error) { w := &AppWriter{ conf: conf, logger: logger.GetLogger().WithValues("trackID", track.ID(), "kind", track.Kind().String()), track: track, pub: pub, codec: ts.MimeType, src: ts.AppSrc, trackSource: ts, callbacks: callbacks, synchronizer: synchronizer, TrackSynchronizer: synchronizer.AddTrack(track, rp.Identity()), driftHandler: driftHandler, timeProvider: gstreamer.NopTimeProvider(), } w.samplesCond = sync.NewCond(&w.samplesLock) ts.OnKeyframeRequired = w.onKeyframeRequired if conf.Debug.EnableTrackLogging { csvLogger, err := logging.NewCSVLogger[logging.TrackStats](track.ID()) if err != nil { logger.Errorw("failed to create csv logger", err) } else { w.csvLogger = csvLogger w.OnSenderReport(func(drift time.Duration) { logger.Debugw("received sender report", "drift", drift) if w.driftHandler != nil { // presence of the drift handler means that PTS updates on SRs are disabled d := drift - w.lastDrift w.lastDrift = drift w.driftHandler.EnqueueDrift(d) } w.updateDrift(drift) }) } } var depacketizer rtp.Depacketizer switch ts.MimeType { case types.MimeTypeOpus: depacketizer = &codecs.OpusPacket{} w.translator = NewNullTranslator() case types.MimeTypePCMU, types.MimeTypePCMA: depacketizer = &G711Packet{} w.translator = NewNullTranslator() case types.MimeTypeH264: depacketizer = &codecs.H264Packet{} w.translator = NewNullTranslator() case types.MimeTypeVP8: depacketizer = &codecs.VP8Packet{} w.translator = NewVP8Translator(w.logger) case types.MimeTypeVP9: depacketizer = &codecs.VP9Packet{} w.translator = NewNullTranslator() default: return nil, errors.ErrNotSupported(string(ts.MimeType)) } opts := []jitter.Option{jitter.WithLogger(w.logger)} if track.Kind() == webrtc.RTPCodecTypeVideo { w.pliThrottle = core.NewThrottle(time.Second) w.sendPLI = func() { w.pliThrottle(func() { rp.WritePLI(track.SSRC()) }) } opts = append(opts, jitter.WithPacketLossHandler(func(uint64, uint64) { w.sendPLI() })) } w.buffer = 
jitter.NewBuffer(
	depacketizer,
	conf.Latency.JitterBufferLatency,
	w.onPacket,
	opts...,
)

go w.start()

return w, nil
}

// start is the writer's main loop. It marks the writer active, launches the
// helper goroutines (CSV stats logging, build-ready mute reporting, sample
// pushing), then reads RTP until the endStreamSignaled fuse breaks. It then
// drains the jitter buffer, waits (bounded by drainingTimeout) for pushSamples
// to finish, sends EOS to the appsrc if playback ever started, and finally
// breaks the finished fuse.
func (w *AppWriter) start() {
	w.startTime = time.Now()
	w.active.Store(true)

	if w.csvLogger != nil {
		go w.logStats()
	}

	go func() {
		// once the pipeline build is ready, report the track as muted if the
		// writer has not seen any packets yet
		<-w.callbacks.BuildReady
		w.buildReady.Once(func() {
			if !w.active.Load() {
				w.callbacks.OnTrackMuted(w.track.ID())
			}
		})
	}()

	go w.pushSamples()

	// read until end of stream is signaled
	for !w.endStreamSignaled.IsBroken() {
		w.readNext()
	}

	w.drainJitterBuffer()

	// wait for pushSamples to consume the remaining queued samples; the
	// timeout guards against a stuck draining sequence
	select {
	case <-w.endStreamProcessed.Watch():
		w.logger.Debugw("endStreamProcessed fuse broken")
	case <-time.After(drainingTimeout):
		// NOTE(review): message hardcodes "3 seconds" — presumably matches
		// drainingTimeout; confirm if that constant ever changes
		w.logger.Errorw("endStreamProcessed not broken after 3 seconds, bug in the draining logic!", nil,
			"endStreamSourceProcessed", w.endStreamSourceProcessed.IsBroken(),
			"playing", w.playing.IsBroken(),
			"active", w.active.Load(),
			"lastReceived", w.lastReceived.Load(),
			"lastPushed", w.lastPushed.Load(),
			"lastPTS", w.lastPTS,
		)
	}

	// clean up
	if w.playing.IsBroken() {
		w.callbacks.OnEOSSent()
		if flow := w.src.EndStream(); flow != gst.FlowOK && flow != gst.FlowFlushing {
			w.logger.Warnw("unexpected flow return", nil, "flowReturn", flow.String())
		}
		if w.driftHandler != nil {
			w.logger.Debugw("processed drift", "drift", w.driftHandler.Processed())
		}
	}

	w.logger.Infow("writer finished")
	if w.csvLogger != nil {
		w.csvLogger.Close()
	}
	if w.trackSource != nil {
		w.trackSource.OnKeyframeRequired = nil
	}
	w.finished.Break()
}

// readNext reads one RTP packet from the track (500ms read deadline) and
// pushes it into the jitter buffer. Until initialization completes, packets
// are routed through PrimeForStart; once it reports done, the accumulated
// batch is pushed instead of the single packet.
func (w *AppWriter) readNext() {
	_ = w.track.SetReadDeadline(time.Now().Add(time.Millisecond * 500))
	pkt, _, err := w.track.ReadRTP()
	if err != nil {
		w.handleReadError(err)
		return
	}
	receivedAt := time.Now()

	var packets []jitter.ExtPacket
	if !w.initialized {
		ready, dropped, done := w.PrimeForStart(jitter.ExtPacket{ReceivedAt: receivedAt, Packet: pkt})
		if dropped > 0 {
			w.stats.packetsDropped.Add(uint64(dropped))
			if w.sendPLI != nil {
				// request a keyframe to replace the dropped packets
				w.sendPLI()
			}
		}
		if !done {
			return
		}
		w.initialized = true
		packets = ready
		w.lastReceived.Store(ready[len(ready)-1].ReceivedAt)
	} else {
		w.lastReceived.Store(receivedAt)
	}

	if !w.active.Swap(true) {
		// set track active
		w.logTrackState("track active")
		if w.buildReady.IsBroken() {
			w.callbacks.OnTrackUnmuted(w.track.ID())
		}
		if w.sendPLI != nil {
			w.sendPLI()
		}
	}

	if len(packets) > 0 {
		w.buffer.PushExtPacketBatch(packets)
	} else {
		w.buffer.Push(pkt)
	}
}

// handleReadError classifies a ReadRTP error and reacts: while draining it
// either keeps reading (timeouts during a delayed drain) or breaks
// endStreamSignaled; on plain timeouts it handles the unsubscribe grace
// period and mute detection; on any other error it ends the stream.
func (w *AppWriter) handleReadError(err error) {
	var netErr net.Error
	switch {
	case w.draining.IsBroken():
		if !w.endStreamSignaled.IsBroken() {
			// Delayed drain in progress (Drain(false) was called, timer pending)
			if (errors.As(err, &netErr) && netErr.Timeout()) || err.Error() == errBufferTooSmall {
				// Keep reading until timer fires to preserve pipeline latency timeout
				return
			}
		}
		w.logger.Debugw("handleReadError, breaking endStreamSignaled", "error", err)
		// connection closed or EOF - no point in trying to read anymore
		w.endStreamSignaled.Break()
		w.notifyPushSamples()

	case errors.As(err, &netErr) && netErr.Timeout():
		lastRecv := w.lastReceived.Load()
		if lastRecv.IsZero() {
			lastRecv = w.startTime
		}

		// If track was unsubscribed and grace period elapsed, end the stream
		if w.unsubscribed.IsBroken() && time.Since(lastRecv) > unsubscribedGracePeriod {
			w.logger.Debugw("unsubscribed grace period elapsed, ending stream")
			w.ensureRemovedBeforeDrain()
			w.draining.Break()
			w.endStreamSignaled.Break()
			w.notifyPushSamples()
			return
		}

		if !w.active.Load() {
			return
		}
		if w.pub.IsMuted() || time.Since(lastRecv) > w.conf.Latency.JitterBufferLatency {
			// set track inactive
			w.logTrackState("track inactive")
			w.active.Store(false)
			if w.buildReady.IsBroken() {
				w.callbacks.OnTrackMuted(w.track.ID())
			}
		}

	case err.Error() == errBufferTooSmall:
		w.logger.Warnw("read error", err)

	default:
		// ensure selector switches before EOS propagation to avoid encoder errors
		w.ensureRemovedBeforeDrain()
		if !errors.Is(err, io.EOF) {
			w.logger.Errorw("could not read packet", err)
		} else {
			w.logger.Debugw("read EOF, signaling end of stream")
		}
		w.draining.Break()
		w.endStreamSignaled.Break()
		w.notifyPushSamples()
	}
}

// SetTimeProvider swaps the pipeline time provider; a nil argument installs
// the no-op provider so callers never observe a nil provider.
func (w *AppWriter) SetTimeProvider(tp gstreamer.TimeProvider) {
	w.tpLock.Lock()
	if tp == nil {
		tp = gstreamer.NopTimeProvider()
	}
	w.timeProvider = tp
	w.tpLock.Unlock()
}

// waitFor blocks until ch closes (true) or the writer starts draining
// (false). A nil channel is treated as already ready.
func (w *AppWriter) waitFor(ch <-chan struct{}) bool {
	if ch == nil {
		return true
	}
	select {
	case <-ch:
		return true
	case <-w.draining.Watch():
		return false
	}
}

// pipelineRunningTime returns the current pipeline running time from the
// installed time provider.
func (w *AppWriter) pipelineRunningTime() (time.Duration, bool) {
	w.tpLock.RLock()
	provider := w.timeProvider
	w.tpLock.RUnlock()
	return provider.RunningTime()
}

// pipelinePlayhead returns the current playhead position from the installed
// time provider.
func (w *AppWriter) pipelinePlayhead() (time.Duration, bool) {
	w.tpLock.RLock()
	provider := w.timeProvider
	w.tpLock.RUnlock()
	return provider.PlayheadPosition()
}

// logTrackState logs a state-transition event annotated with writer uptime
// and, when available, the pipeline time and playhead.
func (w *AppWriter) logTrackState(event string) {
	fields := []any{"timestamp", time.Since(w.startTime)}
	if pipelineTime, ok := w.pipelineRunningTime(); ok {
		fields = append(fields, "pipeline_time", pipelineTime)
	}
	if playhead, ok := w.pipelinePlayhead(); ok {
		fields = append(fields, "playhead", playhead)
	}
	w.logger.Debugw(event, fields...)
}

// onKeyframeRequired forwards keyframe requests as a PLI unless the writer
// has already finished or the track has no PLI sender (audio).
func (w *AppWriter) onKeyframeRequired() {
	if w.finished.IsBroken() || w.sendPLI == nil {
		return
	}
	w.sendPLI()
}

// notifyPushSamples wakes the pushSamples goroutine.
func (w *AppWriter) notifyPushSamples() {
	w.samplesLock.Lock()
	w.samplesCond.Broadcast()
	w.samplesLock.Unlock()
}

// onPacket is the jitter buffer callback: it appends the sample to the
// bounded FIFO queue consumed by pushSamples, dropping from the head (and
// counting the drops) when the queue exceeds cSamplesQueueDepth.
func (w *AppWriter) onPacket(sample []jitter.ExtPacket) {
	w.samplesLock.Lock()
	item := &sampleItem{sample, nil}
	if w.samplesHead == nil {
		w.samplesHead = item
		w.samplesTail = w.samplesHead
		w.samplesLen = 1
	} else {
		w.samplesTail.next = item
		w.samplesTail = item
		w.samplesLen++
	}
	// drop old samples if queue is overflowing
	for w.samplesLen > cSamplesQueueDepth {
		if w.samplesHead != nil {
			itemToDrop := w.samplesHead
			w.samplesHead = w.samplesHead.next
			w.samplesLen--
			w.stats.packetsDropped.Add(uint64(len(itemToDrop.sample)))
			w.logger.Warnw("buffer full, dropping sample", nil, "numPackets", len(itemToDrop.sample))
		}
		if w.samplesHead == nil {
			w.samplesTail = nil
			w.samplesLen = 0
		}
	}
	w.samplesCond.Broadcast()
	w.samplesLock.Unlock()
}

// pushSamples is the consumer goroutine: after the pipeline is paused and
// playing, it dequeues samples and pushes each packet to the appsrc. It exits
// (breaking endStreamSignaled and endStreamProcessed) once the source side is
// fully processed and the queue is empty, or on an unrecoverable push error.
func (w *AppWriter) pushSamples() {
	defer func() {
		w.endStreamSignaled.Break()
		w.endStreamProcessed.Break()
		w.logger.Debugw("pushSamples finished")
	}()

	if !w.waitFor(w.callbacks.PipelinePaused()) {
		return
	}
	if !w.waitFor(w.playing.Watch()) {
		return
	}

	for {
		w.samplesLock.Lock()
		for w.samplesHead == nil && !w.endStreamSourceProcessed.IsBroken() {
			w.samplesCond.Wait()
		}
		if w.endStreamSourceProcessed.IsBroken() && w.samplesHead == nil {
			// drained: nothing left to push
			w.samplesLock.Unlock()
			return
		}
		item := w.samplesHead
		w.samplesHead = item.next
		w.samplesLen--
		if w.samplesHead == nil {
			w.samplesTail = nil
		}
		w.samplesLock.Unlock()

		for _, pkt := range item.sample {
			if err := w.pushPacket(pkt); err != nil {
				if errors.Is(err, errFlowFlushingThreshold) {
					// persistent FlowFlushing: try replacing the source bin
					if w.tryRecoverFromFlushing() {
						continue
					}
					w.draining.Break()
					w.notifyPushSamples()
					return
				}
				// out-of-order / too-old packets are skipped, anything else ends the stream
				if !utils.ErrorIsOneOf(err, synchronizer.ErrPacketOutOfOrder, synchronizer.ErrPacketTooOld) {
					w.draining.Break()
					w.notifyPushSamples()
					return
				}
			}
		}
	}
}

// pushPacket translates, timestamps, and pushes one RTP packet into the
// appsrc, tracking FlowFlushing streaks and discontinuities.
func (w *AppWriter) pushPacket(pkt jitter.ExtPacket) error {
w.translator.Translate(pkt.Packet) // get PTS pts, err := w.GetPTS(pkt) if err != nil { w.stats.packetsDropped.Inc() return err } if pts < 0 { // TODO: handle it by sending new gst segment that will reflect the offset w.logger.Debugw("negative packet pts, dropping", "pts", pts) w.stats.packetsDropped.Inc() return nil } p, err := pkt.Marshal() if err != nil { w.stats.packetsDropped.Inc() w.logger.Errorw("could not marshal packet", err) return err } b := gst.NewBufferFromBytes(p) b.SetPresentationTimestamp(gst.ClockTime(uint64(pts))) if isDiscontinuity(w.lastPTS, pts) { if w.shouldHandleDiscontinuity() { w.logger.Debugw("discontinuity detected", "pts", pts, "lastPTS", w.lastPTS) ok := w.src.SendEvent(gst.NewFlushStartEvent()) if !ok { w.logger.Errorw("failed to send flush start event", nil) } ok = w.src.SendEvent(gst.NewFlushStopEvent(false)) if !ok { w.logger.Errorw("failed to send flush stop event", nil) } } b.SetFlags(b.GetFlags() | gst.BufferFlagDiscont) } if flow := w.src.PushBuffer(b); flow != gst.FlowOK { w.stats.packetsDropped.Inc() if flow == gst.FlowFlushing { w.flushingCount++ if w.flushingCount == 1 { w.logger.Infow("FlowFlushing detected", "appsrcState", w.src.Element.GetCurrentState().String()) if w.flushDotRequested.CompareAndSwap(false, true) { w.callbacks.OnDebugDotRequest("appsrc_flush_" + w.track.ID()) } } if w.flushingCount >= flushingThreshold { return errFlowFlushingThreshold } } else { w.logger.Infow("unexpected flow return", "flow", flow, "appsrcState", w.src.Element.GetCurrentState().String()) } } else if w.flushingCount > 0 { w.logger.Infow("FlowFlushing cleared after successful push", "previousCount", w.flushingCount) w.flushingCount = 0 } w.lastPushed.Store(time.Now()) w.lastPTS = pts w.maybeCheckPipelineLag(pts) return nil } // tryRecoverFromFlushing attempts to recover from persistent FlowFlushing by // removing the stuck source bin and replacing it with a new one. // Returns true if recovery succeeded and pushing can continue. 
func (w *AppWriter) tryRecoverFromFlushing() bool {
	// never attempt recovery while shutting down
	if w.draining.IsBroken() {
		w.logger.Debugw("skipping FlowFlushing recovery: draining")
		return false
	}
	if w.unsubscribed.IsBroken() {
		w.logger.Debugw("skipping FlowFlushing recovery: unsubscribed")
		return false
	}
	if w.endStreamSignaled.IsBroken() {
		w.logger.Debugw("skipping FlowFlushing recovery: end stream signaled")
		return false
	}
	if w.srcResetCount >= maxSrcResets {
		w.logger.Warnw("max FlowFlushing recovery attempts reached, giving up", nil, "attempts", w.srcResetCount)
		return false
	}

	w.logger.Infow("attempting FlowFlushing recovery via source bin reset",
		"flushingCount", w.flushingCount,
		"attempt", w.srcResetCount+1)

	oldAppSrc := w.trackSource.AppSrc

	// Call the builder layer to force-remove the old bin and add a new one.
	// The callback updates ts.AppSrc to the new appsrc on success.
	if err := w.callbacks.OnSourceBinReset(w.trackSource); err != nil {
		w.logger.Errorw("FlowFlushing recovery failed", err)
		return false
	}
	if w.trackSource.AppSrc == oldAppSrc {
		w.logger.Errorw("FlowFlushing recovery: no handler replaced the appsrc", nil)
		return false
	}

	w.src = w.trackSource.AppSrc
	w.flushingCount = 0
	w.srcResetCount++
	w.logger.Infow("FlowFlushing recovery succeeded, continuing with new appsrc", "totalResets", w.srcResetCount)
	return true
}

// maybeCheckPipelineLag periodically (every pipelineCheckInterval of PTS)
// compares the packet PTS against the pipeline running time and warns when
// the packet is so old the audio mixer would drop it.
func (w *AppWriter) maybeCheckPipelineLag(pts time.Duration) {
	if pts-w.lastPipelineCheckPTS < pipelineCheckInterval {
		return
	}
	pipelineTime, ok := w.pipelineRunningTime()
	if !ok {
		return
	}
	w.lastPipelineCheckPTS = pts
	if pipelineTime <= w.conf.Latency.AudioMixerLatency {
		return
	}
	if pts < pipelineTime-w.conf.Latency.AudioMixerLatency {
		w.logger.Warnw(
			"packet PTS too far in the past compared to the pipeline, mixer will drop the buffer!", nil,
			"pts", pts,
			"pipelineRunningTime", pipelineTime,
		)
	}
}

// Playing signals that the pipeline has reached the playing state.
func (w *AppWriter) Playing() {
	w.playing.Break()
}

// Drain blocks until finished
func (w *AppWriter) Drain(force bool) {
	w.draining.Once(func() {
		w.logger.Debugw("draining", "force", force)

		endStream := func() {
			w.endStreamSignaled.Break()
			w.notifyPushSamples()
		}
		if force || !w.active.Load() {
			// end immediately
			endStream()
		} else {
			// allow in-flight media to arrive before ending
			time.AfterFunc(w.conf.Latency.PipelineLatency, endStream)
		}
	})

	<-w.finished.Watch()
	w.logger.Debugw("finished fuse broken")
	w.synchronizer.RemoveTrack(w.track.ID())
}

// OnUnsubscribed signals that the track was unsubscribed but allows the reader
// to continue reading until an error occurs or grace period elapses.
// This allows any remaining buffers in flight from the SFU to be processed.
func (w *AppWriter) OnUnsubscribed() {
	w.unsubscribed.Break()
	w.logger.Debugw("track unsubscribed, continuing to read until error or grace period")
}

// Finished returns a channel that is closed when the writer has finished.
func (w *AppWriter) Finished() <-chan struct{} {
	return w.finished.Watch()
}

// logStats writes track stats to the CSV logger every 10 seconds, plus one
// final row when the stream ends.
func (w *AppWriter) logStats() {
	ended := w.endStreamSignaled.Watch()
	ticker := time.NewTicker(time.Second * 10)
	defer ticker.Stop()
	for {
		select {
		case <-ended:
			stats := w.getStats()
			w.csvLogger.Write(stats)
			w.csvLogger.Close()
			w.logger.Infow("appwriter stats ", "stats", stats, "requestType", w.conf.RequestType)
			return
		case <-ticker.C:
			stats := w.getStats()
			w.csvLogger.Write(stats)
		}
	}
}

// getStats snapshots jitter buffer and writer counters into a TrackStats row.
func (w *AppWriter) getStats() *logging.TrackStats {
	stats := w.buffer.Stats()
	return &logging.TrackStats{
		Timestamp:       time.Now().Format(time.DateTime),
		PacketsReceived: stats.PacketsPushed,
		PaddingReceived: stats.PaddingPushed,
		LastReceived:    w.lastReceived.Load().Format(time.DateTime),
		PacketsDropped:  stats.PacketsDropped + w.stats.packetsDropped.Load(),
		PacketsPushed:   stats.PacketsPopped,
		SamplesPushed:   stats.SamplesPopped,
		LastPushed:      w.lastPushed.Load().Format(time.DateTime),
		Drift:           w.drift.Load(),
		MaxDrift:        w.maxDrift.Load(),
	}
}

// updateDrift records the latest drift and lock-free CAS-updates the maximum
// absolute drift seen so far.
func (w *AppWriter) updateDrift(drift time.Duration) {
	w.drift.Store(drift)
	for {
		maxDrift := w.maxDrift.Load()
		if drift.Abs() <= maxDrift.Abs() {
			break
		}
		if w.maxDrift.CompareAndSwap(maxDrift, drift) {
			break
		}
	}
}

// shouldHandleDiscontinuity reports whether discontinuities should trigger an
// appsrc flush: only for audio tracks with the tempo controller enabled.
func (w *AppWriter) shouldHandleDiscontinuity() bool {
	return w.track.Kind() == webrtc.RTPCodecTypeAudio && w.conf.AudioTempoController.Enabled
}

// TrackKind returns the track's codec type (audio or video).
func (w *AppWriter) TrackKind() webrtc.RTPCodecType {
	return w.track.Kind()
}

// drainJitterBuffer closes and flushes the jitter buffer, then signals
// pushSamples that no further samples will arrive from the source.
func (w *AppWriter) drainJitterBuffer() {
	w.logger.Debugw("draining jitter buffer")
	w.buffer.Close()
	w.buffer.Flush()
	w.logger.Debugw("jitter buffer flushed")
	w.endStreamSourceProcessed.Break()
	w.notifyPushSamples()
}

// isDiscontinuity reports whether pts jumped forward past lastPTS by more
// than discontinuityTolerance.
func isDiscontinuity(lastPTS time.Duration, pts time.Duration) bool {
	return pts > lastPTS+discontinuityTolerance
}

// shouldRemoveBeforeDrain reports whether the track must be removed from the
// pipeline before EOS: video tracks in participant, room composite, and media
// requests.
func (w *AppWriter) shouldRemoveBeforeDrain() bool {
	return w.track.Kind() == webrtc.RTPCodecTypeVideo &&
		(w.conf.RequestType == types.RequestTypeParticipant ||
			w.conf.RequestType == types.RequestTypeRoomComposite ||
			w.conf.RequestType == types.RequestTypeMedia)
}

// ensureRemovedBeforeDrain fires OnTrackRemoved exactly once when removal
// before draining is required.
func (w *AppWriter) ensureRemovedBeforeDrain() {
	if w.shouldRemoveBeforeDrain() && w.removalRequested.CompareAndSwap(false, true) {
		w.callbacks.OnTrackRemoved(w.track.ID())
	}
}

// G711Packet is a pass-through rtp.Depacketizer for PCMU/PCMA payloads.
type G711Packet struct{}

func (p *G711Packet) Unmarshal(packet []byte) ([]byte, error) {
	// G.711 payload is just the raw samples, return as-is (same as OpusPacket)
	if packet == nil {
		return nil, errors.New("nil packet")
	}
	return packet, nil
}

func (p *G711Packet) IsPartitionHead(_ []byte) bool {
	return true
}

func (p *G711Packet) IsPartitionTail(_ bool, _ []byte) bool {
	return true
}

================================================
FILE: pkg/pipeline/source/sdk/translator.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and // limitations under the License. package sdk import ( "time" "github.com/pion/rtp" "github.com/livekit/livekit-server/pkg/sfu/buffer" "github.com/livekit/livekit-server/pkg/sfu/codecmunger" "github.com/livekit/protocol/logger" ) type Translator interface { Translate(*rtp.Packet) } // VP8 type VP8Translator struct { logger logger.Logger firstPktPushed bool lastSN uint16 vp8Munger *codecmunger.VP8 } func NewVP8Translator(logger logger.Logger) *VP8Translator { return &VP8Translator{ logger: logger, vp8Munger: codecmunger.NewVP8(logger), } } func (t *VP8Translator) Translate(pkt *rtp.Packet) { defer func() { t.lastSN = pkt.SequenceNumber }() if len(pkt.Payload) == 0 { return } vp8Packet := buffer.VP8{} if err := vp8Packet.Unmarshal(pkt.Payload); err != nil { t.logger.Warnw("could not unmarshal VP8 packet", err) return } extPkt := &buffer.ExtPacket{ Packet: pkt, Arrival: time.Now().UnixNano(), Payload: vp8Packet, IsKeyFrame: vp8Packet.IsKeyFrame, VideoLayer: buffer.VideoLayer{ Spatial: -1, Temporal: int32(vp8Packet.TID), }, } if !t.firstPktPushed { t.firstPktPushed = true t.vp8Munger.SetLast(extPkt) } else { payload := make([]byte, 1460) incomingHeaderSize, header, err := t.vp8Munger.UpdateAndGet(extPkt, false, pkt.SequenceNumber != t.lastSN+1, extPkt.Temporal) if err != nil { t.logger.Warnw("could not update VP8 packet", err) return } copy(payload, header) n := copy(payload[len(header):], extPkt.Packet.Payload[incomingHeaderSize:]) pkt.Payload = payload[:len(header)+n] } } // Null type NullTranslator struct{} func NewNullTranslator() Translator { return &NullTranslator{} } func (t *NullTranslator) Translate(_ *rtp.Packet) {} ================================================ FILE: pkg/pipeline/source/sdk.go ================================================ // Copyright 2023 LiveKit, Inc. 
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package source

import (
	"context"
	"fmt"
	"strings"
	"time"

	"github.com/frostbyte73/core"
	"github.com/linkdata/deadlock"
	"github.com/pion/webrtc/v4"
	"go.uber.org/atomic"

	"github.com/livekit/protocol/livekit"
	"github.com/livekit/protocol/logger"
	lksdk "github.com/livekit/server-sdk-go/v2"
	"github.com/livekit/server-sdk-go/v2/pkg/synchronizer"

	"github.com/livekit/egress/pkg/config"
	"github.com/livekit/egress/pkg/errors"
	"github.com/livekit/egress/pkg/gstreamer"
	"github.com/livekit/egress/pkg/types"
)

const (
	subscriptionTimeout = time.Second * 30
)

// SDKSource connects to a LiveKit room, subscribes to the requested tracks,
// and manages the per-track workers that feed media into the pipeline.
type SDKSource struct {
	*config.PipelineConfig
	callbacks *gstreamer.Callbacks

	room *lksdk.Room
	sync *synchronizer.Synchronizer

	mu          deadlock.Mutex
	initialized core.Fuse

	// filenameReplacements collects template substitutions ({room_name}, etc.)
	filenameReplacements map[string]string
	// audioChannels maps trackID -> mixer channel from matched audio routes
	audioChannels map[string]livekit.AudioChannel

	workersMu deadlock.RWMutex
	workers   map[string]*trackWorker

	// subLock prevents a race where a subscription starts during init completion.
	// Without it, the subscription could see "not yet initialized", then init completes,
	// leaving the track orphaned (missed by both pipeline build and dynamic add).
	subLock deadlock.RWMutex

	closing atomic.Bool
	active  atomic.Int32
	closed  core.Fuse

	startRecording core.Fuse
	endRecording   core.Fuse

	timeProvider   atomic.Pointer[gstreamer.TimeProvider]
	initResultChan atomic.Pointer[chan subscriptionResult]
}

// subscriptionResult reports one track's subscription outcome during init.
type subscriptionResult struct {
	trackID string
	err     error
}

// NewSDKSource builds the synchronizer with latency options derived from the
// request type, connects to the room, and awaits the initial subscriptions.
func NewSDKSource(ctx context.Context, p *config.PipelineConfig, callbacks *gstreamer.Callbacks) (*SDKSource, error) {
	_, span := tracer.Start(ctx, "SDKInput.New")
	defer span.End()

	s := &SDKSource{
		PipelineConfig:       p,
		callbacks:            callbacks,
		filenameReplacements: make(map[string]string),
		audioChannels:        make(map[string]livekit.AudioChannel),
		workers:              make(map[string]*trackWorker),
	}

	logger.Debugw("latency config", "latency", p.Latency)
	opts := []synchronizer.SynchronizerOption{
		synchronizer.WithMaxTsDiff(p.Latency.RTPMaxAllowedTsDiff),
		synchronizer.WithMaxDriftAdjustment(p.Latency.RTPMaxDriftAdjustment),
		synchronizer.WithDriftAdjustmentWindowPercent(p.Latency.RTPDriftAdjustmentWindowPercent),
		synchronizer.WithOldPacketThreshold(p.Latency.OldPacketThreshold),
		synchronizer.WithOnStarted(func() {
			s.startRecording.Break()
		}),
	}
	if p.RequestType == types.RequestTypeRoomComposite || p.RequestType == types.RequestTypeTemplate {
		// Enable Packet Burst Estimator for Room Composite requests
		opts = append(opts, synchronizer.WithStartGate())
	}

	// time provider is not available yet, will be set later
	// add some leeway to the mixer latency
	opts = append(opts, synchronizer.WithMediaRunningTime(nil, p.Latency.AudioMixerLatency+200*time.Millisecond))

	if s.shouldEnableOneShotSenderReportSync() {
		opts = append(opts, synchronizer.WithSenderReportSyncMode(synchronizer.SenderReportSyncModeOneShot))
		opts = append(opts, synchronizer.WithOneShotDriftCorrectionThreshold(
			time.Duration(float64(p.Latency.AudioMixerLatency)*0.8),
		))
	} else if s.shouldDisableAudioPTSAdjustment() {
		opts = append(opts, synchronizer.WithSenderReportSyncMode(synchronizer.SenderReportSyncModeWithoutRebase))
		opts =
append(opts, synchronizer.WithAudioPTSAdjustmentDisabled())
	} else {
		opts = append(opts, synchronizer.WithSenderReportSyncMode(synchronizer.SenderReportSyncModeRebase))
	}
	if p.AudioTempoController.Enabled {
		logger.Debugw("audio tempo controller enabled", "adjustmentRate", p.AudioTempoController.AdjustmentRate)
	}
	s.sync = synchronizer.NewSynchronizerWithOptions(
		opts...,
	)

	if err := s.joinRoom(); err != nil {
		s.disconnectRoom()
		return nil, err
	}

	return s, nil
}

// StartRecording returns a channel closed when the synchronizer starts.
func (s *SDKSource) StartRecording() <-chan struct{} {
	return s.startRecording.Watch()
}

// EndRecording returns a channel closed when recording should end.
func (s *SDKSource) EndRecording() <-chan struct{} {
	return s.endRecording.Watch()
}

// Playing forwards the pipeline's playing notification to the track's worker.
func (s *SDKSource) Playing(trackID string) {
	s.workersMu.RLock()
	w := s.workers[trackID]
	s.workersMu.RUnlock()
	if w == nil {
		return
	}
	gen := w.generation.Load()
	s.submitOp(trackID, Operation{Type: OpPlaying, Generation: gen})
}

func (s *SDKSource) GetStartedAt() int64 {
	return s.sync.GetStartedAt()
}

func (s *SDKSource) GetEndedAt() int64 {
	return s.sync.GetEndedAt()
}

// CloseWriters (once) ends the synchronizer and asks every live worker to
// close, skipping workers that have already exited.
func (s *SDKSource) CloseWriters() {
	s.closed.Once(func() {
		s.closing.Store(true)
		s.sync.End()

		s.workersMu.RLock()
		workers := make([]*trackWorker, 0, len(s.workers))
		for _, w := range s.workers {
			workers = append(workers, w)
		}
		s.workersMu.RUnlock()

		for _, w := range workers {
			select {
			case w.opChan <- Operation{Type: OpClose}:
			case <-w.done.Watch():
				// already exited
			}
		}
	})
}

// StreamStopped maps a gst element name ("app_<trackID>") back to its worker
// and submits a finished op.
func (s *SDKSource) StreamStopped(elementName string) {
	trackID := strings.TrimPrefix(elementName, "app_")

	// Only send finished if we have a worker for this track
	s.workersMu.RLock()
	_, exists := s.workers[trackID]
	s.workersMu.RUnlock()
	if !exists {
		return // No worker for this track, nothing to clean up
	}

	s.submitOp(trackID, Operation{Type: OpFinished})
}

func (s *SDKSource) Close() {
	s.disconnectRoom()
}

// SetTimeProvider installs (or clears) the pipeline time provider on the
// synchronizer and fans it out to every worker without blocking.
func (s *SDKSource) SetTimeProvider(tp gstreamer.TimeProvider) {
	s.timeProvider.Store(&tp)
	if tp != nil {
		s.sync.SetMediaRunningTime(tp.RunningTime)
	} else {
		s.sync.SetMediaRunningTime(nil)
	}
	s.workersMu.RLock()
	for _, w := range s.workers {
		select {
		case w.opChan <- Operation{Type: OpSetTimeProvider, TimeProvider: tp}:
		default:
			logger.Warnw("failed to send SetTimeProvider, channel full", nil, "trackID", w.trackID)
		}
	}
	s.workersMu.RUnlock()
}

// ----- Subscriptions -----

// joinRoom connects to the room with auto-subscribe disabled, wires the
// request-type-specific callbacks, awaits the initial track subscriptions,
// and pushes the derived file identifier / dimensions into the egress info.
func (s *SDKSource) joinRoom() error {
	cb := &lksdk.RoomCallback{
		ParticipantCallback: lksdk.ParticipantCallback{
			OnTrackSubscribed:   s.onTrackSubscribed,
			OnTrackMuted:        s.onTrackMuted,
			OnTrackUnmuted:      s.onTrackUnmuted,
			OnTrackUnsubscribed: s.onTrackUnsubscribed,
		},
		OnDisconnected: s.onDisconnected,
	}
	switch s.RequestType {
	case types.RequestTypeRoomComposite, types.RequestTypeTemplate, types.RequestTypeMedia:
		cb.OnTrackPublished = s.onTrackPublished
	case types.RequestTypeParticipant:
		cb.OnTrackPublished = s.onTrackPublished
		cb.OnParticipantDisconnected = s.onParticipantDisconnected
	}

	logger.Debugw("connecting to room")
	room, err := lksdk.ConnectToRoomWithToken(s.WsUrl, s.Token, cb, lksdk.WithAutoSubscribe(false))
	if err != nil {
		return err
	}
	s.room = room

	var fileIdentifier string
	var w, h uint32
	switch s.RequestType {
	case types.RequestTypeRoomComposite:
		fileIdentifier = s.room.Name()
		// room_name and room_id are already handled as replacements
		err = s.awaitRoomTracks()

	case types.RequestTypeTemplate:
		if s.Info.RoomName != "" {
			fileIdentifier = s.Info.RoomName
		} else {
			fileIdentifier = s.room.Name()
			s.filenameReplacements["{room_name}"] = s.room.Name()
		}
		err = s.awaitRoomTracks()

	case types.RequestTypeParticipant:
		fileIdentifier = s.Identity
		s.filenameReplacements["{publisher_identity}"] = s.Identity
		w, h, err = s.awaitParticipantTracks(s.Identity)

	case types.RequestTypeTrackComposite:
		fileIdentifier = s.Info.RoomName
		tracks := make(map[string]struct{})
		if s.AudioEnabled {
			tracks[s.AudioTrackID] = struct{}{}
		}
		if s.VideoEnabled {
			tracks[s.VideoTrackID] = struct{}{}
		}
		w, h, err = s.awaitTracks(tracks)

	case types.RequestTypeTrack:
		fileIdentifier = s.TrackID
		w, h, err = s.awaitTracks(map[string]struct{}{s.TrackID: {}})

	case types.RequestTypeMedia:
		if s.Info.RoomName != "" {
			fileIdentifier = s.Info.RoomName
		} else {
			fileIdentifier = s.room.Name()
			s.filenameReplacements["{room_name}"] = s.room.Name()
		}
		w, h, err = s.awaitMediaTracks()
	}
	if err != nil {
		return err
	}

	if err = s.UpdateInfoFromSDK(fileIdentifier, s.filenameReplacements, w, h); err != nil {
		logger.Errorw("could not update file params", err)
		return err
	}

	return nil
}

// startAwaitingTracks installs a buffered channel that subscription handlers
// report into during init.
func (s *SDKSource) startAwaitingTracks(expectedCount int) <-chan subscriptionResult {
	ch := make(chan subscriptionResult, expectedCount)
	s.initResultChan.Store(&ch)
	return ch
}

// StopAwaitingTracks - called after init complete or timeout
func (s *SDKSource) stopAwaitingTracks() {
	s.initResultChan.Store(nil) // just nil out, don't close
}

// completeInit breaks the initialized fuse under subLock, closing the race
// window with concurrent subscription callbacks (see SDKSource.subLock).
func (s *SDKSource) completeInit() {
	s.subLock.Lock()
	defer s.subLock.Unlock()
	s.initialized.Break()
}

// getInitResultChan returns the current init result channel (nil after init complete)
func (s *SDKSource) getInitResultChan() chan<- subscriptionResult {
	if ptr := s.initResultChan.Load(); ptr != nil {
		return *ptr
	}
	return nil
}

// sendInitResult sends result to the init channel if non-nil (non-blocking to avoid deadlock)
func (s *SDKSource) sendInitResult(ch chan<- subscriptionResult, trackID string, err error) {
	if ch == nil {
		return
	}
	select {
	case ch <- subscriptionResult{trackID: trackID, err: err}:
	default:
		logger.Warnw("failed to send init result, channel full", nil, "trackID", trackID)
	}
}

// awaitRoomTracks counts the publications this request should subscribe to,
// soft-waits for them, then completes init.
func (s *SDKSource) awaitRoomTracks() error {
	// await expected subscriptions
	expected := 0
	for _, rp := range s.room.GetRemoteParticipants() {
		pubs := rp.TrackPublications()
		for _, pub := range pubs {
			if s.shouldSubscribe(pub) {
				expected++
			}
		}
	}

	if err := s.awaitExpected(expected); err != nil {
		return err
	}

	s.completeInit()
	return nil
}

// awaitMediaTracks resolves a media request's prerequisites (participants and
// tracks named in the config), soft-waits for the matching subscriptions, and
// returns the subscribed video dimensions.
func (s *SDKSource) awaitMediaTracks() (uint32, uint32, error) {
	// Phase 1: Collect prerequisites from config
	requiredParticipants :=
make(map[string]struct{})
	requiredTracks := make(map[string]struct{})
	if s.Identity != "" {
		requiredParticipants[s.Identity] = struct{}{}
	}
	if s.VideoTrackID != "" {
		requiredTracks[s.VideoTrackID] = struct{}{}
	}
	for _, route := range s.AudioRoutes {
		if route.Match.TrackID != "" {
			requiredTracks[route.Match.TrackID] = struct{}{}
		}
		if route.Match.ParticipantIdentity != "" {
			requiredParticipants[route.Match.ParticipantIdentity] = struct{}{}
		}
	}

	// Phase 2: Wait for prerequisites with shared deadline
	deadline := time.Now().Add(subscriptionTimeout)
	for identity := range requiredParticipants {
		if _, err := s.getParticipant(identity, deadline); err != nil {
			return 0, 0, err
		}
	}
	for trackID := range requiredTracks {
		if err := s.awaitTrackPublication(trackID, deadline); err != nil {
			return 0, 0, err
		}
	}

	// Phase 3: Count all matching subscriptions and soft-wait
	expected := 0
	for _, rp := range s.room.GetRemoteParticipants() {
		for _, pub := range rp.TrackPublications() {
			if s.shouldSubscribeMedia(pub, rp) {
				expected++
			}
		}
	}
	if err := s.awaitExpected(expected); err != nil {
		return 0, 0, err
	}

	// Phase 4: Get video dimensions from subscribed tracks
	var w, h uint32
	for _, rp := range s.room.GetRemoteParticipants() {
		for _, pub := range rp.TrackPublications() {
			if pub.IsSubscribed() && pub.Kind() == lksdk.TrackKindVideo {
				if info := pub.TrackInfo(); info != nil {
					w = info.Width
					h = info.Height
				}
			}
		}
	}

	s.completeInit()
	return w, h, nil
}

// awaitParticipantTracks waits for the named participant, soft-waits for its
// subscribable publications, and returns the subscribed video dimensions.
func (s *SDKSource) awaitParticipantTracks(identity string) (uint32, uint32, error) {
	rp, err := s.getParticipant(identity, time.Now().Add(subscriptionTimeout))
	if err != nil {
		return 0, 0, err
	}

	// await expected subscriptions
	pubs := rp.TrackPublications()
	expected := 0
	for _, pub := range pubs {
		if s.shouldSubscribe(pub) {
			expected++
		}
	}
	if err = s.awaitExpected(expected); err != nil {
		return 0, 0, err
	}

	// get dimensions after subscribing so that track info exists
	var w, h uint32
	for _, t := range pubs {
		if t.TrackInfo().Type == livekit.TrackType_VIDEO && t.IsSubscribed() {
			w = t.TrackInfo().Width
			h = t.TrackInfo().Height
		}
	}

	s.completeInit()
	return w, h, nil
}

// awaitExpected soft-waits (3s) for the expected number of subscription
// results; only an explicit subscription error fails, a timeout does not.
func (s *SDKSource) awaitExpected(expected int) error {
	if expected == 0 {
		return nil
	}

	resultChan := s.startAwaitingTracks(expected)
	defer s.stopAwaitingTracks()

	subscribed := 0
	deadline := time.After(time.Second * 3)
	for subscribed < expected {
		select {
		case sub := <-resultChan:
			if sub.err != nil {
				return sub.err
			}
			subscribed++
		case <-deadline:
			return nil
		}
	}
	return nil
}

// getParticipant polls the room until the participant appears or the deadline
// passes.
func (s *SDKSource) getParticipant(identity string, deadline time.Time) (*lksdk.RemoteParticipant, error) {
	for time.Now().Before(deadline) {
		for _, p := range s.room.GetRemoteParticipants() {
			if p.Identity() == identity {
				return p, nil
			}
		}
		time.Sleep(100 * time.Millisecond)
	}
	return nil, errors.ErrParticipantNotFound(identity)
}

// awaitTrackPublication polls the room until the track is published or the
// deadline passes.
func (s *SDKSource) awaitTrackPublication(trackID string, deadline time.Time) error {
	for time.Now().Before(deadline) {
		for _, p := range s.room.GetRemoteParticipants() {
			for _, pub := range p.TrackPublications() {
				if pub.SID() == trackID {
					return nil
				}
			}
		}
		time.Sleep(100 * time.Millisecond)
	}
	return errors.ErrTrackNotFound(trackID)
}

// awaitTracks subscribes to an explicit set of track IDs (track and track
// composite requests), hard-waits for each subscription result, and returns
// the video dimensions.
func (s *SDKSource) awaitTracks(expecting map[string]struct{}) (uint32, uint32, error) {
	trackCount := len(expecting)
	if trackCount == 0 {
		s.completeInit()
		return 0, 0, nil
	}

	waiting := make(map[string]struct{})
	for trackID := range expecting {
		waiting[trackID] = struct{}{}
	}

	// Set up init coordination - processIdleOp will send results here
	resultChan := s.startAwaitingTracks(trackCount)
	defer s.stopAwaitingTracks()

	deadline := time.After(subscriptionTimeout)
	tracks, err := s.subscribeToTracks(expecting, deadline)
	if err != nil {
		return 0, 0, err
	}

	for i := 0; i < trackCount; i++ {
		select {
		case result := <-resultChan:
			if result.err != nil {
				return 0, 0, result.err
			}
			delete(waiting, result.trackID)
		case <-deadline:
			// report an arbitrary still-missing track
			for trackID := range waiting {
				return 0, 0, errors.ErrTrackNotFound(trackID)
			}
		}
	}

	var w, h uint32
	for _, t := range tracks {
		if t.TrackInfo().Type == livekit.TrackType_VIDEO {
			w = t.TrackInfo().Width
			h = t.TrackInfo().Height
		}
	}

	s.completeInit()
	return w, h, nil
}

// subscribeToTracks polls the room for the expected track IDs, validating
// kinds against AudioTrackID/VideoTrackID, and subscribes to each as found.
func (s *SDKSource) subscribeToTracks(expecting map[string]struct{}, deadline <-chan time.Time) ([]lksdk.TrackPublication, error) {
	var tracks []lksdk.TrackPublication
	for {
		select {
		case <-deadline:
			for trackID := range expecting {
				return nil, errors.ErrTrackNotFound(trackID)
			}
		default:
			for _, p := range s.room.GetRemoteParticipants() {
				for _, track := range p.TrackPublications() {
					trackID := track.SID()
					if _, ok := expecting[trackID]; ok {
						if trackID == s.AudioTrackID && track.Kind() == lksdk.TrackKindVideo {
							return nil, errors.ErrInvalidInput("audio_track_id")
						} else if trackID == s.VideoTrackID && track.Kind() == lksdk.TrackKindAudio {
							return nil, errors.ErrInvalidInput("video_track_id")
						}
						if err := s.subscribe(track); err != nil {
							return nil, err
						}
						tracks = append(tracks, track)
						delete(expecting, track.SID())
						if len(expecting) == 0 {
							return tracks, nil
						}
					}
				}
			}
			time.Sleep(100 * time.Millisecond)
		}
	}
}

// subscribe sets the publication subscribed and wires RTCP sender reports
// into the synchronizer; already-subscribed tracks are a no-op.
func (s *SDKSource) subscribe(track lksdk.TrackPublication) error {
	if pub, ok := track.(*lksdk.RemoteTrackPublication); ok {
		if pub.IsSubscribed() {
			return nil
		}
		logger.Infow("subscribing to track", "trackID", track.SID())
		pub.OnRTCP(s.sync.OnRTCP)
		return pub.SetSubscribed(true)
	}
	return errors.ErrSubscriptionFailed
}

// ----- Callbacks -----

// onTrackSubscribed routes a new subscription to the track's worker.
func (s *SDKSource) onTrackSubscribed(track *webrtc.TrackRemote, pub *lksdk.RemoteTrackPublication, rp *lksdk.RemoteParticipant) {
	// After init completes, only request types that support dynamic tracks
	// (participant, room composite, template, media) accept new subscriptions
	if s.shouldSkipTrackSubscriptions() {
		return
	}

	trackID := pub.SID()

	// Capture result channel at submission time (nil after init complete)
	resultChan := s.getInitResultChan()

	s.submitOp(trackID, Operation{
		Type:              OpSubscribe,
		Track:             track,
		Pub:               pub,
		RemoteParticipant: rp,
		ResultChan:        resultChan,
	})
}

// onTrackPublished subscribes to newly published tracks that match the
// request's criteria.
func (s *SDKSource) onTrackPublished(pub
*lksdk.RemoteTrackPublication, rp *lksdk.RemoteParticipant) {
	if s.RequestType != types.RequestTypeParticipant &&
		s.RequestType != types.RequestTypeRoomComposite &&
		s.RequestType != types.RequestTypeTemplate &&
		s.RequestType != types.RequestTypeMedia {
		return
	}
	if s.RequestType == types.RequestTypeParticipant && rp.Identity() != s.Identity {
		return
	}
	var shouldSub bool
	if s.RequestType == types.RequestTypeMedia {
		shouldSub = s.shouldSubscribeMedia(pub, rp)
	} else {
		shouldSub = s.shouldSubscribe(pub)
	}
	if shouldSub {
		if err := s.subscribe(pub); err != nil {
			logger.Errorw("failed to subscribe to track", err, "trackID", pub.SID())
		}
	} else {
		logger.Infow("ignoring track", "reason", fmt.Sprintf("source %s", pub.Source()))
	}
}

// shouldSubscribe decides, per request type, whether a publication belongs to
// this egress: participant requests filter by source (camera/mic vs screen
// share), room composite and template filter by enabled media kind.
func (s *SDKSource) shouldSubscribe(pub lksdk.TrackPublication) bool {
	switch s.RequestType {
	case types.RequestTypeParticipant:
		switch pub.Source() {
		case livekit.TrackSource_CAMERA, livekit.TrackSource_MICROPHONE:
			return !s.ScreenShare
		default:
			return s.ScreenShare
		}
	case types.RequestTypeRoomComposite, types.RequestTypeTemplate:
		switch pub.Kind() {
		case lksdk.TrackKindAudio:
			return s.AudioEnabled
		case lksdk.TrackKindVideo:
			return s.VideoEnabled
		}
	}
	return false
}

// shouldSubscribeMedia accepts the configured video track or any publication
// matching an audio route, recording the route's channel for the mixer.
func (s *SDKSource) shouldSubscribeMedia(pub lksdk.TrackPublication, rp *lksdk.RemoteParticipant) bool {
	if s.matchesMediaVideo(pub, rp) {
		return true
	}
	if route := s.matchesAudioRoute(pub, rp); route != nil {
		s.mu.Lock()
		s.audioChannels[pub.SID()] = route.Channel
		s.mu.Unlock()
		return true
	}
	return false
}

// matchesAudioRoute returns the first audio route whose match criteria
// (track ID, participant identity, or participant kind) fit the publication.
func (s *SDKSource) matchesAudioRoute(pub lksdk.TrackPublication, rp *lksdk.RemoteParticipant) *config.AudioRouteConfig {
	if pub.Kind() != lksdk.TrackKindAudio {
		return nil
	}
	for i := range s.AudioRoutes {
		route := &s.AudioRoutes[i]
		switch {
		case route.Match.TrackID != "":
			if pub.SID() == route.Match.TrackID {
				return route
			}
		case route.Match.ParticipantIdentity != "":
			if rp.Identity() == route.Match.ParticipantIdentity {
				return route
			}
		case route.Match.ParticipantKind != nil:
			if rp.Kind() == *route.Match.ParticipantKind {
				return route
			}
		}
	}
	return nil
}

// matchesMediaVideo checks a video publication against the configured track
// ID or participant identity + source (camera vs screen share).
func (s *SDKSource) matchesMediaVideo(pub lksdk.TrackPublication, rp *lksdk.RemoteParticipant) bool {
	if pub.Kind() != lksdk.TrackKindVideo {
		return false
	}
	if s.VideoTrackID != "" {
		return pub.SID() == s.VideoTrackID
	}
	if s.Identity != "" {
		if rp.Identity() != s.Identity {
			return false
		}
		if s.ScreenShare {
			return pub.Source() == livekit.TrackSource_SCREEN_SHARE
		}
		return pub.Source() == livekit.TrackSource_CAMERA
	}
	return false
}

// onTrackMuted only logs; mute handling happens in the AppWriter read loop.
func (s *SDKSource) onTrackMuted(pub lksdk.TrackPublication, _ lksdk.Participant) {
	s.workersMu.RLock()
	_, exists := s.workers[pub.SID()]
	s.workersMu.RUnlock()
	if exists {
		logger.Debugw("track muted", "trackID", pub.SID())
	}
}

// onTrackUnmuted only logs; unmute handling happens in the AppWriter read loop.
func (s *SDKSource) onTrackUnmuted(pub lksdk.TrackPublication, _ lksdk.Participant) {
	s.workersMu.RLock()
	_, exists := s.workers[pub.SID()]
	s.workersMu.RUnlock()
	if exists {
		logger.Debugw("track unmuted", "trackID", pub.SID())
	}
}

// onTrackUnsubscribed forwards the unsubscribe to the track's worker.
func (s *SDKSource) onTrackUnsubscribed(_ *webrtc.TrackRemote, pub *lksdk.RemoteTrackPublication, _ *lksdk.RemoteParticipant) {
	trackID := pub.SID()

	// Only send unsubscribe if we have a worker (i.e., we subscribed to this track)
	s.workersMu.RLock()
	_, exists := s.workers[trackID]
	s.workersMu.RUnlock()
	if !exists {
		return // Never subscribed to this track, nothing to do
	}

	logger.Debugw("track unsubscribed", "trackID", trackID)
	s.submitOp(trackID, Operation{Type: OpUnsubscribe})
}

// onParticipantDisconnected ends the recording when the target participant of
// a participant request leaves.
func (s *SDKSource) onParticipantDisconnected(rp *lksdk.RemoteParticipant) {
	if rp.Identity() == s.Identity {
		logger.Debugw("participant disconnected")
		s.finished()
	}
}

func (s *SDKSource) onDisconnected() {
	logger.Warnw("disconnected from room", nil)
	s.finished()
}

func (s *SDKSource) finished() {
	s.endRecording.Break()
}

// shouldSkipTrackSubscriptions reports whether post-init subscriptions should
// be ignored: only participant, room composite, template, and media requests
// accept tracks after initialization.
func (s *SDKSource) shouldSkipTrackSubscriptions() bool {
	return s.initialized.IsBroken() &&
		s.RequestType != types.RequestTypeParticipant &&
		s.RequestType != types.RequestTypeRoomComposite &&
		s.RequestType != types.RequestTypeTemplate &&
		s.RequestType != types.RequestTypeMedia
}

func (s *SDKSource) disconnectRoom() {
	if s.room != nil {
		s.room.Disconnect()
		s.room = nil
	}
}

func (s *SDKSource) shouldUseOneShotSenderReportSync() bool {
	return s.RequestType == types.RequestTypeRoomComposite // one-shot correction is only useful when the audio mixer can drop late audio
}

func (s *SDKSource) shouldEnableOneShotSenderReportSync() bool {
	return s.EnableOneShotSenderReportSync && s.shouldUseOneShotSenderReportSync()
}

func (s *SDKSource) shouldDisableAudioPTSAdjustment() bool {
	return s.RequestType == types.RequestTypeRoomComposite || // SDK room composites are audio only - no need to adjust audio timestamps
		s.RequestType == types.RequestTypeTemplate || // SDK templates are audio only - same as room composite
		s.RequestType == types.RequestTypeTrack || // no A/V sync needed for single track requests
		s.AudioTempoController.Enabled
}

================================================
FILE: pkg/pipeline/source/source.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package source import ( "context" "github.com/livekit/egress/pkg/config" "github.com/livekit/egress/pkg/errors" "github.com/livekit/egress/pkg/gstreamer" "github.com/livekit/egress/pkg/types" ) type Source interface { StartRecording() <-chan struct{} EndRecording() <-chan struct{} GetStartedAt() int64 GetEndedAt() int64 Close() } type TimeAware interface { SetTimeProvider(gstreamer.TimeProvider) } func New(ctx context.Context, p *config.PipelineConfig, callbacks *gstreamer.Callbacks) (Source, error) { switch p.SourceType { case types.SourceTypeWeb: return NewWebSource(ctx, p) case types.SourceTypeSDK: return NewSDKSource(ctx, p, callbacks) default: return nil, errors.ErrInvalidInput("request") } } ================================================ FILE: pkg/pipeline/source/tracer.go ================================================ // Copyright 2025 LiveKit, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package source import "go.opentelemetry.io/otel" var ( tracer = otel.Tracer("github.com/livekit/egress/pkg/pipeline/source") ) ================================================ FILE: pkg/pipeline/source/track_worker.go ================================================ // Copyright 2026 LiveKit, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package source import ( "fmt" "strings" "github.com/go-gst/go-gst/gst" "github.com/go-gst/go-gst/gst/app" "github.com/pion/webrtc/v4" "go.uber.org/atomic" "github.com/frostbyte73/core" "github.com/livekit/egress/pkg/config" "github.com/livekit/egress/pkg/errors" "github.com/livekit/egress/pkg/gstreamer" "github.com/livekit/egress/pkg/pipeline/source/sdk" "github.com/livekit/egress/pkg/pipeline/tempo" "github.com/livekit/egress/pkg/types" "github.com/livekit/protocol/logger" lksdk "github.com/livekit/server-sdk-go/v2" ) // TrackState represents the state of a track writer in the per-track worker state machine. type TrackState int const ( TrackStateIdle TrackState = iota // no active writer, ready for subscription TrackStateActive // writer is active and processing samples TrackStateCleaning // writer is draining after unsubscribe ) func (s TrackState) String() string { switch s { case TrackStateIdle: return "IDLE" case TrackStateActive: return "ACTIVE" case TrackStateCleaning: return "CLEANING" default: return "UNKNOWN" } } // OpType represents operations that can be sent to a track worker. 
type OpType int const ( OpSubscribe OpType = iota // track subscribed, create writer OpUnsubscribe // track unsubscribed, start graceful cleanup OpFinished // StreamStopped, immediate cleanup OpPlaying // GStreamer pipeline is playing OpSetTimeProvider // set time provider for PTS calculation OpClose // shutdown, drain and exit worker ) func (o OpType) String() string { switch o { case OpSubscribe: return "Subscribe" case OpUnsubscribe: return "Unsubscribe" case OpFinished: return "Finished" case OpPlaying: return "Playing" case OpSetTimeProvider: return "SetTimeProvider" case OpClose: return "Close" default: return "Unknown" } } // Operation is a message sent to a track worker's operation channel. type Operation struct { Type OpType Track *webrtc.TrackRemote Pub *lksdk.RemoteTrackPublication RemoteParticipant *lksdk.RemoteParticipant Generation uint64 TimeProvider gstreamer.TimeProvider ResultChan chan<- subscriptionResult // for init coordination (nil after init) } // workerState holds the mutable state for a single track worker. // Only accessed by the worker's goroutine, no synchronization needed. type workerState struct { state TrackState writer *sdk.AppWriter generation uint64 } // trackWorker manages the lifecycle of a single track. // Each track gets its own goroutine to serialize operations and avoid cross-track blocking. 
type trackWorker struct {
	trackID    string
	opChan     chan Operation // buffered channel for operations
	done       core.Fuse      // broken when worker exits
	generation atomic.Uint64  // current generation (for Playing coordination)
}

// getOrCreateWorker returns the worker for trackID, lazily creating it (and
// starting its goroutine) if needed. Returns nil when the source is closing.
func (s *SDKSource) getOrCreateWorker(trackID string) *trackWorker {
	// Fast path - worker exists
	s.workersMu.RLock()
	w, exists := s.workers[trackID]
	s.workersMu.RUnlock()
	if exists {
		return w
	}

	// Slow path - need to create worker
	s.workersMu.Lock()
	defer s.workersMu.Unlock()
	if s.closing.Load() {
		return nil
	}
	// Double-check after acquiring write lock
	if w, exists = s.workers[trackID]; exists {
		return w
	}
	w = &trackWorker{
		trackID:    trackID,
		opChan:     make(chan Operation, 100),
		generation: atomic.Uint64{},
	}
	s.workers[trackID] = w
	go s.runWorker(w)
	return w
}

// runWorker is the worker goroutine's main loop: it processes ops serially
// until OpClose, then breaks the done fuse and removes itself from s.workers.
func (s *SDKSource) runWorker(w *trackWorker) {
	defer func() {
		w.done.Break()
		s.workersMu.Lock()
		delete(s.workers, w.trackID)
		s.workersMu.Unlock()
	}()
	state := &workerState{state: TrackStateIdle}
	for op := range w.opChan {
		if exit := s.processOp(w, w.trackID, state, op); exit {
			return // OpClose processed, exit immediately
		}
	}
}

// submitOp enqueues op on the track's worker. The op is dropped when the
// source is closing or the worker has already exited.
func (s *SDKSource) submitOp(trackID string, op Operation) {
	if s.closing.Load() {
		return
	}
	w := s.getOrCreateWorker(trackID)
	if w == nil {
		return
	}
	logger.Debugw("submitting operation", "trackID", trackID, "op", op.Type.String())
	select {
	case w.opChan <- op:
	case <-w.done.Watch():
		// worker already exited, op dropped
	}
}

// reportSubscribeError routes a subscription error to the right sink:
// the pipeline error callback after init, or the init result channel before.
func (s *SDKSource) reportSubscribeError(isPostInit bool, resultChan chan<- subscriptionResult, trackID string, err error) {
	if isPostInit {
		s.callbacks.OnError(err)
	} else {
		s.sendInitResult(resultChan, trackID, err)
	}
}

// validateSubscription rejects combinations the pipeline cannot serve.
func (s *SDKSource) validateSubscription(op Operation) error {
	// Check websocket/video incompatibility for Track requests
	if s.RequestType == types.RequestTypeTrack &&
		op.Pub.Kind() == lksdk.TrackKindVideo &&
		s.Outputs[types.EgressTypeWebsocket] != nil {
		mimeType := types.MimeType(strings.ToLower(op.Track.Codec().MimeType))
		return errors.ErrIncompatible("websocket", mimeType)
	}
	return nil
}

// updatePreInitStateLocked records codec flags, track lists, identity, and
// filename replacements derived from the first subscriptions.
// Caller must hold s.mu (see handleSubscribe).
func (s *SDKSource) updatePreInitStateLocked(op Operation, ts *config.TrackSource) {
	// Update codec flags based on mime type
	switch ts.MimeType {
	case types.MimeTypeOpus, types.MimeTypePCMU, types.MimeTypePCMA:
		s.AudioEnabled = true
		if s.AudioOutCodec == "" {
			if ts.MimeType == types.MimeTypePCMU || ts.MimeType == types.MimeTypePCMA {
				// PCMU/PCMA inputs are transcoded to Opus by default
				s.AudioOutCodec = types.MimeTypeOpus
			} else {
				s.AudioOutCodec = ts.MimeType
			}
		}
		s.AudioTranscoding = true
		s.AudioTracks = append(s.AudioTracks, ts)
	case types.MimeTypeH264, types.MimeTypeVP8, types.MimeTypeVP9:
		s.VideoEnabled = true
		s.VideoInCodec = ts.MimeType
		if s.VideoOutCodec == "" {
			s.VideoOutCodec = ts.MimeType
		}
		if s.VideoInCodec != s.VideoOutCodec {
			// codec mismatch requires a decode (and re-encode for encoded outputs)
			s.VideoDecoding = true
			if len(s.GetEncodedOutputs()) > 0 {
				s.VideoEncoding = true
			}
		}
		s.VideoTrack = ts
	}

	// Set identity and filename replacements based on request type
	track := op.Track
	pub := op.Pub
	rp := op.RemoteParticipant
	switch s.RequestType {
	case types.RequestTypeTrackComposite:
		// video publisher's identity wins when tracks come from different participants
		if s.Identity == "" || track.Kind() == webrtc.RTPCodecTypeVideo {
			s.Identity = rp.Identity()
			s.filenameReplacements["{publisher_identity}"] = s.Identity
		}
	case types.RequestTypeTrack:
		s.Identity = rp.Identity()
		s.TrackKind = pub.Kind().String()
		s.TrackSource = strings.ToLower(pub.Source().String())
		if o := s.GetFileConfig(); o != nil {
			o.OutputType = types.TrackOutputTypes[ts.MimeType]
		}
		s.filenameReplacements["{track_id}"] = s.TrackID
		s.filenameReplacements["{track_type}"] = s.TrackKind
		s.filenameReplacements["{track_source}"] = s.TrackSource
		s.filenameReplacements["{publisher_identity}"] = s.Identity
	}
}

// handleSubscribe performs validation, writer creation, and pre/post-init
// bookkeeping for an OpSubscribe. Returns the new writer on success, nil on
// failure (errors are reported via reportSubscribeError in the deferred hook).
// subLock is held for the pre-init critical section and released on every path.
func (s *SDKSource) handleSubscribe(w *trackWorker, trackID string, state *workerState, op Operation) *sdk.AppWriter {
	s.subLock.RLock()
	isPostInit := s.initialized.IsBroken()
	isPreInit := !isPostInit
	var subscribeErr error
	defer func() {
		if subscribeErr != nil {
			s.reportSubscribeError(isPostInit, op.ResultChan, trackID, subscribeErr)
		}
	}()

	// Early validation before creating writer
	if err := s.validateSubscription(op); err != nil {
		subscribeErr = err
		logger.Errorw("subscription validation failed", err, "trackID", trackID)
		s.subLock.RUnlock()
		return nil
	}

	state.generation++
	w.generation.Store(state.generation)

	writer, ts, err := s.createWriterForOp(op)
	if err != nil {
		subscribeErr = err
		logger.Errorw("failed to create writer", err, "trackID", trackID)
		s.subLock.RUnlock()
		return nil
	}

	if s.closing.Load() {
		// Release subLock before blocking drain
		s.subLock.RUnlock()
		s.handleOrphanedWriter(trackID, writer)
		return nil
	}

	s.mu.Lock()
	if isPreInit {
		s.updatePreInitStateLocked(op, ts)
	}
	s.mu.Unlock()

	// All validation passed - report success
	s.sendInitResult(op.ResultChan, trackID, nil)

	// Release subLock before transitioning to ACTIVE - we're done with pre-init work
	s.subLock.RUnlock()

	// For post-init subscriptions, notify pipeline to add track
	if isPostInit {
		<-s.callbacks.BuildReady
		s.callbacks.OnTrackAdded(ts)
	}

	return writer
}

// processOp dispatches an op to the handler for the worker's current state.
// Returns true when the worker should exit (OpClose handled).
func (s *SDKSource) processOp(w *trackWorker, trackID string, ws *workerState, op Operation) bool {
	logger.Debugw("processing operation", "trackID", trackID, "op", op.Type.String(), "state", ws.state.String())
	switch ws.state {
	case TrackStateIdle:
		return s.processIdleOp(w, trackID, ws, op)
	case TrackStateActive:
		return s.processActiveOp(w, trackID, ws, op)
	case TrackStateCleaning:
		// Unreachable: worker blocks in startCleanup while state is CLEANING.
		// Ops queue in opChan and are processed after cleanup completes (in IDLE state).
		return false
	default:
		logger.Warnw("invalid state", nil, "trackID", trackID, "state", ws.state.String())
		return false
	}
}

// processIdleOp handles ops while no writer is active. Only OpSubscribe and
// OpClose do real work; everything else is logged and ignored.
func (s *SDKSource) processIdleOp(w *trackWorker, trackID string, state *workerState, op Operation) bool {
	switch op.Type {
	case OpSubscribe:
		if writer := s.handleSubscribe(w, trackID, state, op); writer != nil {
			state.state = TrackStateActive
			state.writer = writer
			s.active.Inc()
		}
	case OpPlaying:
		logger.Warnw("invalid op in IDLE", nil, "trackID", trackID, "op", op.Type.String(), "generation", op.Generation)
	case OpClose:
		return true
	case OpSetTimeProvider, OpUnsubscribe, OpFinished:
		logger.Warnw("invalid op in IDLE", nil, "trackID", trackID, "op", op.Type.String())
	}
	return false
}

// processActiveOp handles ops while a writer is active. OpPlaying is gated on
// the generation so a late Playing for a previous writer is not applied.
func (s *SDKSource) processActiveOp(_ *trackWorker, trackID string, state *workerState, op Operation) bool {
	switch op.Type {
	case OpSubscribe:
		// Not possible, double subscribe shouldn't be possible, nothing to do
		logger.Warnw("unexpected subscribe in ACTIVE state", nil, "trackID", trackID)
	case OpPlaying:
		if op.Generation == state.generation && state.writer != nil {
			state.writer.Playing()
		} else {
			logger.Warnw("playing for previous writer", nil, "trackID", trackID, "op", op.Type.String(), "generation", op.Generation)
		}
	case OpSetTimeProvider:
		if state.writer != nil {
			state.writer.SetTimeProvider(op.TimeProvider)
		}
	case OpClose:
		// Drain writer (non-blocking for shutdown)
		if state.writer != nil {
			state.writer.Drain(false)
		}
		state.writer = nil
		state.state = TrackStateIdle
		s.active.Dec()
		return true // signal worker to exit
	case OpUnsubscribe:
		state.state = TrackStateCleaning
		s.startCleanup(trackID, state)
	case OpFinished:
		// StreamStopped - immediate cleanup
		state.state = TrackStateCleaning
		s.doCleanup(trackID, state)
	}
	return false
}

// Cleanup functions

// startCleanup begins graceful cleanup after an unsubscribe: it waits for the
// writer to finish (or for shutdown), then runs doCleanup. Blocks this
// track's worker goroutine only.
func (s *SDKSource) startCleanup(trackID string, state *workerState) {
	writer := state.writer
	writer.OnUnsubscribed()
	// Wait for writer to finish, but also handle shutdown
	select {
	case <-writer.Finished():
		// normal completion
	case <-s.closed.Watch():
		// shutdown - force drain like old CloseWriters did
		writer.Drain(false)
	}
	s.doCleanup(trackID, state)
}

// doCleanup drains the writer and returns the worker to IDLE. For
// continuing request types it also removes the track from the synchronizer
// and notifies the pipeline; otherwise the recording ends when no tracks remain.
func (s *SDKSource) doCleanup(trackID string, state *workerState) {
	writer := state.writer
	if writer == nil {
		// Already cleaned up (defensive guard for future code paths)
		state.state = TrackStateIdle
		return
	}
	state.writer = nil

	// Blocking cleanup - only affects this track's worker
	active := s.active.Dec()
	shouldContinue := s.RequestType == types.RequestTypeParticipant ||
		s.RequestType == types.RequestTypeRoomComposite ||
		s.RequestType == types.RequestTypeTemplate ||
		s.RequestType == types.RequestTypeMedia
	if shouldContinue {
		trackKind := writer.TrackKind()
		// NOTE(review): audio is drained before OnTrackRemoved, video after —
		// presumably intentional ordering around pipeline teardown; confirm
		if trackKind == webrtc.RTPCodecTypeAudio {
			writer.Drain(true)
		}
		s.sync.RemoveTrack(trackID)
		<-s.callbacks.BuildReady
		s.callbacks.OnTrackRemoved(trackID)
		if trackKind == webrtc.RTPCodecTypeVideo {
			writer.Drain(true)
		}
	} else {
		writer.Drain(true)
		if active == 0 {
			// last track gone - end the recording
			s.finished()
		}
	}
	state.state = TrackStateIdle
}

// ---------------- Helper functions ----------------

// createWriterForOp builds the appsrc element, TrackSource config, and
// AppWriter for a subscription. Waits for GstReady before touching GStreamer.
func (s *SDKSource) createWriterForOp(op Operation) (*sdk.AppWriter, *config.TrackSource, error) {
	track, pub, rp := op.Track, op.Pub, op.RemoteParticipant
	<-s.callbacks.GstReady
	src, err := gst.NewElementWithName("appsrc", fmt.Sprintf("app_%s", track.ID()))
	if err != nil {
		return nil, nil, errors.ErrGstPipelineError(err)
	}
	ts := &config.TrackSource{
		TrackID:         pub.SID(),
		TrackKind:       pub.Kind(),
		ParticipantKind: rp.Kind(),
		MimeType:        types.MimeType(strings.ToLower(track.Codec().MimeType)),
		PayloadType:     track.Codec().PayloadType,
		ClockRate:       track.Codec().ClockRate,
	}
	// Set audio channel from route match (RequestTypeMedia)
	s.mu.Lock()
	if ch, ok := s.audioChannels[pub.SID()]; ok {
		ts.AudioChannel = &ch
	}
	s.mu.Unlock()
	ts.AppSrc = app.SrcFromElement(src)
	var tc sdk.DriftHandler
	// Handle codec-specific setup (tempo controller for audio)
	switch ts.MimeType {
	case types.MimeTypeOpus, types.MimeTypePCMU, types.MimeTypePCMA:
		if s.AudioTempoController.Enabled {
			c := tempo.NewController()
			ts.TempoController = c
			tc = c
		}
	case types.MimeTypeH264, types.MimeTypeVP8, types.MimeTypeVP9:
		// Video codecs - no special setup needed here
	default:
		return nil, nil, errors.ErrNotSupported(string(ts.MimeType))
	}
	writer, err := sdk.NewAppWriter(s.PipelineConfig, track, pub, rp, ts, s.sync, tc, s.callbacks)
	if err != nil {
		return nil, nil, err
	}
	if tp := s.timeProvider.Load(); tp != nil {
		writer.SetTimeProvider(*tp)
	}
	return writer, ts, nil
}

// handleOrphanedWriter drains a writer that was created while the source was
// already closing (and therefore never became active).
func (s *SDKSource) handleOrphanedWriter(trackID string, writer *sdk.AppWriter) {
	writer.Drain(true)
	logger.Debugw("orphaned writer cleaned up", "trackID", trackID)
}



================================================
FILE: pkg/pipeline/source/track_worker_test.go
================================================
// Copyright 2026 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package source

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"go.uber.org/atomic"

	"github.com/livekit/egress/pkg/config"
	"github.com/livekit/egress/pkg/gstreamer"
	"github.com/livekit/egress/pkg/types"
	"github.com/livekit/server-sdk-go/v2/pkg/synchronizer"
)

// testSDKSource creates a minimal SDKSource for testing state transitions
func testSDKSource(t *testing.T) *SDKSource {
	t.Helper()
	buildReady := make(chan struct{})
	close(buildReady) // already ready
	callbacks := &gstreamer.Callbacks{
		BuildReady: buildReady,
	}
	callbacks.AddOnTrackRemoved(func(_ string) {})
	pipelineConfig := &config.PipelineConfig{
		RequestType: types.RequestTypeRoomComposite,
	}
	return &SDKSource{
		PipelineConfig:       pipelineConfig,
		callbacks:            callbacks,
		sync:                 synchronizer.NewSynchronizer(nil),
		workers:              make(map[string]*trackWorker),
		filenameReplacements: make(map[string]string),
		active:               atomic.Int32{},
		closing:              atomic.Bool{},
	}
}

// TestGetOrCreateWorker_ReturnsExistingWorker verifies worker dedup by trackID.
func TestGetOrCreateWorker_ReturnsExistingWorker(t *testing.T) {
	s := testSDKSource(t)
	w1 := s.getOrCreateWorker("track-1")
	w2 := s.getOrCreateWorker("track-1")
	assert.Equal(t, w1, w2, "should return same worker for same trackID")
}

// TestGetOrCreateWorker_ReturnsNilWhenClosing verifies no workers are created after close.
func TestGetOrCreateWorker_ReturnsNilWhenClosing(t *testing.T) {
	s := testSDKSource(t)
	s.closing.Store(true)
	w := s.getOrCreateWorker("track-1")
	assert.Nil(t, w, "should return nil when closing")
}

// TestSubmitOp_DropsOpWhenClosing verifies submitOp is a no-op after close.
func TestSubmitOp_DropsOpWhenClosing(t *testing.T) {
	s := testSDKSource(t)
	s.closing.Store(true)
	// This should not panic or block
	s.submitOp("track-1", Operation{Type: OpPlaying})
	s.workersMu.RLock()
	_, exists := s.workers["track-1"]
	s.workersMu.RUnlock()
	assert.False(t, exists)
}

// TestStateTransitions_IdleState table-tests processOp in the IDLE state.
func TestStateTransitions_IdleState(t *testing.T) {
	tests := []struct {
		name      string
		op        OpType
		wantState TrackState
		wantExit  bool
	}{
		// Valid ops in IDLE
		{"OpClose exits", OpClose, TrackStateIdle, true},
		{"OpPlaying stays IDLE", OpPlaying, TrackStateIdle, false},
		{"OpSetTimeProvider stays IDLE", OpSetTimeProvider, TrackStateIdle, false},
		// Invalid ops in IDLE (should log warning but not crash)
		{"OpUnsubscribe stays IDLE", OpUnsubscribe, TrackStateIdle, false},
		{"OpFinished stays IDLE", OpFinished, TrackStateIdle, false},
		// Note: OpSubscribe requires GStreamer, tested in integration tests
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			s := testSDKSource(t)
			w := &trackWorker{
				trackID:    "test-track",
				generation: atomic.Uint64{},
			}
			state := &workerState{state: TrackStateIdle}
			exit := s.processOp(w, "test-track", state, Operation{Type: tt.op})
			assert.Equal(t, tt.wantExit, exit, "exit mismatch")
			assert.Equal(t, tt.wantState, state.state, "state mismatch")
		})
	}
}

// Note: Operations that need a real writer (Unsubscribe, Finished) require integration tests

// TestStateTransitions_ActiveState table-tests processOp in the ACTIVE state
// for ops that tolerate a nil writer.
func TestStateTransitions_ActiveState(t *testing.T) {
	tests := []struct {
		name      string
		op        OpType
		wantState TrackState
		wantExit  bool
	}{
		{"OpClose drains and exits", OpClose, TrackStateIdle, true},
		{"OpPlaying stays ACTIVE", OpPlaying, TrackStateActive, false},
		{"OpSetTimeProvider stays ACTIVE", OpSetTimeProvider, TrackStateActive, false},
		{"OpSubscribe stays ACTIVE (invalid)", OpSubscribe, TrackStateActive, false},
		// Note: OpUnsubscribe and OpFinished trigger cleanup which needs a real writer
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			s := testSDKSource(t)
			w := &trackWorker{
				trackID:    "test-track",
				generation: atomic.Uint64{},
			}
			// Start in ACTIVE state with nil writer (ok for ops that don't use it)
			state := &workerState{
				state:      TrackStateActive,
				writer:     nil,
				generation: 1,
			}
			s.active.Store(1) // simulate one active track
			exit := s.processOp(w, "test-track", state, Operation{Type: tt.op, Generation: 1})
			assert.Equal(t, tt.wantExit, exit, "exit mismatch")
			assert.Equal(t, tt.wantState, state.state, "state mismatch")
		})
	}
}



================================================
FILE: pkg/pipeline/source/web.go
================================================
// Copyright 2023 LiveKit, Inc.
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package source import ( "bytes" "context" "encoding/json" "fmt" "math/rand" "net/url" "os" "os/exec" "path/filepath" "strings" "time" "gopkg.in/natefinch/lumberjack.v2" "github.com/chromedp/cdproto/inspector" "github.com/chromedp/cdproto/runtime" "github.com/chromedp/cdproto/target" "github.com/chromedp/chromedp" "github.com/frostbyte73/core" "github.com/livekit/egress/pkg/config" "github.com/livekit/egress/pkg/errors" "github.com/livekit/protocol/livekit" "github.com/livekit/protocol/logger" "github.com/livekit/protocol/logger/medialogutils" ) const ( startRecordingLog = "START_RECORDING" endRecordingLog = "END_RECORDING" chromeFailedToStart = "chrome failed to start:" chromeCertVerifierChanged = "net::ERR_CERT_VERIFIER_CHANGED" chromeTimeout = time.Second * 30 chromeRetries = 3 ) type WebSource struct { pulseSink string xvfb *exec.Cmd closeChrome context.CancelFunc chromeLogger *lumberjack.Logger startRecording core.Fuse endRecording core.Fuse closed core.Fuse info *livekit.EgressInfo } func NewWebSource(ctx context.Context, p *config.PipelineConfig) (*WebSource, error) { ctx, span := tracer.Start(ctx, "WebInput.New") defer span.End() p.Display = fmt.Sprintf(":%d", 10+rand.Intn(2147483637)) s := &WebSource{ info: p.Info, } if !p.AwaitStartSignal { s.startRecording.Break() } if err := s.createPulseSink(ctx, p); err != nil { logger.Errorw("failed to create pulse sink", err) s.Close() return nil, err } if err := 
s.launchXvfb(ctx, p); err != nil { logger.Errorw("failed to launch xvfb", err, "display", p.Display) s.Close() return nil, err } if err := s.launchChrome(ctx, p); err != nil { logger.Warnw("failed to launch chrome", err) s.Close() return nil, err } return s, nil } func (s *WebSource) StartRecording() <-chan struct{} { return s.startRecording.Watch() } func (s *WebSource) EndRecording() <-chan struct{} { return s.endRecording.Watch() } func (s *WebSource) GetStartedAt() int64 { return time.Now().UnixNano() } func (s *WebSource) GetEndedAt() int64 { return time.Now().UnixNano() } func (s *WebSource) Close() { s.closed.Once(func() { if s.closeChrome != nil { logger.Debugw("closing chrome") s.closeChrome() } if s.xvfb != nil { logger.Debugw("closing X display") _ = s.xvfb.Process.Kill() _ = s.xvfb.Wait() } if s.pulseSink != "" { logger.Debugw("unloading pulse module") if err := exec.Command("pactl", "unload-module", s.pulseSink).Run(); err != nil { logger.Errorw("failed to unload pulse sink", err) } } if s.chromeLogger != nil { _ = s.chromeLogger.Close() s.chromeLogger = nil } }) } // creates a new pulse audio sink func (s *WebSource) createPulseSink(ctx context.Context, p *config.PipelineConfig) error { _, span := tracer.Start(ctx, "WebInput.createPulseSink") defer span.End() logger.Debugw("creating pulse sink") cmd := exec.Command("pactl", "load-module", "module-null-sink", fmt.Sprintf("sink_name=\"%s\"", p.Info.EgressId), fmt.Sprintf("sink_properties=device.description=\"%s\"", p.Info.EgressId), ) var b bytes.Buffer l := medialogutils.NewCmdLogger(func(s string) { logger.Infow(fmt.Sprintf("pactl: %s", s)) }) cmd.Stdout = &b cmd.Stderr = l err := cmd.Run() if err != nil { if out := b.Bytes(); out != nil { _, _ = l.Write(out) } return errors.ErrProcessFailed("pulse", err) } s.pulseSink = strings.TrimRight(b.String(), "\n") return nil } // creates a new xvfb display func (s *WebSource) launchXvfb(ctx context.Context, p *config.PipelineConfig) error { _, span := 
tracer.Start(ctx, "WebInput.launchXvfb") defer span.End() dims := fmt.Sprintf("%dx%dx%d", p.Width, p.Height, p.Depth) logger.Debugw("creating X display", "display", p.Display, "dims", dims) xvfb := exec.Command("Xvfb", p.Display, "-screen", "0", dims, "-ac", "-nolisten", "tcp", "-nolisten", "unix") if err := xvfb.Start(); err != nil { return errors.ErrProcessFailed("xvfb", err) } s.xvfb = xvfb return nil } func newChromeLogger(tmpDir string) *lumberjack.Logger { writer := &lumberjack.Logger{ Filename: filepath.Join(tmpDir, "chrome.log"), MaxSize: 100, // MB per file (smallest unit) MaxBackups: 1, // current + 1 backup = 2 files total MaxAge: 7, // days Compress: false, } return writer } // launches chrome and navigates to the url func (s *WebSource) launchChrome(ctx context.Context, p *config.PipelineConfig) error { _, span := tracer.Start(ctx, "WebInput.launchChrome") defer span.End() webUrl := p.WebUrl if webUrl == "" { // build input url inputUrl, err := url.Parse(p.BaseUrl) if err != nil { return err } values := inputUrl.Query() values.Set("layout", p.Layout) values.Set("url", p.WsUrl) values.Set("token", p.Token) inputUrl.RawQuery = values.Encode() webUrl = inputUrl.String() } if p.Debug.EnableChromeLogging { s.chromeLogger = newChromeLogger(os.TempDir()) } logger.Debugw("launching chrome", "url", webUrl, "sandbox", p.EnableChromeSandbox, "insecure", p.Insecure) opts := []chromedp.ExecAllocatorOption{ chromedp.NoFirstRun, chromedp.NoDefaultBrowserCheck, chromedp.DisableGPU, // puppeteer default behavior chromedp.Flag("disable-infobars", true), chromedp.Flag("excludeSwitches", "enable-automation"), chromedp.Flag("disable-background-networking", true), chromedp.Flag("enable-features", "NetworkService,NetworkServiceInProcess"), chromedp.Flag("disable-background-timer-throttling", true), chromedp.Flag("disable-backgrounding-occluded-windows", true), chromedp.Flag("disable-breakpad", true), chromedp.Flag("disable-client-side-phishing-detection", true), 
chromedp.Flag("disable-default-apps", true), chromedp.Flag("disable-dev-shm-usage", true), chromedp.Flag("disable-extensions", true), chromedp.Flag("disable-features", "AudioServiceOutOfProcess,site-per-process,Translate,TranslateUI,BlinkGenPropertyTrees"), chromedp.Flag("disable-hang-monitor", true), chromedp.Flag("disable-ipc-flooding-protection", true), chromedp.Flag("disable-popup-blocking", true), chromedp.Flag("disable-prompt-on-repost", true), chromedp.Flag("disable-renderer-backgrounding", true), chromedp.Flag("disable-sync", true), chromedp.Flag("force-color-profile", "srgb"), chromedp.Flag("metrics-recording-only", true), chromedp.Flag("safebrowsing-disable-auto-update", true), chromedp.Flag("password-store", "basic"), chromedp.Flag("use-mock-keychain", true), // custom args chromedp.Flag("kiosk", true), chromedp.Flag("disable-translate", true), chromedp.Flag("enable-automation", false), chromedp.Flag("autoplay-policy", "no-user-gesture-required"), chromedp.Flag("window-position", "0,0"), // config chromedp.Flag("window-size", fmt.Sprintf("%d,%d", p.Width, p.Height)), chromedp.Flag("disable-web-security", p.Insecure), chromedp.Flag("allow-running-insecure-content", p.Insecure), chromedp.Flag("no-sandbox", !p.EnableChromeSandbox), // output chromedp.Env(fmt.Sprintf("PULSE_SINK=%s", p.Info.EgressId)), chromedp.Flag("display", p.Display), } // custom for k, v := range p.ChromeFlags { opts = append(opts, chromedp.Flag(k, v)) } allocCtx, allocCancel := chromedp.NewExecAllocator(context.Background(), opts...) 
var err error
	var retryable bool
	// retry page loads that time out or hit transient cert-verifier churn,
	// up to chromeRetries attempts; each attempt gets a fresh chromedp context
	for i := range chromeRetries {
		if i > 0 {
			logger.Debugw("navigation timed out, reloading")
		}
		chromeCtx, chromeCancel := chromedp.NewContext(allocCtx)
		s.closeChrome = func() {
			chromeCancel()
			allocCancel()
		}
		err, retryable = s.navigate(chromeCtx, chromeCancel, webUrl)
		if !retryable {
			break
		}
	}
	return err
}

// navigate loads webUrl in the given chromedp context, wiring console/exception
// listeners first. Returns (err, retryable); retryable=true signals the caller's
// retry loop to relaunch chrome and try again.
func (s *WebSource) navigate(chromeCtx context.Context, chromeCancel context.CancelFunc, webUrl string) (error, bool) {
	chromedp.ListenTarget(chromeCtx, func(ev interface{}) {
		switch ev := ev.(type) {
		case *runtime.EventConsoleAPICalled:
			if s.chromeLogger != nil {
				if b, err := json.Marshal(ev); err == nil {
					_, _ = s.chromeLogger.Write(append(b, '\n'))
				}
			}
			// the template page signals recording state via console.log markers
			for _, arg := range ev.Args {
				var val interface{}
				err := json.Unmarshal(arg.Value, &val)
				if err != nil {
					continue
				}
				switch fmt.Sprint(val) {
				case startRecordingLog:
					logger.Infow("chrome: START_RECORDING")
					s.startRecording.Break()
				case endRecordingLog:
					logger.Infow("chrome: END_RECORDING")
					s.endRecording.Break()
				}
			}
		case *runtime.EventExceptionThrown:
			if s.chromeLogger != nil {
				if b, err := json.Marshal(ev); err == nil {
					_, _ = s.chromeLogger.Write(append(b, '\n'))
				}
			}
			logger.Debugw("chrome exception", "err", ev.ExceptionDetails.Error())
		case *target.EventTargetCrashed:
			logger.Errorw("chrome crashed", nil, "targetId", ev.TargetID, "status", ev.Status, "errorCode", ev.ErrorCode)
		case *inspector.EventTargetCrashed:
			logger.Errorw("chrome crashed", nil)
		}
	})

	// navigate
	var timeout *time.Timer
	var errString string
	if err := chromedp.Run(chromeCtx,
		chromedp.ActionFunc(func(_ context.Context) error {
			logger.Debugw("chrome initialized")
			// set page load timeout; firing cancels the chromedp context
			timeout = time.AfterFunc(chromeTimeout, chromeCancel)
			return nil
		}),
		chromedp.ActionFunc(func(ctx context.Context) error {
			// use RunResponse wrapped in ActionFunc to get the response details
			r, err := chromedp.RunResponse(ctx, chromedp.Navigate(webUrl))
			if err != nil {
				return err
			}
			if r.Status >= 400 {
				return errors.PageLoadError(r.StatusText)
			}
			return nil
		}),
		chromedp.ActionFunc(func(_ context.Context) error {
			// cancel timer
			timeout.Stop()
			return nil
		}),
		// the template reports fatal setup errors in a div.error element
		chromedp.Evaluate(`
		if (document.querySelector('div.error')) {
			document.querySelector('div.error').innerText;
		} else {
			''
		}`, &errString),
	); err != nil {
		if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
			// page load timeout fired (or caller cancelled) — retryable
			return errors.PageLoadError("timed out"), true
		}
		if strings.HasPrefix(err.Error(), chromeFailedToStart) {
			return errors.ChromeError(err), false
		}
		if strings.Contains(err.Error(), chromeCertVerifierChanged) {
			logger.Warnw("chrome cert verifier changed, retrying", nil)
			return errors.PageLoadError(err.Error()), true
		}
		return errors.PageLoadError(err.Error()), false
	} else if errString != "" {
		return errors.TemplateError(errString), false
	}

	return nil, false
}

================================================
FILE: pkg/pipeline/tempo/controller.go
================================================
package tempo

import (
	"time"

	"github.com/linkdata/deadlock"
)

const (
	DefaultThreshold = 10 * time.Millisecond // don’t start tiny corrections
	MaxDriftBudget   = 2 * time.Second       // cap on processed drift magnitude
)

// Controller serializes tempo drift corrections: at most one correction is
// "current" at a time, further drift accumulates in "pending" until the
// current one is reported finished via DriftProcessed.
type Controller struct {
	mu        deadlock.Mutex
	pending   time.Duration       // accumulated, not yet started
	current   time.Duration       // currently being corrected
	processed time.Duration       // signed sum of ALL corrections already applied
	cb        func(time.Duration) // invoked with the next 'current' to apply
}

// NewController returns a Controller with no callback and zero drift state.
func NewController() *Controller {
	return &Controller{}
}

// EnqueueDrift adds signed drift. It may synchronously arm a new correction
// if idle, above threshold, and starting it would not exceed the budget.
func (tc *Controller) EnqueueDrift(drift time.Duration) {
	if drift == 0 {
		return
	}
	tc.mu.Lock()
	tc.pending += drift
	var toStart time.Duration
	if tc.current == 0 && tc.pending.Abs() >= DefaultThreshold {
		// Only start if applying 'pending' keeps processed within budget.
		if (tc.processed + tc.pending).Abs() < MaxDriftBudget {
			toStart = tc.pending
			tc.current = toStart
			tc.pending = 0
		}
	}
	cb := tc.cb
	// callback is invoked outside the lock to avoid re-entrancy deadlocks
	tc.mu.Unlock()
	if toStart != 0 && cb != nil {
		cb(toStart)
	}
}

// DriftProcessed marks the *current* correction as finished and may start the next
// one if available and within budget.
func (tc *Controller) DriftProcessed() {
	tc.mu.Lock()
	tc.processed += tc.current
	tc.current = 0
	var toStart time.Duration
	if tc.pending.Abs() >= DefaultThreshold && (tc.processed+tc.pending).Abs() < MaxDriftBudget {
		toStart = tc.pending
		tc.current = toStart
		tc.pending = 0
	}
	cb := tc.cb
	tc.mu.Unlock()
	if toStart != 0 && cb != nil {
		cb(toStart)
	}
}

// OnDriftDetectedCallback sets the callback. If a correction is already armed,
// it’s invoked immediately with that value.
func (tc *Controller) OnDriftDetectedCallback(cb func(time.Duration)) {
	tc.mu.Lock()
	tc.cb = cb
	cur := tc.current
	tc.mu.Unlock()
	if cb != nil && cur != 0 {
		cb(cur)
	}
}

// Processed returns the total of already-applied corrections.
func (tc *Controller) Processed() time.Duration {
	tc.mu.Lock()
	defer tc.mu.Unlock()
	return tc.processed
}

================================================
FILE: pkg/pipeline/tempo/controller_test.go
================================================
package tempo

import (
	"testing"
	"time"
)

// A drift above DefaultThreshold and under MaxDriftBudget should arm a
// correction immediately; processed stays 0 until DriftProcessed is called.
func TestEnqueueStartsWithinBudget(t *testing.T) {
	tc := NewController()
	var calls []time.Duration
	tc.OnDriftDetectedCallback(func(d time.Duration) { calls = append(calls, d) })

	tc.EnqueueDrift(30 * time.Millisecond) // > threshold, under budget
	if len(calls) != 1 || calls[0] != 30*time.Millisecond {
		t.Fatalf("callback: got %v, want [30ms]", calls)
	}
	if got := tc.Processed(); got != 0 {
		t.Fatalf("processed before DriftProcessed: got %v, want 0", got)
	}
}

// Sub-threshold drifts accumulate and only start once the total crosses
// DefaultThreshold.
func TestThresholdAccumulation(t *testing.T) {
	tc := NewController()
	var calls []time.Duration
	tc.OnDriftDetectedCallback(func(d time.Duration) { calls = append(calls, d) })

	tc.EnqueueDrift(5 * time.Millisecond) // below threshold → no start
	tc.EnqueueDrift(6 * time.Millisecond) // total 11ms → start now
	if len(calls) != 1 || calls[0] != 11*time.Millisecond {
		t.Fatalf("callback: got %v, want [11ms]", calls)
	}
}

// While a correction is in flight, new drift stays pending; finishing the
// current correction starts the pending one.
func TestDriftProcessedStartsNext(t *testing.T) {
	tc := NewController()
	var calls []time.Duration
	tc.OnDriftDetectedCallback(func(d time.Duration) { calls = append(calls, d) })

	tc.EnqueueDrift(30 * time.Millisecond) // starts immediately
	if len(calls) != 1 || calls[0] != 30*time.Millisecond {
		t.Fatalf("first start: got %v", calls)
	}
	tc.EnqueueDrift(40 * time.Millisecond) // pending, not started yet
	if len(calls) != 1 {
		t.Fatalf("should not start second yet: got %v", calls)
	}
	tc.DriftProcessed() // finish first → second starts
	if len(calls) != 2 || calls[1] != 40*time.Millisecond {
		t.Fatalf("second start: got %v", calls)
	}
	if got := tc.Processed(); got != 30*time.Millisecond {
		t.Fatalf("processed after first completion: got %v, want 30ms", got)
	}
}

// Corrections that would push |processed| past MaxDriftBudget are held back;
// opposite-signed drift that brings the net back under budget resumes them.
func TestBudgetBlocksAndResumes(t *testing.T) {
	tc := NewController()
	var
tc.EnqueueDrift(-10 * time.Millisecond)
	tc.DriftProcessed()
	if got := tc.Processed(); got != 20*time.Millisecond {
		t.Fatalf("after +30-10 processed: got %v, want 20ms", got)
	}
}

================================================
FILE: pkg/pipeline/watch.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package pipeline

import (
	"context"
	"fmt"
	"regexp"
	"strings"
	"time"

	"github.com/go-gst/go-gst/gst"
	"github.com/livekit/protocol/logger"

	"github.com/livekit/egress/pkg/errors"
	"github.com/livekit/egress/pkg/gstreamer"
	"github.com/livekit/egress/pkg/pipeline/builder"
	"github.com/livekit/egress/pkg/pipeline/source"
)

// Known-noisy gstreamer log messages/functions, suppressed in gstLog below.
const (
	// noisy gst errors
	msgWrongThread = "Called from wrong thread"

	// noisy gst warnings
	msgKeyframe                    = "Could not request a keyframe. Files may not split at the exact location they should"
	msgLatencyQuery                = "Latency query failed"
	msgTaps                        = "can't find exact taps"
	msgInputDisappeared            = "Can't copy metadata because input buffer disappeared"
	msgSkippingSegment             = "error reading data -1 (reason: Success), skipping segment"
	fnGstAudioResampleCheckDiscont = "gst_audio_resample_check_discont"

	// noisy colorimetry warnings from decoders that omit VUI color info
	msgColorMatrix        = "Need to specify a color matrix when using YUV format (I420)"
	msgInvalidColorimetry = "invalid colorimetry, using default"

	// noisy gst fixmes
	msgStreamStart       = "stream-start event without group-id. Consider implementing group-id handling in the upstream elements"
	msgCreatingStream    = "Creating random stream-id, consider implementing a deterministic way of creating a stream-id"
	msgAggregateSubclass = "Subclass should call gst_aggregator_selected_samples() from its aggregate implementation."

	// rtmp client
	catRtmpClient      = "rtmpclient"
	fnSendCreateStream = "send_create_stream"
)

var (
	// maps gstreamer debug levels to log level strings used in output
	logLevels = map[gst.DebugLevel]string{
		gst.LevelError:   "error",
		gst.LevelWarning: "warning",
		gst.LevelFixMe:   "fixme",
		gst.LevelInfo:    "info",
		gst.LevelDebug:   "debug",
		gst.LevelLog:     "log",
		gst.LevelTrace:   "trace",
		gst.LevelMemDump: "memdump",
	}

	// messages and function names dropped entirely from the gst log stream
	ignore = map[string]bool{
		msgWrongThread:                 true,
		msgKeyframe:                    true,
		msgLatencyQuery:                true,
		msgTaps:                        true,
		msgInputDisappeared:            true,
		msgSkippingSegment:             true,
		fnGstAudioResampleCheckDiscont: true,
		msgColorMatrix:                 true,
		msgInvalidColorimetry:          true,
		msgStreamStart:                 true,
		msgCreatingStream:              true,
		msgAggregateSubclass:           true,
	}
)

// gstLog is installed as the gstreamer debug log handler. It filters noisy
// messages, intercepts rtmpclient create-stream messages to record stream
// start times, and forwards the rest to the controller's gst logger.
func (c *Controller) gstLog(
	cat *gst.DebugCategory, level gst.DebugLevel, file, function string, line int, _ *gst.LoggedObject, debugMsg *gst.DebugMessage,
) {
	category := cat.GetName()
	message := debugMsg.Get()
	lvl, ok := logLevels[level]
	if !ok || ignore[message] || ignore[function] {
		return
	}
	if category == catRtmpClient {
		if function == fnSendCreateStream {
			// message embeds the stream ID in single quotes
			// NOTE(review): assumes the message always contains a quoted ID;
			// Split(...)[1] would panic otherwise — confirm message format
			streamID := strings.Split(message, "'")[1]
			c.updateStreamStartTime(streamID)
		}
		return
	}

	var msg string
	if function != "" {
		msg = fmt.Sprintf("[%s %s] %s: %s", category, lvl, function, message)
	} else {
		msg = fmt.Sprintf("[%s %s] %s", category, lvl, message)
	}
	caller := fmt.Sprintf("%s:%d", file, line)
	c.gstLogger.Infow(msg, "caller", caller)
}

// messageWatch is the pipeline bus watch. Returns false to remove the watch
// (on EOS or a fatal error), true to keep receiving messages.
func (c *Controller) messageWatch(msg *gst.Message) bool {
	var err error
	switch msg.Type() {
	case gst.MessageEOS:
		logger.Infow("pipeline received EOS")
		if c.eosTimer != nil {
			c.eosTimer.Stop()
		}
		// Capture pipeline running time at EOS — all content has been flushed
		// to sinks at this point, so this reflects the actual file duration.
		// Used as a floor for endedAt to account for pipeline-generated content
		// beyond the last RTP packet (e.g. mixer silence after all tracks leave).
		if rt, ok := c.p.RunningTime(); ok {
			c.pipelineEndedAt = c.src.GetStartedAt() + rt.Nanoseconds()
		}
		c.eosReceived.Break()
		c.p.Stop()
		return false

	case gst.MessageWarning:
		err = c.handleMessageWarning(msg.ParseWarning())
	case gst.MessageError:
		err = c.handleMessageError(msg.ParseError())
	case gst.MessageStateChanged:
		c.handleMessageStateChanged(msg)
	case gst.MessageElement:
		err = c.handleMessageElement(msg)
	case gst.MessageQoS:
		c.handleMessageQoS(msg)
	}

	if err != nil {
		c.OnError(err)
		return false
	}
	return true
}

const (
	msgClockProblem = "GStreamer error: clock problem."
)

// handleMessageWarning escalates clock problems to fatal errors, treats SRT
// sink warnings as stream failures, and logs everything else as a warning.
func (c *Controller) handleMessageWarning(gErr *gst.GError) error {
	element, name, message := parseDebugInfo(gErr)

	if gErr.Message() == msgClockProblem {
		err := errors.ErrGstPipelineError(gErr)
		logger.Errorw(gErr.Error(), errors.New(message), "element", element)
		return err
	}

	if element == elementGstSrtSink {
		// sink names are of the form <prefix>_<streamName>
		streamName := strings.Split(name, "_")[1]
		stream, err := c.getStreamSink().GetStream(streamName)
		if err != nil {
			return err
		}
		return c.streamFailed(context.Background(), stream, gErr)
	}

	logger.Warnw(gErr.Message(), errors.New(message), "element", element)
	return nil
}

const (
	elementGstAppSrc       = "GstAppSrc"
	elementGstRtmp2Sink    = "GstRtmp2Sink"
	elementGstSplitMuxSink = "GstSplitMuxSink"
	elementGstSrtSink      = "GstSRTSink"

	msgStreamingNotNegotiated = "streaming stopped, reason not-negotiated (-4)"
	msgMuxer                  = ":muxer"
)

// handleMessageError returns true if the error has been handled, false if the pipeline should quit
func (c *Controller) handleMessageError(gErr *gst.GError) error {
	element, name, message := parseDebugInfo(gErr)

	switch element {
	case elementGstRtmp2Sink:
		streamSink := c.getStreamSink()
		streamName := strings.Split(name, "_")[1]
		stream, err := streamSink.GetStream(streamName)
		if err != nil {
			return err
		}
		if !c.eosSent.IsBroken() {
// try reconnecting
			ok, err := streamSink.ResetStream(stream, gErr)
			if err != nil {
				logger.Errorw("failed to reset stream", err)
			} else if ok {
				c.trackStreamRetry(context.Background(), stream)
				return nil
			}
		}

		// remove sink
		return c.streamFailed(context.Background(), stream, gErr)

	case elementGstSrtSink:
		streamName := strings.Split(name, "_")[1]
		stream, err := c.getStreamSink().GetStream(streamName)
		if err != nil {
			return err
		}
		return c.streamFailed(context.Background(), stream, gErr)

	case elementGstAppSrc:
		if message == msgStreamingNotNegotiated {
			// send eosSent to app src
			logger.Debugw("streaming stopped", "name", name)
			if sdkSrc, ok := c.src.(*source.SDKSource); ok {
				sdkSrc.StreamStopped(name)
			}
			return nil
		}

	case elementGstSplitMuxSink:
		// We sometimes get GstSplitMuxSink errors if EOS was received before any data
		if message == msgMuxer {
			if c.eosSent.IsBroken() {
				logger.Debugw("GstSplitMuxSink failure after sending EOS")
				return nil
			}
		}
	}

	// input failure or file write failure. Fatal
	err := errors.ErrGstPipelineError(gErr)
	logger.Errorw(gErr.Error(), errors.New(message), "element", element, "name", name)
	return err
}

// handleMessageStateChanged reacts to pipeline and appsrc state transitions:
// fires the paused/playing callbacks once for the pipeline, and notifies the
// SDK source when a per-track appsrc reaches PLAYING.
func (c *Controller) handleMessageStateChanged(msg *gst.Message) {
	oldState, newState := msg.ParseStateChanged()
	s := msg.Source()
	if s == pipelineName {
		if newState == gst.StatePaused {
			c.paused.Once(func() {
				logger.Infow("pipeline paused")
				c.callbacks.OnPipelinePaused()
			})
		}
		if newState == gst.StatePlaying {
			c.playing.Once(func() {
				var timeToPlaying time.Duration
				if !c.pipelineCreatedAt.IsZero() {
					timeToPlaying = time.Since(c.pipelineCreatedAt)
				}
				logger.Infow("pipeline playing", "timeToPlaying", timeToPlaying)
				c.updateStartTime(c.src.GetStartedAt())
			})
		}
		return
	}

	// per-track app sources are named "app_<trackID>"
	if strings.HasPrefix(s, "app_") {
		trackID := s[4:]
		logger.Debugw("appsrc state change", "trackID", trackID, "oldState", oldState.String(), "newState", newState.String())
		if newState == gst.StatePlaying {
			if sdkSrc, ok := c.src.(*source.SDKSource); ok {
				sdkSrc.Playing(trackID)
			}
		}
		return
	}
}

const (
	msgFirstSampleMetadata = "FirstSampleMetadata"
	msgFragmentOpened      = "splitmuxsink-fragment-opened"
	msgFragmentClosed      = "splitmuxsink-fragment-closed"
	msgGstMultiFileSink    = "GstMultiFileSink"
)

// handleMessageElement dispatches custom element messages: leaky queue drop
// stats, first-sample metadata, HLS segment open/close events, and image
// (multifilesink) notifications.
func (c *Controller) handleMessageElement(msg *gst.Message) error {
	s := msg.GetStructure()
	if s != nil {
		switch s.Name() {
		case gstreamer.LeakyQueueStatsMessage:
			queueName, dropped, err := parseLeakyQueueStats(s)
			if err != nil {
				logger.Debugw("failed to parse leaky queue stats message", err)
				return nil
			}
			if strings.HasPrefix(queueName, "video") {
				c.stats.droppedVideoBuffers.Add(dropped)
				c.stats.droppedVideoBuffersByQueue[queueName] = dropped
			}
			if strings.HasPrefix(queueName, "audio") {
				c.stats.queuesDroppedAudioBuffers.Add(dropped)
				c.stats.droppedAudioBuffersByQueue[queueName] = dropped
			}

		case msgFirstSampleMetadata:
			startDate, err := getFirstSampleMetadataFromGstStructure(s)
			if err != nil {
				return err
			}
			logger.Debugw("received FirstSampleMetadata message", "startDate", startDate)
			c.getSegmentSink().UpdateStartDate(startDate)

		case msgFragmentOpened:
			filepath, t, err := getSegmentParamsFromGstStructure(s)
			if err != nil {
				logger.Errorw("failed to retrieve segment parameters from event", err)
				return err
			}
			if err = c.getSegmentSink().FragmentOpened(filepath, t); err != nil {
				logger.Errorw("failed to register new segment with playlist writer", err, "location", filepath, "runningTime", t)
				return err
			}

		case msgFragmentClosed:
			filepath, t, err := getSegmentParamsFromGstStructure(s)
			if err != nil {
				logger.Errorw("failed to retrieve segment parameters from event", err, "location", filepath, "runningTime", t)
				return err
			}
			// We need to dispatch to a queue to:
			// 1. Avoid concurrent access to the SegmentsInfo structure
			// 2. Ensure that playlists are uploaded in the same order they are enqueued to avoid an older playlist overwriting a newer one
			if err = c.getSegmentSink().FragmentClosed(filepath, t); err != nil {
				logger.Errorw("failed to end segment with playlist writer", err, "runningTime", t)
				return err
			}

		case msgGstMultiFileSink:
			location, ts, err := getImageInformationFromGstStructure(s)
			if err != nil {
				return err
			}
			imageSink := c.getImageSink(msg.Source())
			if imageSink == nil {
				return errors.ErrSinkNotFound
			}
			err = imageSink.NewImage(location, ts)
			if err != nil {
				return err
			}
		}
	}

	return nil
}

// parseLeakyQueueStats extracts the queue name and dropped-buffer count from a
// leaky queue stats structure.
func parseLeakyQueueStats(s *gst.Structure) (queue string, dropped uint64, err error) {
	queueValue, err := s.GetValue("queue")
	if err != nil {
		return "", 0, err
	}
	queue, _ = queueValue.(string)

	droppedValue, err := s.GetValue("dropped")
	if err != nil {
		return queue, 0, err
	}
	dropped = normalizeUint64(droppedValue)
	return queue, dropped, nil
}

// normalizeUint64 converts any integer type to uint64, mapping negative
// (and unrecognized) values to 0.
func normalizeUint64(value interface{}) uint64 {
	switch v := value.(type) {
	case uint64:
		return v
	case uint:
		return uint64(v)
	case uint32:
		return uint64(v)
	case int:
		if v > 0 {
			return uint64(v)
		}
	case int64:
		if v > 0 {
			return uint64(v)
		}
	case int32:
		if v > 0 {
			return uint64(v)
		}
	}
	return 0
}

// handleMessageQoS records QoS drops originating from the audio mixer;
// all other QoS messages are ignored.
func (c *Controller) handleMessageQoS(msg *gst.Message) {
	if isQosForAudioMixer(msg) {
		qos := msg.ParseQoS()
		if qos == nil {
			logger.Debugw("failed to parse audio mixer QoS message")
			return
		}
		c.handleAudioMixerQoS(qos)
		return
	}
}

// handleAudioMixerQoS increments mixer drop counters with the dropped duration.
func (c *Controller) handleAudioMixerQoS(qosValues *gst.QoSValues) {
	c.stats.mixerDroppedAudioBuffers.Inc()
	c.stats.mixerDroppedAudioDuration.Add(qosValues.Duration)
}

// Debug info comes in the following format:
// file.c(line): method_name (): /GstPipeline:pipeline/GstBin:bin_name/GstElement:element_name:\nError message
var gstDebug = regexp.MustCompile("(?s)(.*?)GstPipeline:pipeline/GstBin:(.*?)/(.*?):([^:]*)(:\n)?(.*)")

// parseDebugInfo extracts the element type, element name, and trailing message
// from a GError's debug string using the gstDebug pattern above.
func parseDebugInfo(gErr *gst.GError) (element, name, message string) {
	match =
gstDebug.FindStringSubmatch(gErr.DebugString())
	if len(match) == 0 {
		return
	}
	element = match[3]
	name = match[4]
	message = match[6]
	return
}

const (
	fragmentLocation    = "location"
	fragmentRunningTime = "running-time"
)

// getSegmentParamsFromGstStructure reads the segment file path and running
// time from a splitmuxsink fragment message structure.
func getSegmentParamsFromGstStructure(s *gst.Structure) (filepath string, time uint64, err error) {
	loc, err := s.GetValue(fragmentLocation)
	if err != nil {
		return "", 0, err
	}
	filepath, ok := loc.(string)
	if !ok {
		return "", 0, errors.ErrGstPipelineError(errors.New("invalid type for location"))
	}

	t, err := s.GetValue(fragmentRunningTime)
	if err != nil {
		return "", 0, err
	}
	ti, ok := t.(uint64)
	if !ok {
		return "", 0, errors.ErrGstPipelineError(errors.New("invalid type for time"))
	}

	return filepath, ti, nil
}

// getFirstSampleMetadataFromGstStructure unmarshals the FirstSampleMetadata
// structure and converts its StartDate (unix nanos) to a time.Time.
func getFirstSampleMetadataFromGstStructure(s *gst.Structure) (startDate time.Time, err error) {
	firstSampleMetadata := builder.FirstSampleMetadata{}
	err = s.UnmarshalInto(&firstSampleMetadata)
	if err != nil {
		return time.Time{}, err
	}
	return time.Unix(0, firstSampleMetadata.StartDate), nil
}

const (
	gstMultiFileSinkFilename  = "filename"
	gstMultiFileSinkTimestamp = "timestamp"
)

// getImageInformationFromGstStructure reads the output filename and timestamp
// from a GstMultiFileSink element message structure.
func getImageInformationFromGstStructure(s *gst.Structure) (string, uint64, error) {
	loc, err := s.GetValue(gstMultiFileSinkFilename)
	if err != nil {
		return "", 0, err
	}
	filepath, ok := loc.(string)
	if !ok {
		return "", 0, errors.ErrGstPipelineError(errors.New("invalid type for location"))
	}

	t, err := s.GetValue(gstMultiFileSinkTimestamp)
	if err != nil {
		return "", 0, err
	}
	ti, ok := t.(uint64)
	if !ok {
		return "", 0, errors.ErrGstPipelineError(errors.New("invalid type for time"))
	}

	return filepath, ti, nil
}

// isQosForAudioMixer reports whether a QoS message came from an audio mixer
// sink pad, identified by the source/parent object names.
func isQosForAudioMixer(msg *gst.Message) bool {
	src := msg.SourceObject()
	if src == nil {
		return false
	}
	srcName := src.GetName()
	parent := src.GetParent()
	var parentName string
	if parent != nil {
		parentName = parent.GetName()
	}
	// a bit brittle as it relies on mixer name not being changed
	return strings.HasPrefix(srcName, "sink_") && strings.HasPrefix(parentName, "audiomixer")
}

================================================
FILE: pkg/server/integration.go
================================================
// Copyright 2026 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build integration

package server

import (
	"context"
	"time"

	"github.com/livekit/protocol/rpc"
)

// ReplayReady is an integration-build stub that reports the server as
// immediately ready with zero duration.
func (s *Server) ReplayReady(context.Context, *rpc.EgressReadyRequest) (*rpc.EgressReadyResponse, error) {
	return &rpc.EgressReadyResponse{
		StartAt:    time.Now().UnixNano(),
		DurationMs: 0,
	}, nil
}

================================================
FILE: pkg/server/server.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server

import (
	"encoding/json"
	"fmt"
	"io/fs"
	"net"
	"net/http"
	"os"
	"path"
	"time"

	"github.com/frostbyte73/core"
	"go.uber.org/atomic"
	"google.golang.org/grpc"

	"github.com/livekit/protocol/logger"
	"github.com/livekit/protocol/rpc"
	"github.com/livekit/psrpc"

	"github.com/livekit/egress/pkg/config"
	"github.com/livekit/egress/pkg/errors"
	"github.com/livekit/egress/pkg/info"
	"github.com/livekit/egress/pkg/ipc"
	"github.com/livekit/egress/pkg/service"
	"github.com/livekit/egress/pkg/stats"
	"github.com/livekit/egress/version"
)

// Server is the top-level egress service: it accepts psrpc egress requests,
// launches per-request handler processes, serves their IPC callbacks, and
// exposes metrics/debug endpoints.
type Server struct {
	ipc.UnimplementedEgressServiceServer

	conf *config.ServiceConfig

	service.ProcessManager
	*service.MetricsService
	*service.DebugService

	monitor          *stats.Monitor
	psrpcServer      rpc.EgressInternalServer
	ipcServiceServer *grpc.Server
	promServer       *http.Server

	ioClient info.SessionReporter

	activeRequests atomic.Int32
	terminating    core.Fuse
	shutdown       core.Fuse
}

// NewServer wires up the process manager, monitor, optional debug/prometheus
// handlers, the IPC grpc listener, and the psrpc server.
func NewServer(conf *config.ServiceConfig, bus psrpc.MessageBus, ioClient info.SessionReporter) (*Server, error) {
	pm := service.NewProcessManager()

	s := &Server{
		conf:             conf,
		ProcessManager:   pm,
		MetricsService:   service.NewMetricsService(pm),
		DebugService:     service.NewDebugService(pm),
		ipcServiceServer: grpc.NewServer(),
		ioClient:         ioClient,
	}
	// an unhealthy io client triggers a graceful (non-killing) shutdown
	ioClient.SetWatchdogHandler(func() {
		logger.Errorw("shutting down server on io client watchdog trigger", errors.New("io client failure"))
		s.Shutdown(false, false)
	})

	monitor, err := stats.NewMonitor(conf, s)
	if err != nil {
		return nil, err
	}
	s.monitor = monitor

	if conf.DebugHandlerPort > 0 {
		s.StartDebugHandlers(conf.DebugHandlerPort)
	}

	if conf.PrometheusPort > 0 {
		s.promServer = &http.Server{
			Addr:    fmt.Sprintf(":%d", conf.PrometheusPort),
			Handler: s.PromHandler(),
		}
		promListener, err := net.Listen("tcp", s.promServer.Addr)
		if err != nil {
			return nil, err
		}
		go func() {
			_ = s.promServer.Serve(promListener)
		}()
	}

	// per-node unix socket dir for handler IPC
	ipcSvcDir := path.Join(config.TmpDir, s.conf.NodeID)
	if err = os.MkdirAll(ipcSvcDir, 0755); err != nil {
		return nil, err
	}
	ipc.RegisterEgressServiceServer(s.ipcServiceServer, s)
	if err := ipc.StartServiceListener(s.ipcServiceServer, ipcSvcDir); err != nil {
		return nil, err
	}

	psrpcServer, err := rpc.NewEgressInternalServer(s, bus)
	if err != nil {
		return nil, err
	}
	if err = psrpcServer.RegisterListActiveEgressTopic(""); err != nil {
		return nil, err
	}
	s.psrpcServer = psrpcServer

	return s, nil
}

// StartTemplatesServer serves the embedded template assets on localhost, or
// is a no-op when TemplatePort is 0.
func (s *Server) StartTemplatesServer(fs fs.FS) error {
	if s.conf.TemplatePort == 0 {
		logger.Debugw("templates server disabled")
		return nil
	}

	h := http.FileServer(http.FS(fs))
	mux := http.NewServeMux()
	mux.Handle("/", h)

	go func() {
		addr := fmt.Sprintf("localhost:%d", s.conf.TemplatePort)
		logger.Debugw(fmt.Sprintf("starting template server on address %s", addr))
		_ = http.ListenAndServe(addr, mux)
	}()

	return nil
}

// Run registers for StartEgress requests and blocks until shutdown, then drains.
func (s *Server) Run() error {
	logger.Debugw("starting service", "version", version.Version)

	if err := s.psrpcServer.RegisterStartEgressTopic(s.conf.ClusterID); err != nil {
		return err
	}

	logger.Infow("service ready")

	<-s.shutdown.Watch()
	logger.Infow("draining")
	s.Drain()
	logger.Infow("service stopped")
	return nil
}

// Status returns a JSON snapshot of cpu availability plus process statuses.
func (s *Server) Status() ([]byte, error) {
	status := map[string]interface{}{
		"CpuLoad": s.monitor.GetAvailableCPU(),
	}
	s.GetStatus(status)
	return json.Marshal(status)
}

// IsIdle reports whether no egress requests are in flight.
func (s *Server) IsIdle() bool {
	return s.activeRequests.Load() == 0
}

// IsDisabled reports whether the server should reject new requests.
func (s *Server) IsDisabled() bool {
	return s.shutdown.IsBroken() || !s.ioClient.IsHealthy()
}

// IsTerminating reports whether a terminating shutdown was requested.
func (s *Server) IsTerminating() bool {
	return s.terminating.IsBroken()
}

// Shutdown stops accepting StartEgress requests; kill additionally terminates
// all running handler processes.
func (s *Server) Shutdown(terminating, kill bool) {
	if terminating {
		s.terminating.Break()
	}
	s.shutdown.Once(func() {
		s.psrpcServer.DeregisterStartEgressTopic(s.conf.ClusterID)
	})
	if kill {
		s.KillAll()
	}
}

// Drain waits for all active requests to finish, then shuts down psrpc and
// drains the io client.
func (s *Server) Drain() {
	for !s.IsIdle() {
		time.Sleep(time.Second)
	}
	s.psrpcServer.Shutdown()
	logger.Infow("draining io client")
	s.ioClient.Drain()
}

================================================
FILE: pkg/server/server_ipc.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package server

import (
	"context"
	"net/http"

	"google.golang.org/protobuf/types/known/emptypb"

	"github.com/livekit/egress/pkg/errors"
	"github.com/livekit/egress/pkg/ipc"
	"github.com/livekit/protocol/livekit"
	"github.com/livekit/protocol/logger"
)

// HandlerReady is called by a handler process once it has started; marks the
// egress as started in the process manager.
func (s *Server) HandlerReady(_ context.Context, req *ipc.HandlerReadyRequest) (*emptypb.Empty, error) {
	logger.Debugw("handler ready", "egressID", req.EgressId)

	if err := s.HandlerStarted(req.EgressId); err != nil {
		return nil, err
	}

	logger.Debugw("handler ready completed", "egressID", req.EgressId)
	return &emptypb.Empty{}, nil
}

// HandlerUpdate forwards an egress status update to the io client; an internal
// error code from the handler triggers a service shutdown.
func (s *Server) HandlerUpdate(_ context.Context, info *livekit.EgressInfo) (*emptypb.Empty, error) {
	logger.Debugw("handler update", "egressID", info.EgressId)

	if err := s.ioClient.UpdateEgress(context.Background(), info); err != nil {
		logger.Errorw("failed to update egress", err, "egressID", info.EgressId)
	}
	if info.ErrorCode == int32(http.StatusInternalServerError) {
		logger.Errorw("internal error, shutting down", errors.New(info.Error))
		s.Shutdown(false, false)
	}

	logger.Debugw("handler update completed", "egressID", info.EgressId)
	return &emptypb.Empty{}, nil
}

// HandlerFinished forwards the final egress info update and stores the
// handler's end-of-process metrics. Errors on either are logged, not returned.
func (s *Server) HandlerFinished(_ context.Context, req *ipc.HandlerFinishedRequest) (*emptypb.Empty, error) {
	logger.Debugw("handler finished", "egressID", req.EgressId)

	if err := s.ioClient.UpdateEgress(context.Background(), req.Info); err != nil {
		logger.Errorw("failed to update egress", err, "egressID", req.EgressId)
	}
	if err := s.StoreProcessEndedMetrics(req.EgressId, req.Metrics); err != nil {
		logger.Errorw("failed to store metrics", err, "egressID", req.EgressId)
	}

	logger.Debugw("handler finished completed", "egressID", req.EgressId)
	return &emptypb.Empty{}, nil
}

// StorageEvent is a no-op acknowledgement of handler storage events.
func (s *Server) StorageEvent(_ context.Context, _ *ipc.StorageEventRequest) (*emptypb.Empty, error) {
	return &emptypb.Empty{}, nil
}

================================================
FILE: pkg/server/server_rpc.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server

import (
	"context"
	"net/http"
	"os"
	"os/exec"
	"path"
	"syscall"
	"time"

	"google.golang.org/protobuf/encoding/protojson"
	"gopkg.in/yaml.v3"

	"go.opentelemetry.io/otel"

	"github.com/livekit/protocol/egress"
	"github.com/livekit/protocol/livekit"
	"github.com/livekit/protocol/logger"
	"github.com/livekit/protocol/rpc"
	"github.com/livekit/protocol/utils"

	"github.com/livekit/egress/pkg/config"
	"github.com/livekit/egress/pkg/errors"
	"github.com/livekit/egress/pkg/logging"
)

var (
	tracer = otel.Tracer("github.com/livekit/egress/pkg/server")
)

// StartEgress validates an egress request, records it with the IO service,
// and launches a dedicated handler process for it. activeRequests is
// incremented for the lifetime of the egress and decremented by processEnded.
// Returns the initial EgressInfo on success.
func (s *Server) StartEgress(ctx context.Context, req *rpc.StartEgressRequest) (*livekit.EgressInfo, error) {
	s.activeRequests.Inc()

	ctx, span := tracer.Start(ctx, "Service.StartEgress")
	defer span.End()

	if s.IsDisabled() {
		s.activeRequests.Dec()
		return nil, errors.ErrShuttingDown
	}
	if s.AlreadyExists(req.EgressId) {
		s.activeRequests.Dec()
		return nil, errors.ErrEgressAlreadyExists
	}
	if err := s.monitor.AcceptRequest(req); err != nil {
		// not enough resources to accept this request
		s.activeRequests.Dec()
		return nil, err
	}

	logger.Infow("request received", "egressID", req.EgressId)

	p, err := config.GetValidatedPipelineConfig(s.conf, req)
	if err != nil {
		s.monitor.EgressAborted(req)
		s.activeRequests.Dec()
		return nil, err
	}

	// replay requests carry their type information one level deeper
	var typesInput any = p.Info.Request
	if e, ok := p.Info.Request.(*livekit.EgressInfo_Replay); ok {
		typesInput = e.Replay
	}
	requestType, outputType := egress.GetTypes(typesInput)
	logger.Infow("request validated",
		"egressID", req.EgressId,
		"requestType", requestType,
		"sourceType", p.Info.SourceType,
		"outputType", outputType,
		"room", p.Info.RoomName,
		"request", p.Info.Request,
	)

	// create the egress record and launch the handler concurrently
	errChan := s.ioClient.CreateEgress(ctx, p.Info)
	launchErr := s.launchProcess(req, p.Info)
	createErr := <-errChan

	switch {
	case launchErr != nil && createErr != nil:
		// record was never saved; clean up without reporting a process error
		s.processEnded(req, p.Info, nil)
		return nil, launchErr

	case launchErr != nil:
		// launch failed - clean up and report
		s.processEnded(req, p.Info, launchErr)
		return nil, launchErr

	case createErr != nil:
		// launched but failed to save - abort and return error
		p.Info.Error = createErr.Error()
		p.Info.ErrorCode = int32(http.StatusInternalServerError)
		s.AbortProcess(req.EgressId, createErr)
		return nil, createErr

	default:
		return p.Info, nil
	}
}

// launchProcess starts an `egress run-handler` subprocess for the request in
// its own session (Setsid), wires its output to a handler logger, and waits
// for the handler to report ready (via s.Launch). A goroutine reaps the
// process and triggers cleanup when it exits.
func (s *Server) launchProcess(req *rpc.StartEgressRequest, info *livekit.EgressInfo) error {
	_, span := tracer.Start(context.Background(), "Service.launchProcess")
	defer span.End()

	s.monitor.EgressStarted(req)

	handlerID := utils.NewGuid("EGH_")
	p := &config.PipelineConfig{
		BaseConfig: s.conf.BaseConfig,
		HandlerID:  handlerID,
		TmpDir:     path.Join(config.TmpDir, req.EgressId),
	}

	confString, err := yaml.Marshal(p)
	if err != nil {
		span.RecordError(err)
		logger.Errorw("could not marshal config", err)
		return err
	}

	reqString, err := protojson.Marshal(req)
	if err != nil {
		span.RecordError(err)
		logger.Errorw("could not marshal request", err)
		return err
	}

	cmd := exec.Command("egress",
		"run-handler",
		"--config", string(confString),
		"--request", string(reqString),
	)
	cmd.Dir = "/"
	l := logging.NewHandlerLogger(handlerID, req.EgressId)
	cmd.Stdout = l
	cmd.Stderr = l
	// run in its own session so signals to the server don't reach the handler
	cmd.SysProcAttr = &syscall.SysProcAttr{Setsid: true}

	if err = s.Launch(context.Background(), handlerID, req, info, cmd); err != nil {
		return err
	}

	s.monitor.UpdatePID(info.EgressId, cmd.Process.Pid)
	go func() {
		// FIX: use a goroutine-local variable. The previous code assigned to
		// the function-scoped err after launchProcess had already returned,
		// racing with the caller's read of that variable.
		waitErr := cmd.Wait()
		_ = l.Close()
		s.processEnded(req, info, waitErr)
	}()

	return nil
}

// processEnded finalizes an egress after its handler process exits. A non-nil
// err means the process died without completing normally; the egress is
// marked failed and the IO service is updated. Always releases monitor
// resources, removes the egress tmp dir, and decrements activeRequests.
func (s *Server) processEnded(req *rpc.StartEgressRequest, info *livekit.EgressInfo, err error) {
	if err != nil {
		// should only happen if the process failed catastrophically
		now := time.Now().UnixNano()
		info.UpdatedAt = now
		info.EndedAt = now
		info.Status = livekit.EgressStatus_EGRESS_FAILED
		if info.Error == "" {
			info.Error = err.Error()
			info.ErrorCode = int32(http.StatusInternalServerError)
		}
		// best-effort: the process is already gone, nothing to do on failure
		_ = s.ioClient.UpdateEgress(context.Background(), info)
		logger.Errorw("process failed", err, "egressID", info.EgressId)
	}

	avgCPU, maxCPU, maxMemory := s.monitor.EgressEnded(req)
	if maxCPU > 0 {
		logger.Debugw("egress metrics",
			"egressID", info.EgressId,
			"avgCPU", avgCPU,
			"maxCPU", maxCPU,
			"maxMemory", maxMemory,
		)
	}

	// Make sure we delete all the handler context regardless of the handler termination status
	tmpDir := path.Join(config.TmpDir, req.EgressId)
	os.RemoveAll(tmpDir)

	s.ProcessFinished(info.EgressId)
	s.activeRequests.Dec()
}

// StartEgressAffinity scores this instance for a request. -1 means it cannot
// accept; higher values are preferred by the request router.
func (s *Server) StartEgressAffinity(_ context.Context, req *rpc.StartEgressRequest) float32 {
	if s.IsDisabled() || !s.monitor.CanAcceptRequest(req) {
		// cannot accept
		return -1
	}

	if s.activeRequests.Load() == 0 {
		// group multiple track and track composite requests.
		// if this instance is idle and another is already handling some, the request will go to that server.
		// this avoids having many instances with one track request each, taking availability from room composite.
		return 0.5
	}

	// already handling a request and has available cpu
	return 1
}

// ListActiveEgress returns the IDs of all egresses currently tracked by this
// instance.
func (s *Server) ListActiveEgress(ctx context.Context, _ *rpc.ListActiveEgressRequest) (*rpc.ListActiveEgressResponse, error) {
	_, span := tracer.Start(ctx, "Service.ListActiveEgress")
	defer span.End()

	return &rpc.ListActiveEgressResponse{
		EgressIds: s.GetActiveEgressIDs(),
	}, nil
}

================================================
FILE: pkg/service/debug.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package service

import (
	"context"
	"fmt"
	"net/http"
	"strconv"
	"strings"

	"github.com/livekit/protocol/logger"
	"github.com/livekit/protocol/pprof"
	"github.com/livekit/psrpc"

	"github.com/livekit/egress/pkg/errors"
	"github.com/livekit/egress/pkg/ipc"
)

const (
	gstPipelineDotFileApp = "gst_pipeline"
	pprofApp              = "pprof"
)

// DebugService serves gstreamer pipeline dot files and pprof profiles over
// HTTP, fetching handler-side data via each handler's IPC client.
type DebugService struct {
	pm ProcessManager
}

// NewDebugService returns a DebugService backed by the given process manager.
func NewDebugService(pm ProcessManager) *DebugService {
	return &DebugService{
		pm: pm,
	}
}

// StartDebugHandlers starts the debug HTTP server on the given port in a
// background goroutine. A port of 0 disables the debug handlers.
func (s *DebugService) StartDebugHandlers(port int) {
	if port == 0 {
		logger.Debugw("debug handler disabled")
		return
	}

	mux := http.NewServeMux()
	mux.HandleFunc(fmt.Sprintf("/%s/", gstPipelineDotFileApp), s.handleGstPipelineDotFile)
	mux.HandleFunc(fmt.Sprintf("/%s/", pprofApp), s.handlePProf)

	go func() {
		addr := fmt.Sprintf(":%d", port)
		logger.Debugw(fmt.Sprintf("starting debug handler on address %s", addr))
		_ = http.ListenAndServe(addr, mux)
	}()
}

// URL path format is "/<app>/<egress_id>"
func (s *DebugService) handleGstPipelineDotFile(w http.ResponseWriter, r *http.Request) {
	pathElements := strings.Split(r.URL.Path, "/")
	if len(pathElements) < 3 {
		http.Error(w, "malformed url", http.StatusNotFound)
		return
	}

	egressID := pathElements[2]
	dotFile, err := s.GetGstPipelineDotFile(egressID)
	if err != nil {
		http.Error(w, err.Error(), getErrorCode(err))
		return
	}
	_, _ = w.Write([]byte(dotFile))
}

// GetGstPipelineDotFile requests the current pipeline dot file from the
// handler process running the given egress.
func (s *DebugService) GetGstPipelineDotFile(egressID string) (string, error) {
	c, err := s.pm.GetGRPCClient(egressID)
	if err != nil {
		return "", err
	}
	res, err := c.GetPipelineDot(context.Background(), &ipc.GstPipelineDebugDotRequest{})
	if err != nil {
		return "", err
	}
	return res.DotFile, nil
}

// URL path format is "/<app>/<egress_id>/<profile_name>" to profile a handler,
// or "/<app>/<profile_name>" to profile the service itself
func (s *DebugService) handlePProf(w http.ResponseWriter, r *http.Request) {
	var err error
	var b []byte

	timeout, err := strconv.ParseInt(r.URL.Query().Get("timeout"), 10, 32)
	if err != nil {
		http.Error(w, "bad timeout parameter", http.StatusBadRequest)
		return
	}
	debug, err := strconv.ParseInt(r.URL.Query().Get("debug"), 10, 32)
	if err != nil {
		http.Error(w, "bad debug parameter", http.StatusBadRequest)
		return
	}

	pathElements := strings.Split(r.URL.Path, "/")
	switch len(pathElements) {
	case 3:
		// profile main service
		b, err = pprof.GetProfileData(context.Background(), pathElements[2], int(timeout), int(debug))

	case 4:
		// profile the handler process running the given egress
		egressID := pathElements[2]
		c, clientErr := s.pm.GetGRPCClient(egressID)
		if clientErr != nil {
			http.Error(w, "handler not found", http.StatusNotFound)
			return
		}
		res, pprofErr := c.GetPProf(context.Background(), &ipc.PProfRequest{
			ProfileName: pathElements[3],
			Timeout:     int32(timeout),
			Debug:       int32(debug),
		})
		if pprofErr != nil {
			// FIX: propagate the failure to the shared error handling below.
			// The previous code shadowed err with :=, so a GetPProf error was
			// silently dropped and an empty 200 response was written.
			err = pprofErr
		} else {
			b = res.PprofFile
		}

	default:
		http.Error(w, "malformed url", http.StatusNotFound)
		return
	}

	if err == nil {
		w.Header().Add("Content-Type", "application/octet-stream")
		_, err = w.Write(b)
	}
	if err != nil {
		http.Error(w, err.Error(), getErrorCode(err))
		return
	}
}

// getErrorCode maps an error to an HTTP status code: psrpc errors carry their
// own code, nil is OK, anything else is a 500.
func getErrorCode(err error) int {
	var e psrpc.Error

	switch {
	case errors.As(err, &e):
		return e.ToHttp()
	case err == nil:
		return http.StatusOK
	default:
		return http.StatusInternalServerError
	}
}

================================================
FILE: pkg/service/metrics.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package service

import (
	"context"
	"net/http"
	"strings"

	"github.com/linkdata/deadlock"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
	"golang.org/x/exp/maps"

	"github.com/livekit/protocol/logger"

	"go.opentelemetry.io/otel"
)

// MetricsService aggregates Prometheus metrics from the service itself, from
// live handler processes (via the process manager's gatherers), and from
// final metric snapshots of handlers that have already exited.
type MetricsService struct {
	pm ProcessManager

	mu deadlock.Mutex
	// final metric families from ended handlers; drained on each scrape
	pendingMetrics []*dto.MetricFamily
}

var (
	tracer = otel.Tracer("github.com/livekit/egress/pkg/service")
)

// NewMetricsService registers an expanded Go runtime collector and returns a
// MetricsService backed by the given process manager.
func NewMetricsService(pm ProcessManager) *MetricsService {
	// replace the default Go collector with one exposing all runtime metrics
	prometheus.Unregister(collectors.NewGoCollector())
	prometheus.MustRegister(collectors.NewGoCollector(collectors.WithGoCollectorRuntimeMetrics(collectors.MetricsAll)))

	return &MetricsService{
		pm: pm,
	}
}

// PromHandler returns an instrumented HTTP handler serving the aggregated
// metrics.
func (s *MetricsService) PromHandler() http.Handler {
	return promhttp.InstrumentMetricHandler(
		prometheus.DefaultRegisterer, promhttp.HandlerFor(s.CreateGatherer(), promhttp.HandlerOpts{}),
	)
}

// CreateGatherer builds a composite gatherer combining the default registry,
// pending metrics from ended handlers (consumed exactly once), and the live
// handler gatherers.
func (s *MetricsService) CreateGatherer() prometheus.Gatherer {
	return prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) {
		_, span := tracer.Start(context.Background(), "Service.GathererOfHandlerMetrics")
		defer span.End()

		gatherers := prometheus.Gatherers{}
		// Include the default repo
		gatherers = append(gatherers, prometheus.DefaultGatherer)
		// Include Process ended ms
		gatherers = append(gatherers, prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) {
			// drain pending metrics so each snapshot is reported only once
			s.mu.Lock()
			m := s.pendingMetrics
			s.pendingMetrics = nil
			s.mu.Unlock()
			return m, nil
		}))
		gatherers = append(gatherers, s.pm.GetGatherers()...)

		return gatherers.Gather()
	})
}

// StoreProcessEndedMetrics parses a handler's final metrics snapshot and
// queues it for the next scrape.
func (s *MetricsService) StoreProcessEndedMetrics(egressID string, metrics string) error {
	m, err := deserializeMetrics(egressID, metrics)
	if err != nil {
		return err
	}

	s.mu.Lock()
	s.pendingMetrics = append(s.pendingMetrics, m...)
	s.mu.Unlock()

	return nil
}

// deserializeMetrics parses text-format metrics from a handler and tags every
// metric with the egress ID. Parse failures are logged and yield an empty
// result rather than an error.
func deserializeMetrics(egressID string, s string) ([]*dto.MetricFamily, error) {
	parser := expfmt.NewTextParser(model.LegacyValidation)
	families, err := parser.TextToMetricFamilies(strings.NewReader(s))
	if err != nil {
		logger.Warnw("failed to parse ms from handler", err, "egress_id", egressID)
		return make([]*dto.MetricFamily, 0), nil // don't return an error, just skip this handler
	}

	// Add an egress_id label to every metric all the families, if it doesn't already have one
	applyDefaultLabel(families, egressID)

	return maps.Values(families), nil
}

// applyDefaultLabel adds an egress_id label to every metric that does not
// already carry one.
func applyDefaultLabel(families map[string]*dto.MetricFamily, egressID string) {
	egressIDLabel := "egress_id"
	egressLabelPair := &dto.LabelPair{
		Name:  &egressIDLabel,
		Value: &egressID,
	}

	for _, family := range families {
		for _, metric := range family.Metric {
			// CONSISTENCY FIX: compare against egressIDLabel instead of a
			// second hard-coded "egress_id" literal; the redundant nil-slice
			// allocation before append was also dropped (append handles nil).
			found := false
			for _, label := range metric.Label {
				if label.GetName() == egressIDLabel {
					found = true
					break
				}
			}
			if !found {
				metric.Label = append(metric.Label, egressLabelPair)
			}
		}
	}
}

================================================
FILE: pkg/service/process.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package service

import (
	"context"
	"os"
	"os/exec"
	"path"
	"syscall"
	"time"

	"github.com/frostbyte73/core"
	"github.com/linkdata/deadlock"
	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"

	"github.com/livekit/egress/pkg/config"
	"github.com/livekit/egress/pkg/errors"
	"github.com/livekit/egress/pkg/ipc"
	"github.com/livekit/protocol/livekit"
	"github.com/livekit/protocol/logger"
	"github.com/livekit/protocol/rpc"
)

// launchTimeout bounds how long Launch waits for a handler to report ready.
const launchTimeout = 10 * time.Second

//go:generate go tool github.com/maxbrunsfeld/counterfeiter/v6 . ProcessManager
type ProcessManager interface {
	Launch(ctx context.Context, handlerID string, req *rpc.StartEgressRequest, info *livekit.EgressInfo, cmd *exec.Cmd) error
	GetContext(egressID string) context.Context
	AlreadyExists(egressID string) bool
	HandlerStarted(egressID string) error
	GetActiveEgressIDs() []string
	GetStatus(info map[string]interface{})
	GetGatherers() []prometheus.Gatherer
	GetGRPCClient(egressID string) (ipc.EgressHandlerClient, error)
	KillAll()
	AbortProcess(egressID string, err error)
	KillProcess(egressID string, err error)
	ProcessFinished(egressID string)
}

// processManager tracks running handler processes keyed by egress ID.
type processManager struct {
	mu             deadlock.RWMutex
	activeHandlers map[string]*Process
}

// NewProcessManager returns an empty ProcessManager.
func NewProcessManager() ProcessManager {
	return &processManager{
		activeHandlers: make(map[string]*Process),
	}
}

// Launch registers a handler process, starts its command, and waits up to
// launchTimeout for the handler to report ready over IPC. On timeout the
// process is killed and ErrHandlerFailedToStart is returned; the caller is
// responsible for removing the entry via ProcessFinished/processEnded.
func (pm *processManager) Launch(
	ctx context.Context,
	handlerID string,
	req *rpc.StartEgressRequest,
	info *livekit.EgressInfo,
	cmd *exec.Cmd,
) error {
	ipcHandlerDir := path.Join(config.TmpDir, handlerID)
	if err := os.MkdirAll(ipcHandlerDir, 0755); err != nil {
		return err
	}

	ipcClient, err := ipc.NewHandlerClient(ipcHandlerDir)
	if err != nil {
		return err
	}

	p := &Process{
		ctx:              ctx,
		handlerID:        handlerID,
		req:              req,
		info:             info,
		cmd:              cmd,
		ipcHandlerClient: ipcClient,
		ready:            make(chan struct{}),
	}

	// register before starting so HandlerStarted can find the process
	pm.mu.Lock()
	pm.activeHandlers[info.EgressId] = p
	pm.mu.Unlock()

	if err = cmd.Start(); err != nil {
		logger.Errorw("could not launch process", err)
		return err
	}

	select {
	case <-p.ready:
		return nil
	case <-time.After(launchTimeout):
		logger.Warnw("no response from handler", nil, "egressID", info.EgressId)
		_ = cmd.Process.Kill()
		_ = cmd.Wait()
		return errors.ErrHandlerFailedToStart
	}
}

// GetContext returns the launch context of the given egress, or a background
// context if it is not active.
func (pm *processManager) GetContext(egressID string) context.Context {
	pm.mu.RLock()
	defer pm.mu.RUnlock()

	if p, ok := pm.activeHandlers[egressID]; ok {
		return p.ctx
	}
	return context.Background()
}

// AlreadyExists reports whether an egress with this ID is already active.
func (pm *processManager) AlreadyExists(egressID string) bool {
	pm.mu.RLock()
	defer pm.mu.RUnlock()

	_, ok := pm.activeHandlers[egressID]
	return ok
}

// HandlerStarted unblocks Launch by closing the process's ready channel.
// NOTE(review): a second call for the same egress would close the channel
// twice and panic - assumed to be called at most once per handler.
func (pm *processManager) HandlerStarted(egressID string) error {
	pm.mu.RLock()
	defer pm.mu.RUnlock()

	if p, ok := pm.activeHandlers[egressID]; ok {
		close(p.ready)
		return nil
	}
	return errors.ErrEgressNotFound
}

// GetActiveEgressIDs returns the IDs of all active handlers.
func (pm *processManager) GetActiveEgressIDs() []string {
	pm.mu.RLock()
	defer pm.mu.RUnlock()

	egressIDs := make([]string, 0, len(pm.activeHandlers))
	for egressID := range pm.activeHandlers {
		egressIDs = append(egressIDs, egressID)
	}
	return egressIDs
}

// GetStatus fills info with the original request of each active egress.
func (pm *processManager) GetStatus(info map[string]interface{}) {
	pm.mu.RLock()
	defer pm.mu.RUnlock()

	for _, h := range pm.activeHandlers {
		info[h.req.EgressId] = h.req.Request
	}
}

// GetGatherers returns one prometheus.Gatherer per active handler.
func (pm *processManager) GetGatherers() []prometheus.Gatherer {
	pm.mu.RLock()
	defer pm.mu.RUnlock()

	handlers := make([]prometheus.Gatherer, 0, len(pm.activeHandlers))
	for _, p := range pm.activeHandlers {
		handlers = append(handlers, p)
	}
	return handlers
}

// GetGRPCClient returns the IPC client of the given egress's handler.
func (pm *processManager) GetGRPCClient(egressID string) (ipc.EgressHandlerClient, error) {
	pm.mu.RLock()
	defer pm.mu.RUnlock()

	h, ok := pm.activeHandlers[egressID]
	if !ok {
		return nil, errors.ErrEgressNotFound
	}
	return h.ipcHandlerClient, nil
}

// KillAll asks every active handler to shut down.
func (pm *processManager) KillAll() {
	pm.mu.RLock()
	defer pm.mu.RUnlock()

	for _, h := range pm.activeHandlers {
		h.kill(errors.ErrShuttingDown)
	}
}

// AbortProcess kills the handler for the given egress, closes its IPC client,
// and removes it from the active set.
func (pm *processManager) AbortProcess(egressID string, err error) {
	logger.Infow("aborting egress", err, "egressID", egressID)

	// FIX: this method deletes from activeHandlers, which is a map write and
	// requires the write lock. The previous code held only RLock, making the
	// delete a data race against concurrent readers/writers.
	pm.mu.Lock()
	defer pm.mu.Unlock()

	if h, ok := pm.activeHandlers[egressID]; ok {
		logger.Warnw("aborting handler", err, "egressID", egressID)
		h.kill(err)
		h.ipcHandlerClient.Close()
		delete(pm.activeHandlers, egressID)
	}
	logger.Infow("aborting egress completed", "egressID", egressID)
}

// KillProcess asks the handler for the given egress to shut down, but leaves
// it in the active set (cleanup happens in ProcessFinished).
func (pm *processManager) KillProcess(egressID string, err error) {
	logger.Infow("killing egress", err, "egressID", egressID)

	pm.mu.RLock()
	defer pm.mu.RUnlock()

	if h, ok := pm.activeHandlers[egressID]; ok {
		logger.Errorw("killing handler", err, "egressID", egressID)
		h.kill(err)
	}
	logger.Infow("killing egress completed", "egressID", egressID)
}

// ProcessFinished closes the handler's IPC client and removes it from the
// active set once its process has exited.
func (pm *processManager) ProcessFinished(egressID string) {
	logger.Debugw("process finished", "egressID", egressID)
	pm.mu.Lock()
	defer pm.mu.Unlock()

	p, ok := pm.activeHandlers[egressID]
	if ok {
		logger.Debugw("process finished, closing handler client", "egressID", egressID)
		p.ipcHandlerClient.Close()
		p.closed.Break()
	}
	delete(pm.activeHandlers, egressID)
	logger.Debugw("process finished, deleted from active handlers", "egressID", egressID)
}

// Process is a single running handler subprocess and its IPC connection.
type Process struct {
	ctx              context.Context
	handlerID        string
	req              *rpc.StartEgressRequest
	info             *livekit.EgressInfo
	cmd              *exec.Cmd
	ipcHandlerClient *ipc.EgressHandlerClientWrapper
	ready            chan struct{}
	closed           core.Fuse
}

// Gather implements the prometheus.Gatherer interface on server-side to allow aggregation of handler ms
func (p *Process) Gather() ([]*dto.MetricFamily, error) {
	// Get the ms from the handler via IPC
	metricsResponse, err := p.ipcHandlerClient.GetMetrics(context.Background(), &ipc.MetricsRequest{})
	if err != nil {
		// only warn if the process is still expected to be alive
		if !p.closed.IsBroken() {
			logger.Warnw("failed to obtain ms from handler", err, "egressID", p.req.EgressId)
		}
		return make([]*dto.MetricFamily, 0), nil // don't return an error, just skip this handler
	}

	// Parse the result to match the Gatherer interface
	return deserializeMetrics(p.info.EgressId, metricsResponse.Metrics)
}

// kill asks the handler to stop via IPC, falling back to SIGINT if the IPC
// call fails. Runs at most once per process.
func (p *Process) kill(e error) {
	p.closed.Once(func() {
		if _, err := p.ipcHandlerClient.KillEgress(p.ctx, &ipc.KillEgressRequest{
			Error: e.Error(),
		}); err != nil {
			if err = p.cmd.Process.Signal(syscall.SIGINT); err != nil {
				logger.Errorw("failed to kill Process", err, "egressID", p.req.EgressId)
			}
		}
	})
}

================================================
FILE: pkg/service/servicefakes/fake_process_manager.go
================================================
// Code generated by counterfeiter. DO NOT EDIT.
package servicefakes

import (
	"context"
	"os/exec"
	"sync"

	"github.com/livekit/egress/pkg/ipc"
	"github.com/livekit/egress/pkg/service"
	"github.com/livekit/protocol/livekit"
	"github.com/livekit/protocol/rpc"
	"github.com/prometheus/client_golang/prometheus"
)

type FakeProcessManager struct {
	AbortProcessStub        func(string, error)
	abortProcessMutex       sync.RWMutex
	abortProcessArgsForCall []struct {
		arg1 string
		arg2 error
	}
	AlreadyExistsStub        func(string) bool
	alreadyExistsMutex       sync.RWMutex
	alreadyExistsArgsForCall []struct {
		arg1 string
	}
	alreadyExistsReturns struct {
		result1 bool
	}
	alreadyExistsReturnsOnCall map[int]struct {
		result1 bool
	}
	GetActiveEgressIDsStub        func() []string
	getActiveEgressIDsMutex       sync.RWMutex
	getActiveEgressIDsArgsForCall []struct {
	}
	getActiveEgressIDsReturns struct {
		result1 []string
	}
	getActiveEgressIDsReturnsOnCall map[int]struct {
		result1 []string
	}
	GetContextStub        func(string) context.Context
	getContextMutex       sync.RWMutex
	getContextArgsForCall []struct {
		arg1 string
	}
	getContextReturns struct {
		result1 context.Context
	}
	getContextReturnsOnCall map[int]struct {
		result1 context.Context
	}
	GetGRPCClientStub        func(string) (ipc.EgressHandlerClient, error)
	getGRPCClientMutex       sync.RWMutex
	getGRPCClientArgsForCall []struct {
		arg1 string
	}
	getGRPCClientReturns struct {
		result1 ipc.EgressHandlerClient
		result2 error
	}
	getGRPCClientReturnsOnCall map[int]struct {
		result1 ipc.EgressHandlerClient
		result2 error
	}
	GetGatherersStub        func() []prometheus.Gatherer
	getGatherersMutex       sync.RWMutex
	getGatherersArgsForCall []struct {
	}
getGatherersReturns struct { result1 []prometheus.Gatherer } getGatherersReturnsOnCall map[int]struct { result1 []prometheus.Gatherer } GetStatusStub func(map[string]interface{}) getStatusMutex sync.RWMutex getStatusArgsForCall []struct { arg1 map[string]interface{} } HandlerStartedStub func(string) error handlerStartedMutex sync.RWMutex handlerStartedArgsForCall []struct { arg1 string } handlerStartedReturns struct { result1 error } handlerStartedReturnsOnCall map[int]struct { result1 error } KillAllStub func() killAllMutex sync.RWMutex killAllArgsForCall []struct { } KillProcessStub func(string, error) killProcessMutex sync.RWMutex killProcessArgsForCall []struct { arg1 string arg2 error } LaunchStub func(context.Context, string, *rpc.StartEgressRequest, *livekit.EgressInfo, *exec.Cmd) error launchMutex sync.RWMutex launchArgsForCall []struct { arg1 context.Context arg2 string arg3 *rpc.StartEgressRequest arg4 *livekit.EgressInfo arg5 *exec.Cmd } launchReturns struct { result1 error } launchReturnsOnCall map[int]struct { result1 error } ProcessFinishedStub func(string) processFinishedMutex sync.RWMutex processFinishedArgsForCall []struct { arg1 string } invocations map[string][][]interface{} invocationsMutex sync.RWMutex } func (fake *FakeProcessManager) AbortProcess(arg1 string, arg2 error) { fake.abortProcessMutex.Lock() fake.abortProcessArgsForCall = append(fake.abortProcessArgsForCall, struct { arg1 string arg2 error }{arg1, arg2}) stub := fake.AbortProcessStub fake.recordInvocation("AbortProcess", []interface{}{arg1, arg2}) fake.abortProcessMutex.Unlock() if stub != nil { fake.AbortProcessStub(arg1, arg2) } } func (fake *FakeProcessManager) AbortProcessCallCount() int { fake.abortProcessMutex.RLock() defer fake.abortProcessMutex.RUnlock() return len(fake.abortProcessArgsForCall) } func (fake *FakeProcessManager) AbortProcessCalls(stub func(string, error)) { fake.abortProcessMutex.Lock() defer fake.abortProcessMutex.Unlock() fake.AbortProcessStub = stub } 
func (fake *FakeProcessManager) AbortProcessArgsForCall(i int) (string, error) { fake.abortProcessMutex.RLock() defer fake.abortProcessMutex.RUnlock() argsForCall := fake.abortProcessArgsForCall[i] return argsForCall.arg1, argsForCall.arg2 } func (fake *FakeProcessManager) AlreadyExists(arg1 string) bool { fake.alreadyExistsMutex.Lock() ret, specificReturn := fake.alreadyExistsReturnsOnCall[len(fake.alreadyExistsArgsForCall)] fake.alreadyExistsArgsForCall = append(fake.alreadyExistsArgsForCall, struct { arg1 string }{arg1}) stub := fake.AlreadyExistsStub fakeReturns := fake.alreadyExistsReturns fake.recordInvocation("AlreadyExists", []interface{}{arg1}) fake.alreadyExistsMutex.Unlock() if stub != nil { return stub(arg1) } if specificReturn { return ret.result1 } return fakeReturns.result1 } func (fake *FakeProcessManager) AlreadyExistsCallCount() int { fake.alreadyExistsMutex.RLock() defer fake.alreadyExistsMutex.RUnlock() return len(fake.alreadyExistsArgsForCall) } func (fake *FakeProcessManager) AlreadyExistsCalls(stub func(string) bool) { fake.alreadyExistsMutex.Lock() defer fake.alreadyExistsMutex.Unlock() fake.AlreadyExistsStub = stub } func (fake *FakeProcessManager) AlreadyExistsArgsForCall(i int) string { fake.alreadyExistsMutex.RLock() defer fake.alreadyExistsMutex.RUnlock() argsForCall := fake.alreadyExistsArgsForCall[i] return argsForCall.arg1 } func (fake *FakeProcessManager) AlreadyExistsReturns(result1 bool) { fake.alreadyExistsMutex.Lock() defer fake.alreadyExistsMutex.Unlock() fake.AlreadyExistsStub = nil fake.alreadyExistsReturns = struct { result1 bool }{result1} } func (fake *FakeProcessManager) AlreadyExistsReturnsOnCall(i int, result1 bool) { fake.alreadyExistsMutex.Lock() defer fake.alreadyExistsMutex.Unlock() fake.AlreadyExistsStub = nil if fake.alreadyExistsReturnsOnCall == nil { fake.alreadyExistsReturnsOnCall = make(map[int]struct { result1 bool }) } fake.alreadyExistsReturnsOnCall[i] = struct { result1 bool }{result1} } func (fake 
*FakeProcessManager) GetActiveEgressIDs() []string { fake.getActiveEgressIDsMutex.Lock() ret, specificReturn := fake.getActiveEgressIDsReturnsOnCall[len(fake.getActiveEgressIDsArgsForCall)] fake.getActiveEgressIDsArgsForCall = append(fake.getActiveEgressIDsArgsForCall, struct { }{}) stub := fake.GetActiveEgressIDsStub fakeReturns := fake.getActiveEgressIDsReturns fake.recordInvocation("GetActiveEgressIDs", []interface{}{}) fake.getActiveEgressIDsMutex.Unlock() if stub != nil { return stub() } if specificReturn { return ret.result1 } return fakeReturns.result1 } func (fake *FakeProcessManager) GetActiveEgressIDsCallCount() int { fake.getActiveEgressIDsMutex.RLock() defer fake.getActiveEgressIDsMutex.RUnlock() return len(fake.getActiveEgressIDsArgsForCall) } func (fake *FakeProcessManager) GetActiveEgressIDsCalls(stub func() []string) { fake.getActiveEgressIDsMutex.Lock() defer fake.getActiveEgressIDsMutex.Unlock() fake.GetActiveEgressIDsStub = stub } func (fake *FakeProcessManager) GetActiveEgressIDsReturns(result1 []string) { fake.getActiveEgressIDsMutex.Lock() defer fake.getActiveEgressIDsMutex.Unlock() fake.GetActiveEgressIDsStub = nil fake.getActiveEgressIDsReturns = struct { result1 []string }{result1} } func (fake *FakeProcessManager) GetActiveEgressIDsReturnsOnCall(i int, result1 []string) { fake.getActiveEgressIDsMutex.Lock() defer fake.getActiveEgressIDsMutex.Unlock() fake.GetActiveEgressIDsStub = nil if fake.getActiveEgressIDsReturnsOnCall == nil { fake.getActiveEgressIDsReturnsOnCall = make(map[int]struct { result1 []string }) } fake.getActiveEgressIDsReturnsOnCall[i] = struct { result1 []string }{result1} } func (fake *FakeProcessManager) GetContext(arg1 string) context.Context { fake.getContextMutex.Lock() ret, specificReturn := fake.getContextReturnsOnCall[len(fake.getContextArgsForCall)] fake.getContextArgsForCall = append(fake.getContextArgsForCall, struct { arg1 string }{arg1}) stub := fake.GetContextStub fakeReturns := fake.getContextReturns 
fake.recordInvocation("GetContext", []interface{}{arg1}) fake.getContextMutex.Unlock() if stub != nil { return stub(arg1) } if specificReturn { return ret.result1 } return fakeReturns.result1 } func (fake *FakeProcessManager) GetContextCallCount() int { fake.getContextMutex.RLock() defer fake.getContextMutex.RUnlock() return len(fake.getContextArgsForCall) } func (fake *FakeProcessManager) GetContextCalls(stub func(string) context.Context) { fake.getContextMutex.Lock() defer fake.getContextMutex.Unlock() fake.GetContextStub = stub } func (fake *FakeProcessManager) GetContextArgsForCall(i int) string { fake.getContextMutex.RLock() defer fake.getContextMutex.RUnlock() argsForCall := fake.getContextArgsForCall[i] return argsForCall.arg1 } func (fake *FakeProcessManager) GetContextReturns(result1 context.Context) { fake.getContextMutex.Lock() defer fake.getContextMutex.Unlock() fake.GetContextStub = nil fake.getContextReturns = struct { result1 context.Context }{result1} } func (fake *FakeProcessManager) GetContextReturnsOnCall(i int, result1 context.Context) { fake.getContextMutex.Lock() defer fake.getContextMutex.Unlock() fake.GetContextStub = nil if fake.getContextReturnsOnCall == nil { fake.getContextReturnsOnCall = make(map[int]struct { result1 context.Context }) } fake.getContextReturnsOnCall[i] = struct { result1 context.Context }{result1} } func (fake *FakeProcessManager) GetGRPCClient(arg1 string) (ipc.EgressHandlerClient, error) { fake.getGRPCClientMutex.Lock() ret, specificReturn := fake.getGRPCClientReturnsOnCall[len(fake.getGRPCClientArgsForCall)] fake.getGRPCClientArgsForCall = append(fake.getGRPCClientArgsForCall, struct { arg1 string }{arg1}) stub := fake.GetGRPCClientStub fakeReturns := fake.getGRPCClientReturns fake.recordInvocation("GetGRPCClient", []interface{}{arg1}) fake.getGRPCClientMutex.Unlock() if stub != nil { return stub(arg1) } if specificReturn { return ret.result1, ret.result2 } return fakeReturns.result1, fakeReturns.result2 } func (fake 
*FakeProcessManager) GetGRPCClientCallCount() int { fake.getGRPCClientMutex.RLock() defer fake.getGRPCClientMutex.RUnlock() return len(fake.getGRPCClientArgsForCall) } func (fake *FakeProcessManager) GetGRPCClientCalls(stub func(string) (ipc.EgressHandlerClient, error)) { fake.getGRPCClientMutex.Lock() defer fake.getGRPCClientMutex.Unlock() fake.GetGRPCClientStub = stub } func (fake *FakeProcessManager) GetGRPCClientArgsForCall(i int) string { fake.getGRPCClientMutex.RLock() defer fake.getGRPCClientMutex.RUnlock() argsForCall := fake.getGRPCClientArgsForCall[i] return argsForCall.arg1 } func (fake *FakeProcessManager) GetGRPCClientReturns(result1 ipc.EgressHandlerClient, result2 error) { fake.getGRPCClientMutex.Lock() defer fake.getGRPCClientMutex.Unlock() fake.GetGRPCClientStub = nil fake.getGRPCClientReturns = struct { result1 ipc.EgressHandlerClient result2 error }{result1, result2} } func (fake *FakeProcessManager) GetGRPCClientReturnsOnCall(i int, result1 ipc.EgressHandlerClient, result2 error) { fake.getGRPCClientMutex.Lock() defer fake.getGRPCClientMutex.Unlock() fake.GetGRPCClientStub = nil if fake.getGRPCClientReturnsOnCall == nil { fake.getGRPCClientReturnsOnCall = make(map[int]struct { result1 ipc.EgressHandlerClient result2 error }) } fake.getGRPCClientReturnsOnCall[i] = struct { result1 ipc.EgressHandlerClient result2 error }{result1, result2} } func (fake *FakeProcessManager) GetGatherers() []prometheus.Gatherer { fake.getGatherersMutex.Lock() ret, specificReturn := fake.getGatherersReturnsOnCall[len(fake.getGatherersArgsForCall)] fake.getGatherersArgsForCall = append(fake.getGatherersArgsForCall, struct { }{}) stub := fake.GetGatherersStub fakeReturns := fake.getGatherersReturns fake.recordInvocation("GetGatherers", []interface{}{}) fake.getGatherersMutex.Unlock() if stub != nil { return stub() } if specificReturn { return ret.result1 } return fakeReturns.result1 } func (fake *FakeProcessManager) GetGatherersCallCount() int { 
fake.getGatherersMutex.RLock() defer fake.getGatherersMutex.RUnlock() return len(fake.getGatherersArgsForCall) } func (fake *FakeProcessManager) GetGatherersCalls(stub func() []prometheus.Gatherer) { fake.getGatherersMutex.Lock() defer fake.getGatherersMutex.Unlock() fake.GetGatherersStub = stub } func (fake *FakeProcessManager) GetGatherersReturns(result1 []prometheus.Gatherer) { fake.getGatherersMutex.Lock() defer fake.getGatherersMutex.Unlock() fake.GetGatherersStub = nil fake.getGatherersReturns = struct { result1 []prometheus.Gatherer }{result1} } func (fake *FakeProcessManager) GetGatherersReturnsOnCall(i int, result1 []prometheus.Gatherer) { fake.getGatherersMutex.Lock() defer fake.getGatherersMutex.Unlock() fake.GetGatherersStub = nil if fake.getGatherersReturnsOnCall == nil { fake.getGatherersReturnsOnCall = make(map[int]struct { result1 []prometheus.Gatherer }) } fake.getGatherersReturnsOnCall[i] = struct { result1 []prometheus.Gatherer }{result1} } func (fake *FakeProcessManager) GetStatus(arg1 map[string]interface{}) { fake.getStatusMutex.Lock() fake.getStatusArgsForCall = append(fake.getStatusArgsForCall, struct { arg1 map[string]interface{} }{arg1}) stub := fake.GetStatusStub fake.recordInvocation("GetStatus", []interface{}{arg1}) fake.getStatusMutex.Unlock() if stub != nil { fake.GetStatusStub(arg1) } } func (fake *FakeProcessManager) GetStatusCallCount() int { fake.getStatusMutex.RLock() defer fake.getStatusMutex.RUnlock() return len(fake.getStatusArgsForCall) } func (fake *FakeProcessManager) GetStatusCalls(stub func(map[string]interface{})) { fake.getStatusMutex.Lock() defer fake.getStatusMutex.Unlock() fake.GetStatusStub = stub } func (fake *FakeProcessManager) GetStatusArgsForCall(i int) map[string]interface{} { fake.getStatusMutex.RLock() defer fake.getStatusMutex.RUnlock() argsForCall := fake.getStatusArgsForCall[i] return argsForCall.arg1 } func (fake *FakeProcessManager) HandlerStarted(arg1 string) error { fake.handlerStartedMutex.Lock() 
ret, specificReturn := fake.handlerStartedReturnsOnCall[len(fake.handlerStartedArgsForCall)] fake.handlerStartedArgsForCall = append(fake.handlerStartedArgsForCall, struct { arg1 string }{arg1}) stub := fake.HandlerStartedStub fakeReturns := fake.handlerStartedReturns fake.recordInvocation("HandlerStarted", []interface{}{arg1}) fake.handlerStartedMutex.Unlock() if stub != nil { return stub(arg1) } if specificReturn { return ret.result1 } return fakeReturns.result1 } func (fake *FakeProcessManager) HandlerStartedCallCount() int { fake.handlerStartedMutex.RLock() defer fake.handlerStartedMutex.RUnlock() return len(fake.handlerStartedArgsForCall) } func (fake *FakeProcessManager) HandlerStartedCalls(stub func(string) error) { fake.handlerStartedMutex.Lock() defer fake.handlerStartedMutex.Unlock() fake.HandlerStartedStub = stub } func (fake *FakeProcessManager) HandlerStartedArgsForCall(i int) string { fake.handlerStartedMutex.RLock() defer fake.handlerStartedMutex.RUnlock() argsForCall := fake.handlerStartedArgsForCall[i] return argsForCall.arg1 } func (fake *FakeProcessManager) HandlerStartedReturns(result1 error) { fake.handlerStartedMutex.Lock() defer fake.handlerStartedMutex.Unlock() fake.HandlerStartedStub = nil fake.handlerStartedReturns = struct { result1 error }{result1} } func (fake *FakeProcessManager) HandlerStartedReturnsOnCall(i int, result1 error) { fake.handlerStartedMutex.Lock() defer fake.handlerStartedMutex.Unlock() fake.HandlerStartedStub = nil if fake.handlerStartedReturnsOnCall == nil { fake.handlerStartedReturnsOnCall = make(map[int]struct { result1 error }) } fake.handlerStartedReturnsOnCall[i] = struct { result1 error }{result1} } func (fake *FakeProcessManager) KillAll() { fake.killAllMutex.Lock() fake.killAllArgsForCall = append(fake.killAllArgsForCall, struct { }{}) stub := fake.KillAllStub fake.recordInvocation("KillAll", []interface{}{}) fake.killAllMutex.Unlock() if stub != nil { fake.KillAllStub() } } func (fake *FakeProcessManager) 
KillAllCallCount() int { fake.killAllMutex.RLock() defer fake.killAllMutex.RUnlock() return len(fake.killAllArgsForCall) } func (fake *FakeProcessManager) KillAllCalls(stub func()) { fake.killAllMutex.Lock() defer fake.killAllMutex.Unlock() fake.KillAllStub = stub } func (fake *FakeProcessManager) KillProcess(arg1 string, arg2 error) { fake.killProcessMutex.Lock() fake.killProcessArgsForCall = append(fake.killProcessArgsForCall, struct { arg1 string arg2 error }{arg1, arg2}) stub := fake.KillProcessStub fake.recordInvocation("KillProcess", []interface{}{arg1, arg2}) fake.killProcessMutex.Unlock() if stub != nil { fake.KillProcessStub(arg1, arg2) } } func (fake *FakeProcessManager) KillProcessCallCount() int { fake.killProcessMutex.RLock() defer fake.killProcessMutex.RUnlock() return len(fake.killProcessArgsForCall) } func (fake *FakeProcessManager) KillProcessCalls(stub func(string, error)) { fake.killProcessMutex.Lock() defer fake.killProcessMutex.Unlock() fake.KillProcessStub = stub } func (fake *FakeProcessManager) KillProcessArgsForCall(i int) (string, error) { fake.killProcessMutex.RLock() defer fake.killProcessMutex.RUnlock() argsForCall := fake.killProcessArgsForCall[i] return argsForCall.arg1, argsForCall.arg2 } func (fake *FakeProcessManager) Launch(arg1 context.Context, arg2 string, arg3 *rpc.StartEgressRequest, arg4 *livekit.EgressInfo, arg5 *exec.Cmd) error { fake.launchMutex.Lock() ret, specificReturn := fake.launchReturnsOnCall[len(fake.launchArgsForCall)] fake.launchArgsForCall = append(fake.launchArgsForCall, struct { arg1 context.Context arg2 string arg3 *rpc.StartEgressRequest arg4 *livekit.EgressInfo arg5 *exec.Cmd }{arg1, arg2, arg3, arg4, arg5}) stub := fake.LaunchStub fakeReturns := fake.launchReturns fake.recordInvocation("Launch", []interface{}{arg1, arg2, arg3, arg4, arg5}) fake.launchMutex.Unlock() if stub != nil { return stub(arg1, arg2, arg3, arg4, arg5) } if specificReturn { return ret.result1 } return fakeReturns.result1 } func (fake 
*FakeProcessManager) LaunchCallCount() int { fake.launchMutex.RLock() defer fake.launchMutex.RUnlock() return len(fake.launchArgsForCall) } func (fake *FakeProcessManager) LaunchCalls(stub func(context.Context, string, *rpc.StartEgressRequest, *livekit.EgressInfo, *exec.Cmd) error) { fake.launchMutex.Lock() defer fake.launchMutex.Unlock() fake.LaunchStub = stub } func (fake *FakeProcessManager) LaunchArgsForCall(i int) (context.Context, string, *rpc.StartEgressRequest, *livekit.EgressInfo, *exec.Cmd) { fake.launchMutex.RLock() defer fake.launchMutex.RUnlock() argsForCall := fake.launchArgsForCall[i] return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, argsForCall.arg5 } func (fake *FakeProcessManager) LaunchReturns(result1 error) { fake.launchMutex.Lock() defer fake.launchMutex.Unlock() fake.LaunchStub = nil fake.launchReturns = struct { result1 error }{result1} } func (fake *FakeProcessManager) LaunchReturnsOnCall(i int, result1 error) { fake.launchMutex.Lock() defer fake.launchMutex.Unlock() fake.LaunchStub = nil if fake.launchReturnsOnCall == nil { fake.launchReturnsOnCall = make(map[int]struct { result1 error }) } fake.launchReturnsOnCall[i] = struct { result1 error }{result1} } func (fake *FakeProcessManager) ProcessFinished(arg1 string) { fake.processFinishedMutex.Lock() fake.processFinishedArgsForCall = append(fake.processFinishedArgsForCall, struct { arg1 string }{arg1}) stub := fake.ProcessFinishedStub fake.recordInvocation("ProcessFinished", []interface{}{arg1}) fake.processFinishedMutex.Unlock() if stub != nil { fake.ProcessFinishedStub(arg1) } } func (fake *FakeProcessManager) ProcessFinishedCallCount() int { fake.processFinishedMutex.RLock() defer fake.processFinishedMutex.RUnlock() return len(fake.processFinishedArgsForCall) } func (fake *FakeProcessManager) ProcessFinishedCalls(stub func(string)) { fake.processFinishedMutex.Lock() defer fake.processFinishedMutex.Unlock() fake.ProcessFinishedStub = stub } func (fake 
*FakeProcessManager) ProcessFinishedArgsForCall(i int) string { fake.processFinishedMutex.RLock() defer fake.processFinishedMutex.RUnlock() argsForCall := fake.processFinishedArgsForCall[i] return argsForCall.arg1 } func (fake *FakeProcessManager) Invocations() map[string][][]interface{} { fake.invocationsMutex.RLock() defer fake.invocationsMutex.RUnlock() copiedInvocations := map[string][][]interface{}{} for key, value := range fake.invocations { copiedInvocations[key] = value } return copiedInvocations } func (fake *FakeProcessManager) recordInvocation(key string, args []interface{}) { fake.invocationsMutex.Lock() defer fake.invocationsMutex.Unlock() if fake.invocations == nil { fake.invocations = map[string][][]interface{}{} } if fake.invocations[key] == nil { fake.invocations[key] = [][]interface{}{} } fake.invocations[key] = append(fake.invocations[key], args) } var _ service.ProcessManager = new(FakeProcessManager) ================================================ FILE: pkg/stats/handler.go ================================================ // Copyright 2023 LiveKit, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package stats

import (
	"github.com/prometheus/client_golang/prometheus"
)

// HandlerMonitor holds per-handler Prometheus collectors for upload activity.
// All collectors are registered against the default registry with constant
// node/cluster/egress labels, so one HandlerMonitor is expected per handler
// process (MustRegister panics on duplicate registration).
type HandlerMonitor struct {
	uploadsCounter      *prometheus.CounterVec   // uploads by type and success/failure
	uploadsResponseTime *prometheus.HistogramVec // upload latency in ms, same labels
	backupCounter       *prometheus.CounterVec   // writes to backup storage by output type
}

// NewHandlerMonitor creates the upload metrics, registers them with the
// default Prometheus registry, and returns the monitor. nodeID, clusterID and
// egressID become constant labels on every series.
func NewHandlerMonitor(nodeID, clusterID, egressID string) *HandlerMonitor {
	m := &HandlerMonitor{}

	constantLabels := prometheus.Labels{"node_id": nodeID, "cluster_id": clusterID, "egress_id": egressID}

	m.uploadsCounter = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace:   "livekit",
		Subsystem:   "egress",
		Name:        "pipeline_uploads",
		Help:        "Number of uploads per pipeline with type and status labels",
		ConstLabels: constantLabels,
	}, []string{"type", "status"}) // type: file, manifest, segment, liveplaylist, playlist; status: success,failure

	// NOTE(review): "pipline" below is a typo, but the metric name is part of
	// the exported metrics contract — renaming would break existing
	// dashboards/alerts. Confirm with operators before fixing.
	m.uploadsResponseTime = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Namespace:   "livekit",
		Subsystem:   "egress",
		Name:        "pipline_upload_response_time_ms",
		Help:        "A histogram of latencies for upload requests in milliseconds.",
		Buckets:     []float64{10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000, 15000, 20000, 30000},
		ConstLabels: constantLabels,
	}, []string{"type", "status"})

	m.backupCounter = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace:   "livekit",
		Subsystem:   "egress",
		Name:        "backup_storage_writes",
		Help:        "number of writes to backup storage location by output type",
		ConstLabels: constantLabels,
	}, []string{"output_type"})

	prometheus.MustRegister(m.uploadsCounter, m.uploadsResponseTime, m.backupCounter)

	return m
}

// IncUploadCountSuccess records one successful upload of the given type and
// its latency in milliseconds.
func (m *HandlerMonitor) IncUploadCountSuccess(uploadType string, elapsed float64) {
	labels := prometheus.Labels{"type": uploadType, "status": "success"}
	m.uploadsCounter.With(labels).Add(1)
	m.uploadsResponseTime.With(labels).Observe(elapsed)
}

// IncUploadCountFailure records one failed upload of the given type and its
// latency in milliseconds.
func (m *HandlerMonitor) IncUploadCountFailure(uploadType string, elapsed float64) {
	labels := prometheus.Labels{"type": uploadType, "status": "failure"}
	m.uploadsCounter.With(labels).Add(1)
	m.uploadsResponseTime.With(labels).Observe(elapsed)
}

// IncBackupStorageWrites records one write to the backup storage location for
// the given output type.
func (m *HandlerMonitor) IncBackupStorageWrites(outputType string) {
	m.backupCounter.With(prometheus.Labels{"output_type": outputType}).Add(1)
}

// RegisterSegmentsChannelSizeGauge registers a GaugeFunc that reports the
// number of segment uploads pending in the handler's channel. The gauge pulls
// its value from channelSizeFunction on each scrape.
func (m *HandlerMonitor) RegisterSegmentsChannelSizeGauge(nodeID, clusterID, egressID string, channelSizeFunction func() float64) {
	segmentsUploadsGauge := prometheus.NewGaugeFunc(
		prometheus.GaugeOpts{
			Namespace:   "livekit",
			Subsystem:   "egress",
			Name:        "segments_uploads_channel_size",
			Help:        "number of segment uploads pending in channel",
			ConstLabels: prometheus.Labels{"node_id": nodeID, "cluster_id": clusterID, "egress_id": egressID},
		},
		channelSizeFunction)
	prometheus.MustRegister(segmentsUploadsGauge)
}

// RegisterPlaylistChannelSizeGauge registers a GaugeFunc that reports the
// number of playlist updates pending in the handler's channel. The gauge pulls
// its value from channelSizeFunction on each scrape.
func (m *HandlerMonitor) RegisterPlaylistChannelSizeGauge(nodeID, clusterID, egressID string, channelSizeFunction func() float64) {
	playlistUploadsGauge := prometheus.NewGaugeFunc(
		prometheus.GaugeOpts{
			Namespace:   "livekit",
			Subsystem:   "egress",
			Name:        "playlist_uploads_channel_size",
			Help:        "number of playlist updates pending in channel",
			ConstLabels: prometheus.Labels{"node_id": nodeID, "cluster_id": clusterID, "egress_id": egressID},
		},
		channelSizeFunction)
	prometheus.MustRegister(playlistUploadsGauge)
}



================================================
FILE: pkg/stats/monitor.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package stats

import (
	"fmt"
	"sort"
	"time"

	"github.com/linkdata/deadlock"
	"github.com/pbnjay/memory"
	"github.com/prometheus/client_golang/prometheus"
	"go.uber.org/atomic"

	"github.com/livekit/protocol/livekit"
	"github.com/livekit/protocol/logger"
	"github.com/livekit/protocol/rpc"
	"github.com/livekit/protocol/utils/hwstats"

	"github.com/livekit/egress/pkg/config"
	"github.com/livekit/egress/pkg/errors"
	"github.com/livekit/egress/pkg/pipeline/source/pulse"
	"github.com/livekit/egress/pkg/types"
)

const (
	// cpuHoldDuration is how long a new request's estimated CPU cost is
	// reserved before actual per-process usage takes over.
	cpuHoldDuration      = time.Second * 15
	defaultKillThreshold = 0.95
	// minKillDuration is the number of consecutive high-CPU stat updates
	// before a process is killed.
	minKillDuration = 10
	gb              = 1024.0 * 1024.0 * 1024.0
	// pulseClientHold is the number of pulse clients reserved per pending web request.
	pulseClientHold = 4
	// memoryHeadroomGB is extra memory kept free when admitting requests.
	memoryHeadroomGB        = 1.0
	memoryUsageDumpInterval = 10 * time.Minute
)

// Service is the subset of the egress service the monitor needs in order to
// kill misbehaving processes and query service state.
type Service interface {
	IsIdle() bool
	IsDisabled() bool
	IsTerminating() bool
	KillProcess(string, error)
}

// Monitor tracks CPU and memory usage of egress handler processes and decides
// whether new requests can be admitted. Fields guarded by mu must only be
// accessed while holding it; the atomic fields may be touched lock-free.
type Monitor struct {
	nodeID        string
	clusterID     string
	cpuCostConfig *config.CPUCostConfig

	promCPULoad           prometheus.Gauge
	promCgroupMemory      prometheus.Gauge
	promCgroupReadSuccess prometheus.Gauge
	promProcRSS           prometheus.Gauge
	promWouldRejectCgroup prometheus.Gauge
	requestGauge          *prometheus.GaugeVec

	svc            Service
	cpuStats       *hwstats.CPUStats
	cgroupMemStats *hwstats.MemoryStats // nil when cgroup stats are unavailable

	requests            atomic.Int32
	webRequests         atomic.Int32
	pendingPulseClients atomic.Int32
	pendingMemoryUsage  atomic.Float64

	mu               deadlock.Mutex
	highCPUDuration  int       // consecutive high-CPU updates, see minKillDuration
	highMemoryStart  time.Time // when memory first exceeded the limit (zero = not exceeded)
	lastMemoryDump   time.Time
	pending          map[string]*processStats // keyed by egress ID, before PID is known
	procStats        map[int]*processStats    // keyed by PID, after UpdatePID
	memoryUsage      float64                  // total proc RSS in GB from the last stats update
	cgroupUsageBytes uint64
	cgroupOK         bool // last cgroup read succeeded

	cgroupErrorLogged atomic.Bool
}

// processStats accumulates per-egress CPU/memory observations.
type processStats struct {
	egressID string

	pendingCPU float64 // reserved CPU cost, cleared after cpuHoldDuration
	lastCPU    float64
	allowedCPU float64

	totalCPU   float64
	cpuCounter int
	maxCPU     float64

	maxMemory int

	countedAsWeb bool // whether this request incremented webRequests
}

// NewMonitor constructs a Monitor, starts process stat collection, and
// validates the CPU cost configuration against available cores. A failure to
// read cgroup memory stats is logged but not fatal (proc RSS is used instead).
func NewMonitor(conf *config.ServiceConfig, svc Service) (*Monitor, error) {
	m := &Monitor{
		nodeID:         conf.NodeID,
		clusterID:      conf.ClusterID,
		cpuCostConfig:  conf.CPUCostConfig,
		svc:            svc,
		pending:        make(map[string]*processStats),
		procStats:      make(map[int]*processStats),
		lastMemoryDump: time.Now(),
	}

	m.initPrometheus()

	procStats, err := hwstats.NewProcMonitor(m.updateEgressStats)
	if err != nil {
		return nil, err
	}
	m.cpuStats = procStats

	if err = m.validateCPUConfig(); err != nil {
		return nil, err
	}

	memStats, err := hwstats.NewMemoryStats()
	if err != nil {
		logger.Warnw("failed to initialize cgroup memory stats", err)
	} else {
		m.cgroupMemStats = memStats
	}

	return m, nil
}

// validateCPUConfig errors if the host has fewer cores than the cheapest
// egress type requires, and warns if some (but not all) egress types cannot
// be served.
func (m *Monitor) validateCPUConfig() error {
	requirements := []float64{
		m.cpuCostConfig.RoomCompositeCpuCost,
		m.cpuCostConfig.AudioRoomCompositeCpuCost,
		m.cpuCostConfig.WebCpuCost,
		m.cpuCostConfig.AudioWebCpuCost,
		m.cpuCostConfig.ParticipantCpuCost,
		m.cpuCostConfig.TrackCompositeCpuCost,
		m.cpuCostConfig.TrackCpuCost,
	}
	sort.Float64s(requirements)

	// recommend at least the most expensive type, floored at 3 cores
	recommendedMinimum := requirements[len(requirements)-1]
	if recommendedMinimum < 3 {
		recommendedMinimum = 3
	}

	if m.cpuStats.NumCPU() < requirements[0] {
		logger.Errorw("not enough cpu", nil,
			"minimumCpu", requirements[0],
			"recommended", recommendedMinimum,
			"available", m.cpuStats.NumCPU(),
		)
		return errors.New("not enough cpu")
	}

	if m.cpuStats.NumCPU() < requirements[len(requirements)-1] {
		logger.Errorw("not enough cpu for some egress types", nil,
			"minimumCpu", requirements[len(requirements)-1],
			"recommended", recommendedMinimum,
			"available", m.cpuStats.NumCPU(),
		)
	}

	logger.Infow(fmt.Sprintf("cpu available: %f max cost: %f", m.cpuStats.NumCPU(), requirements[len(requirements)-1]))

	return nil
}

// CanAcceptRequest reports whether the node currently has capacity for req,
// logging the full capacity breakdown at debug level.
func (m *Monitor) CanAcceptRequest(req *rpc.StartEgressRequest) bool {
	m.mu.Lock()
	fields, canAccept := m.canAcceptRequestLocked(req)
	m.mu.Unlock()

	logger.Debugw("cpu check", fields...)
	return canAccept
}

// CanAcceptWebRequest reports whether another Chrome/pulse-based request can
// be started on this node.
func (m *Monitor) CanAcceptWebRequest() bool {
	m.mu.Lock()
	defer m.mu.Unlock()

	return m.canAcceptWebLocked()
}

// canAcceptRequestLocked performs the admission check (memory, pulse clients,
// CPU) and returns the structured log fields describing the decision along
// with the verdict. Caller must hold m.mu.
func (m *Monitor) canAcceptRequestLocked(req *rpc.StartEgressRequest) ([]interface{}, bool) {
	total, available, pending, used := m.getCPUUsageLocked()

	fields := []interface{}{
		"total", total,
		"available", available,
		"pending", pending,
		"used", used,
		"activeRequests", m.requests.Load(),
		"activeWeb", m.webRequests.Load(),
		"memory", m.memoryUsage,
		"memorySource", m.cpuCostConfig.MemorySource,
	}

	// Memory admission check based on configured source
	if reject, reason := m.checkMemoryAdmissionLocked(); reject {
		fields = append(fields, "canAccept", false, "reason", reason)
		return fields, false
	}

	// caller-provided estimate wins; otherwise fall back to configured cost per type
	required := req.EstimatedCpu
	switch r := req.Request.(type) {
	case *rpc.StartEgressRequest_RoomComposite:
		useSDK := config.ShouldUseSDKSource(r.RoomComposite)
		if !useSDK && !m.canAcceptWebLocked() {
			fields = append(fields, "canAccept", false, "reason", "pulse clients")
			return fields, false
		}
		if required == 0 {
			if r.RoomComposite.AudioOnly {
				required = m.cpuCostConfig.AudioRoomCompositeCpuCost
			} else {
				required = m.cpuCostConfig.RoomCompositeCpuCost
			}
		}
	case *rpc.StartEgressRequest_Web:
		if !m.canAcceptWebLocked() {
			fields = append(fields, "canAccept", false, "reason", "pulse clients")
			return fields, false
		}
		if required == 0 {
			if r.Web.AudioOnly {
				required = m.cpuCostConfig.AudioWebCpuCost
			} else {
				required = m.cpuCostConfig.WebCpuCost
			}
		}
	case *rpc.StartEgressRequest_Participant:
		if required == 0 {
			required = m.cpuCostConfig.ParticipantCpuCost
		}
	case *rpc.StartEgressRequest_TrackComposite:
		if required == 0 {
			required = m.cpuCostConfig.TrackCompositeCpuCost
		}
	case *rpc.StartEgressRequest_Track:
		if required == 0 {
			required = m.cpuCostConfig.TrackCpuCost
		}
	case *rpc.StartEgressRequest_Replay:
		// replay sources mirror the room composite / web / participant costs
		replayReq := r.Replay
		switch source := replayReq.Source.(type) {
		case *livekit.ExportReplayRequest_Template:
			useSDK := config.ShouldUseSDKSource(source.Template)
			if !useSDK && !m.canAcceptWebLocked() {
				fields = append(fields, "canAccept", false, "reason", "pulse clients")
				return fields, false
			}
			if required == 0 {
				if source.Template.AudioOnly {
					required = m.cpuCostConfig.AudioRoomCompositeCpuCost
				} else {
					required = m.cpuCostConfig.RoomCompositeCpuCost
				}
			}
		case *livekit.ExportReplayRequest_Web:
			if !m.canAcceptWebLocked() {
				fields = append(fields, "canAccept", false, "reason", "pulse clients")
				return fields, false
			}
			if required == 0 {
				if source.Web.AudioOnly {
					required = m.cpuCostConfig.AudioWebCpuCost
				} else {
					required = m.cpuCostConfig.WebCpuCost
				}
			}
		case *livekit.ExportReplayRequest_Media:
			if required == 0 {
				required = m.cpuCostConfig.ParticipantCpuCost
			}
		}
	}

	accept := available >= required
	fields = append(fields,
		"required", required,
		"canAccept", accept,
	)
	if !accept {
		fields = append(fields, "reason", "cpu")
	}

	return fields, accept
}

// canAcceptWebLocked reports whether adding pulseClientHold more pulse
// clients (on top of current and pending ones) would stay within
// MaxPulseClients. Failure to query pulse is treated as "no capacity".
func (m *Monitor) canAcceptWebLocked() bool {
	clients, err := pulse.Clients()
	if err != nil {
		return false
	}

	return clients+int(m.pendingPulseClients.Load())+pulseClientHold <= m.cpuCostConfig.MaxPulseClients
}

// checkMemoryAdmissionLocked checks if a request should be rejected due to memory constraints.
// Returns (reject, reason) where reject=true means the request should be rejected.
func (m *Monitor) checkMemoryAdmissionLocked() (bool, string) { if m.cpuCostConfig.MaxMemory == 0 { return false, "" } pendingMem := m.pendingMemoryUsage.Load() memoryCost := m.cpuCostConfig.MemoryCost headroom := memoryHeadroomGB maxMem := m.cpuCostConfig.MaxMemory switch m.cpuCostConfig.MemorySource { case config.MemorySourceCgroup: if !m.cgroupOK { // Fallback to proc_rss return m.checkProcRSSMemoryAdmission(pendingMem, memoryCost, headroom, maxMem) } cgroupGB := float64(m.cgroupUsageBytes) / gb if cgroupGB+pendingMem+memoryCost+headroom >= maxMem { return true, "memory_cgroup" } default: // proc_rss return m.checkProcRSSMemoryAdmission(pendingMem, memoryCost, headroom, maxMem) } return false, "" } // checkProcRSSMemoryAdmission implements the original per-process RSS based admission. func (m *Monitor) checkProcRSSMemoryAdmission(pendingMem, memoryCost, headroom, maxMem float64) (bool, string) { memoryUsage := m.memoryUsage + pendingMem if memoryUsage+memoryCost+headroom >= maxMem { return true, "memory" } return false, "" } func (m *Monitor) AcceptRequest(req *rpc.StartEgressRequest) error { m.mu.Lock() defer m.mu.Unlock() if m.pending[req.EgressId] != nil { return errors.ErrEgressAlreadyExists } if _, ok := m.canAcceptRequestLocked(req); !ok { logger.Debugw("can not accept request", nil) return errors.ErrNotEnoughCPU } m.requests.Inc() var cpuHold float64 var pulseClients int32 var countedAsWeb bool switch r := req.Request.(type) { case *rpc.StartEgressRequest_RoomComposite: useSDK := config.ShouldUseSDKSource(r.RoomComposite) if !useSDK { m.webRequests.Inc() countedAsWeb = true pulseClients = pulseClientHold } if r.RoomComposite.AudioOnly { cpuHold = m.cpuCostConfig.AudioRoomCompositeCpuCost } else { cpuHold = m.cpuCostConfig.RoomCompositeCpuCost } case *rpc.StartEgressRequest_Web: pulseClients = pulseClientHold m.webRequests.Inc() countedAsWeb = true if r.Web.AudioOnly { cpuHold = m.cpuCostConfig.AudioWebCpuCost } else { cpuHold = m.cpuCostConfig.WebCpuCost 
} case *rpc.StartEgressRequest_Participant: cpuHold = m.cpuCostConfig.ParticipantCpuCost case *rpc.StartEgressRequest_TrackComposite: cpuHold = m.cpuCostConfig.TrackCompositeCpuCost case *rpc.StartEgressRequest_Track: cpuHold = m.cpuCostConfig.TrackCpuCost case *rpc.StartEgressRequest_Replay: replayReq := r.Replay switch source := replayReq.Source.(type) { case *livekit.ExportReplayRequest_Template: useSDK := config.ShouldUseSDKSource(source.Template) if !useSDK { m.webRequests.Inc() countedAsWeb = true pulseClients = pulseClientHold } if source.Template.AudioOnly { cpuHold = m.cpuCostConfig.AudioRoomCompositeCpuCost } else { cpuHold = m.cpuCostConfig.RoomCompositeCpuCost } case *livekit.ExportReplayRequest_Web: pulseClients = pulseClientHold m.webRequests.Inc() countedAsWeb = true if source.Web.AudioOnly { cpuHold = m.cpuCostConfig.AudioWebCpuCost } else { cpuHold = m.cpuCostConfig.WebCpuCost } case *livekit.ExportReplayRequest_Media: cpuHold = m.cpuCostConfig.ParticipantCpuCost } } ps := &processStats{ egressID: req.EgressId, pendingCPU: cpuHold, allowedCPU: cpuHold, countedAsWeb: countedAsWeb, } m.pendingMemoryUsage.Add(m.cpuCostConfig.MemoryCost) m.pendingPulseClients.Add(pulseClients) time.AfterFunc(cpuHoldDuration, func() { ps.pendingCPU = 0 m.pendingMemoryUsage.Add(-m.cpuCostConfig.MemoryCost) m.pendingPulseClients.Add(-pulseClients) }) m.pending[req.EgressId] = ps return nil } func (m *Monitor) UpdatePID(egressID string, pid int) { m.mu.Lock() defer m.mu.Unlock() ps := m.pending[egressID] delete(m.pending, egressID) if ps == nil { logger.Warnw("missing pending procStats", nil, "egressID", egressID) ps = &processStats{ egressID: egressID, allowedCPU: m.cpuCostConfig.WebCpuCost, } } if existing := m.procStats[pid]; existing != nil { ps.maxCPU = existing.maxCPU ps.totalCPU = existing.totalCPU ps.cpuCounter = existing.cpuCounter ps.countedAsWeb = existing.countedAsWeb } m.procStats[pid] = ps } func (m *Monitor) EgressStarted(req *rpc.StartEgressRequest) { 
switch req.Request.(type) { case *rpc.StartEgressRequest_RoomComposite: m.requestGauge.With(prometheus.Labels{"type": types.RequestTypeRoomComposite}).Add(1) case *rpc.StartEgressRequest_Web: m.requestGauge.With(prometheus.Labels{"type": types.RequestTypeWeb}).Add(1) case *rpc.StartEgressRequest_Participant: m.requestGauge.With(prometheus.Labels{"type": types.RequestTypeParticipant}).Add(1) case *rpc.StartEgressRequest_TrackComposite: m.requestGauge.With(prometheus.Labels{"type": types.RequestTypeTrackComposite}).Add(1) case *rpc.StartEgressRequest_Track: m.requestGauge.With(prometheus.Labels{"type": types.RequestTypeTrack}).Add(1) case *rpc.StartEgressRequest_Replay: replayReq := req.Request.(*rpc.StartEgressRequest_Replay).Replay switch replayReq.Source.(type) { case *livekit.ExportReplayRequest_Template: m.requestGauge.With(prometheus.Labels{"type": types.RequestTypeTemplate}).Add(1) case *livekit.ExportReplayRequest_Web: m.requestGauge.With(prometheus.Labels{"type": types.RequestTypeWeb}).Add(1) case *livekit.ExportReplayRequest_Media: m.requestGauge.With(prometheus.Labels{"type": types.RequestTypeMedia}).Add(1) } } } func (m *Monitor) EgressAborted(req *rpc.StartEgressRequest) { m.mu.Lock() defer m.mu.Unlock() ps := m.pending[req.EgressId] delete(m.pending, req.EgressId) m.requests.Dec() switch req.Request.(type) { case *rpc.StartEgressRequest_RoomComposite, *rpc.StartEgressRequest_Web, *rpc.StartEgressRequest_Replay: if ps != nil && ps.countedAsWeb { m.webRequests.Dec() } } } func (m *Monitor) EgressEnded(req *rpc.StartEgressRequest) (float64, float64, int) { m.mu.Lock() defer m.mu.Unlock() var countedAsWeb bool if ps := m.pending[req.EgressId]; ps != nil { countedAsWeb = ps.countedAsWeb } else { for _, s := range m.procStats { if s.egressID == req.EgressId { countedAsWeb = s.countedAsWeb break } } } switch req.Request.(type) { case *rpc.StartEgressRequest_RoomComposite: m.requestGauge.With(prometheus.Labels{"type": types.RequestTypeRoomComposite}).Sub(1) if 
countedAsWeb { m.webRequests.Dec() } case *rpc.StartEgressRequest_Web: m.requestGauge.With(prometheus.Labels{"type": types.RequestTypeWeb}).Sub(1) m.webRequests.Dec() case *rpc.StartEgressRequest_Participant: m.requestGauge.With(prometheus.Labels{"type": types.RequestTypeParticipant}).Sub(1) case *rpc.StartEgressRequest_TrackComposite: m.requestGauge.With(prometheus.Labels{"type": types.RequestTypeTrackComposite}).Sub(1) case *rpc.StartEgressRequest_Track: m.requestGauge.With(prometheus.Labels{"type": types.RequestTypeTrack}).Sub(1) case *rpc.StartEgressRequest_Replay: replayReq := req.Request.(*rpc.StartEgressRequest_Replay).Replay switch replayReq.Source.(type) { case *livekit.ExportReplayRequest_Template: m.requestGauge.With(prometheus.Labels{"type": types.RequestTypeTemplate}).Sub(1) if countedAsWeb { m.webRequests.Dec() } case *livekit.ExportReplayRequest_Web: m.requestGauge.With(prometheus.Labels{"type": types.RequestTypeWeb}).Sub(1) m.webRequests.Dec() case *livekit.ExportReplayRequest_Media: m.requestGauge.With(prometheus.Labels{"type": types.RequestTypeMedia}).Sub(1) } } delete(m.pending, req.EgressId) m.requests.Dec() for pid, ps := range m.procStats { if ps.egressID == req.EgressId { delete(m.procStats, pid) return ps.totalCPU / float64(ps.cpuCounter), ps.maxCPU, ps.maxMemory } } return 0, 0, 0 } func (m *Monitor) GetAvailableCPU() float64 { m.mu.Lock() defer m.mu.Unlock() _, available, _, _ := m.getCPUUsageLocked() return available } func (m *Monitor) getCPUUsageLocked() (total, available, pending, used float64) { total = m.cpuStats.NumCPU() if m.requests.Load() == 0 { // if no requests, use total available = total return } for _, ps := range m.pending { if ps.pendingCPU > ps.lastCPU { pending += ps.pendingCPU } else { pending += ps.lastCPU } } for _, ps := range m.procStats { if ps.pendingCPU > ps.lastCPU { used += ps.pendingCPU } else { used += ps.lastCPU } } // if already running requests, cap usage at MaxCpuUtilization available = 
total*m.cpuCostConfig.MaxCpuUtilization - pending - used return } func (m *Monitor) GetAvailableMemory() float64 { m.mu.Lock() defer m.mu.Unlock() if m.cpuCostConfig.MaxMemory == 0 { return float64(memory.FreeMemory()) / gb } return m.cpuCostConfig.MaxMemory - m.memoryUsage } func (m *Monitor) updateEgressStats(stats *hwstats.ProcStats) { load := 1 - stats.CpuIdle/m.cpuStats.NumCPU() m.promCPULoad.Set(load) m.mu.Lock() defer m.mu.Unlock() maxCPU := 0.0 var maxCPUEgress string for pid, cpuUsage := range stats.Cpu { procStats := m.procStats[pid] if procStats == nil { continue } procStats.lastCPU = cpuUsage procStats.totalCPU += cpuUsage procStats.cpuCounter++ if cpuUsage > procStats.maxCPU { procStats.maxCPU = cpuUsage } if cpuUsage > procStats.allowedCPU && cpuUsage > maxCPU { maxCPU = cpuUsage maxCPUEgress = procStats.egressID } } cpuKillThreshold := defaultKillThreshold if cpuKillThreshold <= m.cpuCostConfig.MaxCpuUtilization { cpuKillThreshold = (1 + m.cpuCostConfig.MaxCpuUtilization) / 2 } if load > cpuKillThreshold { logger.Warnw("high cpu usage", nil, "cpu", load, "requests", m.requests.Load(), ) if m.requests.Load() > 1 { m.highCPUDuration++ if m.highCPUDuration >= minKillDuration { m.svc.KillProcess(maxCPUEgress, errors.ErrCPUExhausted(maxCPU)) m.highCPUDuration = 0 } } } totalMemory := 0 maxMemory := 0 var maxMemoryEgress string var maxMemoryGroup *hwstats.GroupMemory for pid, gm := range stats.Memory { totalMemory += gm.Total procStats := m.procStats[pid] if procStats == nil { continue } if gm.Total > procStats.maxMemory { procStats.maxMemory = gm.Total } if gm.Total > maxMemory { maxMemory = gm.Total maxMemoryEgress = procStats.egressID maxMemoryGroup = gm } } m.memoryUsage = float64(totalMemory) / gb m.promProcRSS.Set(float64(totalMemory)) m.maybeLogMemoryUsage(stats.Memory) m.updateCgroupStats() m.updateWouldRejectMetrics() m.checkMemoryKill(maxMemoryEgress, maxMemoryGroup) } // maybeLogMemoryUsage periodically logs per-group process RSS to aid memory 
leak diagnosis. func (m *Monitor) maybeLogMemoryUsage(memory map[int]*hwstats.GroupMemory) { now := time.Now() if now.Sub(m.lastMemoryDump) < memoryUsageDumpInterval { return } m.lastMemoryDump = now for groupPID, gm := range memory { egressID := "" if ps := m.procStats[groupPID]; ps != nil { egressID = ps.egressID } logger.Infow("current memory usage", "egressID", egressID, "groupPID", groupPID, "totalRSSBytes", gm.Total, "processes", gm.Procs, ) } } // updateCgroupStats reads cgroup memory statistics and updates metrics. func (m *Monitor) updateCgroupStats() { if m.cgroupMemStats == nil { m.cgroupOK = false m.promCgroupReadSuccess.Set(0) return } usageBytes, _, err := m.cgroupMemStats.GetMemory() if err != nil { m.cgroupOK = false m.promCgroupReadSuccess.Set(0) // Throttle error logging (CompareAndSwap ensures we log only once) if m.cgroupErrorLogged.CompareAndSwap(false, true) { logger.Warnw("failed to read cgroup memory stats, falling back to proc_rss", err) } return } m.cgroupOK = true m.cgroupErrorLogged.Store(false) m.cgroupUsageBytes = usageBytes m.promCgroupReadSuccess.Set(1) m.promCgroupMemory.Set(float64(usageBytes)) } // updateWouldRejectMetrics computes what admission would do with alternative memory sources. func (m *Monitor) updateWouldRejectMetrics() { if !m.cgroupOK || m.cpuCostConfig.MaxMemory == 0 { return } pendingMem := m.pendingMemoryUsage.Load() headroom := memoryHeadroomGB maxMem := m.cpuCostConfig.MaxMemory // Would reject with cgroup? cgroupUsageGB := float64(m.cgroupUsageBytes) / gb if cgroupUsageGB+pendingMem+m.cpuCostConfig.MemoryCost+headroom >= maxMem { m.promWouldRejectCgroup.Set(1) } else { m.promWouldRejectCgroup.Set(0) } } // checkMemoryKill evaluates whether to kill a process based on memory usage. 
func (m *Monitor) checkMemoryKill(maxMemoryEgress string, maxMemoryGroup *hwstats.GroupMemory) { if m.cpuCostConfig.MaxMemory == 0 { return } maxMemoryBytes := uint64(m.cpuCostConfig.MaxMemory * gb) killTriggerBytes := uint64(m.memoryUsage * gb) switch m.cpuCostConfig.MemorySource { case config.MemorySourceCgroup: if m.cgroupOK { killTriggerBytes = m.cgroupUsageBytes } default: // proc_rss } if killTriggerBytes > maxMemoryBytes { // Apply grace period if configured. if m.highMemoryStart.IsZero() { m.highMemoryStart = time.Now() } if time.Since(m.highMemoryStart) >= time.Duration(m.cpuCostConfig.MemoryKillGraceSec)*time.Second { killTriggerGB := float64(killTriggerBytes) / gb logger.Warnw("high memory usage", nil, "source", m.cpuCostConfig.MemorySource, "memoryGB", killTriggerGB, "maxMemoryGB", m.cpuCostConfig.MaxMemory, "requests", m.requests.Load(), ) if maxMemoryGroup != nil { logger.Infow("killing egress process memory", "egressID", maxMemoryEgress, "processes", maxMemoryGroup.Procs) } // Report the actual memory that triggered the kill, not per-process max m.svc.KillProcess(maxMemoryEgress, errors.ErrOOM(killTriggerGB)) m.highMemoryStart = time.Time{} } } else { m.highMemoryStart = time.Time{} } } ================================================ FILE: pkg/stats/monitor_memory_test.go ================================================ // Copyright 2026 LiveKit, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package stats

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/livekit/egress/pkg/config"
)

// Proc_rss source: admission uses the legacy memoryUsage reading.
func TestCheckMemoryAdmissionLocked_Legacy(t *testing.T) {
	mon := &Monitor{
		cpuCostConfig: &config.CPUCostConfig{
			MaxMemory:    10, // 10 GB
			MemoryCost:   1,  // 1 GB per request
			MemorySource: config.MemorySourceProcRSS,
		},
		memoryUsage: 5, // 5 GB current usage
	}

	// 5 + 0 (pending) + 1 (cost) + 1 (headroom) = 7 < 10 -> accept
	reject, _ := mon.checkMemoryAdmissionLocked()
	require.False(t, reject)

	// 8 + 0 + 1 + 1 = 10 >= 10 -> reject
	mon.memoryUsage = 8
	reject, reason := mon.checkMemoryAdmissionLocked()
	require.True(t, reject)
	require.Equal(t, "memory", reason)
}

// Cgroup source: admission uses the cgroup working set when it is healthy.
func TestCheckMemoryAdmissionLocked_CgroupWorkingSet(t *testing.T) {
	mon := &Monitor{
		cpuCostConfig: &config.CPUCostConfig{
			MaxMemory:    10,
			MemoryCost:   1,
			MemorySource: config.MemorySourceCgroup,
		},
		cgroupUsageBytes: 5 * gb,
		cgroupOK:         true,
	}

	// 5 GB working set -> accept
	reject, _ := mon.checkMemoryAdmissionLocked()
	require.False(t, reject)

	// 8 GB working set -> reject, with the cgroup-specific reason
	mon.cgroupUsageBytes = 8 * gb
	reject, reason := mon.checkMemoryAdmissionLocked()
	require.True(t, reject)
	require.Equal(t, "memory_cgroup", reason)
}

// Cgroup source with unhealthy cgroup reads falls back to proc_rss.
func TestCheckMemoryAdmissionLocked_FallbackToProcRSS(t *testing.T) {
	mon := &Monitor{
		cpuCostConfig: &config.CPUCostConfig{
			MaxMemory:    10,
			MemoryCost:   1,
			MemorySource: config.MemorySourceCgroup,
		},
		memoryUsage: 5,
		cgroupOK:    false, // cgroup not available
	}

	// Falls back to proc_rss: 5 + 0 + 1 + 1 = 7 < 10 -> accept
	reject, _ := mon.checkMemoryAdmissionLocked()
	require.False(t, reject)

	mon.memoryUsage = 8
	reject, reason := mon.checkMemoryAdmissionLocked()
	require.True(t, reject)
	require.Equal(t, "memory", reason) // proc_rss reason
}

// MaxMemory == 0 disables memory admission entirely.
func TestCheckMemoryAdmissionLocked_NoMaxMemory(t *testing.T) {
	mon := &Monitor{
		cpuCostConfig: &config.CPUCostConfig{
			MaxMemory:    0, // disabled
			MemorySource: config.MemorySourceCgroup,
		},
		memoryUsage: 100,
	}

	// Even absurd usage is accepted when the limit is disabled.
	reject, _ := mon.checkMemoryAdmissionLocked()
	require.False(t, reject)
}

// Pending (not yet observed) memory reservations count toward admission.
func TestCheckMemoryAdmissionLocked_WithPendingMemory(t *testing.T) {
	mon := &Monitor{
		cpuCostConfig: &config.CPUCostConfig{
			MaxMemory:    10,
			MemoryCost:   1,
			MemorySource: config.MemorySourceProcRSS,
		},
		memoryUsage: 5,
	}

	// 5 + 2 + 1 + 1 = 9 < 10 -> accept
	mon.pendingMemoryUsage.Store(2)
	reject, _ := mon.checkMemoryAdmissionLocked()
	require.False(t, reject)

	// 5 + 3 + 1 + 1 = 10 >= 10 -> reject
	mon.pendingMemoryUsage.Store(3)
	reject, reason := mon.checkMemoryAdmissionLocked()
	require.True(t, reject)
	require.Equal(t, "memory", reason)
}

// Direct coverage of the proc_rss admission helper across thresholds.
func TestCheckProcRSSMemoryAdmission(t *testing.T) {
	mon := &Monitor{
		memoryUsage: 5,
	}

	reject, _ := mon.checkProcRSSMemoryAdmission(0, 1, 1, 10)
	require.False(t, reject) // 5 + 0 + 1 + 1 = 7 < 10

	reject, _ = mon.checkProcRSSMemoryAdmission(2, 1, 1, 10)
	require.False(t, reject) // 5 + 2 + 1 + 1 = 9 < 10

	reject, reason := mon.checkProcRSSMemoryAdmission(3, 1, 1, 10)
	require.True(t, reject) // 5 + 3 + 1 + 1 = 10 >= 10
	require.Equal(t, "memory", reason)
}

================================================
FILE: pkg/stats/monitor_prom.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package stats import ( "github.com/prometheus/client_golang/prometheus" "github.com/livekit/protocol/livekit" "github.com/livekit/protocol/rpc" ) func (m *Monitor) initPrometheus() { promNodeAvailable := prometheus.NewGaugeFunc(prometheus.GaugeOpts{ Namespace: "livekit", Subsystem: "egress", Name: "available", ConstLabels: prometheus.Labels{"node_id": m.nodeID, "cluster_id": m.clusterID}, }, m.promIsIdle) promCanAcceptRequest := prometheus.NewGaugeFunc(prometheus.GaugeOpts{ Namespace: "livekit", Subsystem: "egress", Name: "can_accept_request", ConstLabels: prometheus.Labels{"node_id": m.nodeID, "cluster_id": m.clusterID}, }, m.promCanAcceptRequest) promIsDisabled := prometheus.NewGaugeFunc(prometheus.GaugeOpts{ Namespace: "livekit", Subsystem: "egress", Name: "is_disabled", ConstLabels: prometheus.Labels{"node_id": m.nodeID, "cluster_id": m.clusterID}, }, m.promIsDisabled) promIsTerminating := prometheus.NewGaugeFunc(prometheus.GaugeOpts{ Namespace: "livekit", Subsystem: "egress", Name: "is_terminating", ConstLabels: prometheus.Labels{"node_id": m.nodeID, "cluster_id": m.clusterID}, }, m.promIsTerminating) m.promCPULoad = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: "livekit", Subsystem: "node", Name: "cpu_load", ConstLabels: prometheus.Labels{"node_id": m.nodeID, "node_type": "EGRESS", "cluster_id": m.clusterID}, }) m.requestGauge = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "livekit", Subsystem: "egress", Name: "requests", ConstLabels: prometheus.Labels{"node_id": m.nodeID, "cluster_id": m.clusterID}, }, []string{"type"}) // Cgroup memory metrics m.promCgroupMemory = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: "livekit", Subsystem: "egress", Name: "cgroup_memory_bytes", Help: "Cgroup memory usage in bytes", ConstLabels: prometheus.Labels{"node_id": m.nodeID, "cluster_id": m.clusterID}, }) m.promCgroupReadSuccess = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: "livekit", Subsystem: "egress", Name: "cgroup_read_success", Help: 
"Whether cgroup memory read succeeded (1) or failed (0)", ConstLabels: prometheus.Labels{"node_id": m.nodeID, "cluster_id": m.clusterID}, }) m.promProcRSS = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: "livekit", Subsystem: "egress", Name: "proc_rss_bytes", Help: "Per-process RSS sum in bytes", ConstLabels: prometheus.Labels{"node_id": m.nodeID, "cluster_id": m.clusterID}, }) m.promWouldRejectCgroup = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: "livekit", Subsystem: "egress", Name: "would_reject_cgroup", Help: "Whether request would be rejected using cgroup mode (1) or not (0)", ConstLabels: prometheus.Labels{"node_id": m.nodeID, "cluster_id": m.clusterID}, }) prometheus.MustRegister( promNodeAvailable, promCanAcceptRequest, promIsDisabled, promIsTerminating, m.promCPULoad, m.requestGauge, m.promCgroupMemory, m.promCgroupReadSuccess, m.promProcRSS, m.promWouldRejectCgroup, ) } func (m *Monitor) promIsIdle() float64 { if m.svc.IsIdle() { return 1 } return 0 } func (m *Monitor) promCanAcceptRequest() float64 { m.mu.Lock() _, canAccept := m.canAcceptRequestLocked(&rpc.StartEgressRequest{ Request: &rpc.StartEgressRequest_Web{Web: &livekit.WebEgressRequest{}}, }) m.mu.Unlock() if !m.svc.IsDisabled() && canAccept { return 1 } return 0 } func (m *Monitor) promIsDisabled() float64 { if m.svc.IsDisabled() { return 1 } return 0 } func (m *Monitor) promIsTerminating() float64 { if m.svc.IsTerminating() { return 1 } return 0 } ================================================ FILE: pkg/types/types.go ================================================ // Copyright 2023 LiveKit, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package types type RequestType string type SourceType string type EgressType string type MimeType string type Profile string type OutputType string type FileExtension string const ( // request types RequestTypeTemplate = "template" RequestTypeWeb = "web" RequestTypeMedia = "media" RequestTypeRoomComposite = "room_composite" RequestTypeParticipant = "participant" RequestTypeTrackComposite = "track_composite" RequestTypeTrack = "track" // source types SourceTypeWeb SourceType = "web" SourceTypeSDK SourceType = "sdk" // egress types EgressTypeStream EgressType = "stream" EgressTypeWebsocket EgressType = "websocket" EgressTypeFile EgressType = "file" EgressTypeSegments EgressType = "segments" EgressTypeImages EgressType = "images" // input types MimeTypeAAC MimeType = "audio/aac" MimeTypeOpus MimeType = "audio/opus" MimeTypeRawAudio MimeType = "audio/x-raw" MimeTypeH264 MimeType = "video/h264" MimeTypeVP8 MimeType = "video/vp8" MimeTypeVP9 MimeType = "video/vp9" MimeTypeJPEG MimeType = "image/jpeg" MimeTypeRawVideo MimeType = "video/x-raw" MimeTypeMP3 MimeType = "audio/mpeg" MimeTypePCMU MimeType = "audio/pcmu" MimeTypePCMA MimeType = "audio/pcma" // video profiles ProfileBaseline Profile = "baseline" ProfileMain Profile = "main" ProfileHigh Profile = "high" // output types OutputTypeUnknownFile OutputType = "" OutputTypeRaw OutputType = "audio/x-raw" OutputTypeOGG OutputType = "audio/ogg" OutputTypeMP3 OutputType = "audio/mpeg" OutputTypeIVF OutputType = "video/x-ivf" OutputTypeMP4 OutputType = "video/mp4" OutputTypeTS OutputType = "video/mp2t" OutputTypeWebM 
OutputType = "video/webm" OutputTypeJPEG OutputType = "image/jpeg" OutputTypeRTMP OutputType = "rtmp" OutputTypeSRT OutputType = "srt" OutputTypeHLS OutputType = "application/x-mpegurl" OutputTypeJSON OutputType = "application/json" OutputTypeBlob OutputType = "application/octet-stream" // file extensions FileExtensionRaw = ".raw" FileExtensionOGG = ".ogg" FileExtensionMP3 = ".mp3" FileExtensionIVF = ".ivf" FileExtensionMP4 = ".mp4" FileExtensionTS = ".ts" FileExtensionWebM = ".webm" FileExtensionM3U8 = ".m3u8" FileExtensionJPEG = ".jpeg" ) var ( DefaultAudioCodecs = map[OutputType]MimeType{ OutputTypeRaw: MimeTypeRawAudio, OutputTypeOGG: MimeTypeOpus, OutputTypeMP3: MimeTypeMP3, OutputTypeMP4: MimeTypeAAC, OutputTypeTS: MimeTypeAAC, OutputTypeWebM: MimeTypeOpus, OutputTypeRTMP: MimeTypeAAC, OutputTypeSRT: MimeTypeAAC, OutputTypeHLS: MimeTypeAAC, } DefaultVideoCodecs = map[OutputType]MimeType{ OutputTypeIVF: MimeTypeVP8, OutputTypeMP4: MimeTypeH264, OutputTypeTS: MimeTypeH264, OutputTypeWebM: MimeTypeVP8, OutputTypeRTMP: MimeTypeH264, OutputTypeSRT: MimeTypeH264, OutputTypeHLS: MimeTypeH264, } FileExtensions = map[FileExtension]struct{}{ FileExtensionRaw: {}, FileExtensionOGG: {}, FileExtensionMP3: {}, FileExtensionIVF: {}, FileExtensionMP4: {}, FileExtensionTS: {}, FileExtensionWebM: {}, FileExtensionM3U8: {}, FileExtensionJPEG: {}, } FileExtensionForOutputType = map[OutputType]FileExtension{ OutputTypeRaw: FileExtensionRaw, OutputTypeOGG: FileExtensionOGG, OutputTypeMP3: FileExtensionMP3, OutputTypeIVF: FileExtensionIVF, OutputTypeMP4: FileExtensionMP4, OutputTypeTS: FileExtensionTS, OutputTypeWebM: FileExtensionWebM, OutputTypeHLS: FileExtensionM3U8, OutputTypeJPEG: FileExtensionJPEG, } CodecCompatibility = map[OutputType]map[MimeType]bool{ OutputTypeRaw: { MimeTypeRawAudio: true, }, OutputTypeOGG: { MimeTypeOpus: true, }, OutputTypeIVF: { MimeTypeVP8: true, MimeTypeVP9: true, }, OutputTypeMP4: { MimeTypeAAC: true, MimeTypeOpus: true, MimeTypeH264: true, }, 
OutputTypeTS: { MimeTypeAAC: true, MimeTypeOpus: true, MimeTypeH264: true, }, OutputTypeWebM: { MimeTypeOpus: true, MimeTypeVP8: true, MimeTypeVP9: true, }, OutputTypeRTMP: { MimeTypeAAC: true, MimeTypeH264: true, }, OutputTypeSRT: { MimeTypeAAC: true, MimeTypeH264: true, }, OutputTypeHLS: { MimeTypeAAC: true, MimeTypeH264: true, }, OutputTypeMP3: { MimeTypeMP3: true, MimeTypeOpus: true, MimeTypeAAC: true, MimeTypeRawAudio: true, }, OutputTypeUnknownFile: { MimeTypeAAC: true, MimeTypeOpus: true, MimeTypeMP3: true, MimeTypeH264: true, MimeTypeVP8: true, MimeTypeVP9: true, }, } AllOutputAudioCodecs = map[MimeType]bool{ MimeTypeAAC: true, MimeTypeOpus: true, MimeTypeRawAudio: true, MimeTypeMP3: true, } AllOutputVideoCodecs = map[MimeType]bool{ MimeTypeH264: true, } AudioOnlyFileOutputTypes = []OutputType{ OutputTypeOGG, OutputTypeMP4, OutputTypeMP3, } VideoOnlyFileOutputTypes = []OutputType{ OutputTypeMP4, } AudioVideoFileOutputTypes = []OutputType{ OutputTypeMP4, } TrackOutputTypes = map[MimeType]OutputType{ MimeTypeOpus: OutputTypeOGG, MimeTypePCMU: OutputTypeOGG, MimeTypePCMA: OutputTypeOGG, MimeTypeH264: OutputTypeMP4, MimeTypeVP8: OutputTypeWebM, MimeTypeVP9: OutputTypeWebM, } StreamOutputTypes = map[string]OutputType{ "rtmp": OutputTypeRTMP, "rtmps": OutputTypeRTMP, "mux": OutputTypeRTMP, "twitch": OutputTypeRTMP, "srt": OutputTypeSRT, "ws": OutputTypeRaw, "wss": OutputTypeRaw, } ) func GetOutputTypeCompatibleWithCodecs(types []OutputType, audioCodecs map[MimeType]bool, videoCodecs map[MimeType]bool) OutputType { for _, t := range types { if audioCodecs != nil && !IsOutputTypeCompatibleWithCodecs(t, audioCodecs) { continue } if videoCodecs != nil && !IsOutputTypeCompatibleWithCodecs(t, videoCodecs) { continue } return t } return OutputTypeUnknownFile } func IsOutputTypeCompatibleWithCodecs(ot OutputType, codecs map[MimeType]bool) bool { for k := range codecs { if CodecCompatibility[ot][k] { return true } } return false } func GetMapIntersection[K 
comparable](mapA map[K]bool, mapB map[K]bool) map[K]bool { res := make(map[K]bool) for k := range mapA { if mapB[k] { res[k] = true } } return res } ================================================ FILE: pkg/types/types_test.go ================================================ // Copyright 2023 LiveKit, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package types import ( "testing" "github.com/stretchr/testify/require" ) func TestGetMapIntersection(t *testing.T) { list := make(map[MimeType]bool) res := GetMapIntersection(list, CodecCompatibility[OutputTypeUnknownFile]) require.Empty(t, res) list[MimeTypeH264] = true res = GetMapIntersection(list, CodecCompatibility[OutputTypeOGG]) require.Empty(t, res) list[MimeTypeVP8] = true res = GetMapIntersection(list, CodecCompatibility[OutputTypeMP4]) require.Equal(t, map[MimeType]bool{MimeTypeH264: true}, res) } func TestGetOutputTypesCompatibleWithCodecs(t *testing.T) { outputTypes := make([]OutputType, 0) audioCodecs := make(map[MimeType]bool) videoCodecs := make(map[MimeType]bool) res := GetOutputTypeCompatibleWithCodecs(outputTypes, audioCodecs, videoCodecs) require.Empty(t, res) outputTypes = append(outputTypes, OutputTypeOGG, OutputTypeMP4) res = GetOutputTypeCompatibleWithCodecs(outputTypes, audioCodecs, videoCodecs) require.Empty(t, res) audioCodecs[MimeTypeAAC] = true outputTypes = append(outputTypes, OutputTypeMP4) res = GetOutputTypeCompatibleWithCodecs(outputTypes, audioCodecs, videoCodecs) require.Empty(t, 
res) videoCodecs[MimeTypeVP8] = true outputTypes = append(outputTypes, OutputTypeMP4) res = GetOutputTypeCompatibleWithCodecs(outputTypes, audioCodecs, videoCodecs) require.Empty(t, res) videoCodecs[MimeTypeH264] = true outputTypes = append(outputTypes, OutputTypeMP4) res = GetOutputTypeCompatibleWithCodecs(outputTypes, audioCodecs, videoCodecs) require.Equal(t, OutputTypeMP4, res) } ================================================ FILE: renovate.json ================================================ { "$schema": "https://docs.renovatebot.com/renovate-schema.json", "extends": [ "config:base" ], "commitBody": "Generated by renovateBot", "packageRules": [ { "matchManagers": ["github-actions"], "groupName": "github workflows" }, { "matchManagers": ["dockerfile"], "groupName": "docker deps" }, { "matchManagers": ["npm"], "groupName": "npm deps" }, { "matchManagers": ["gomod"], "groupName": "go deps" }, { "matchPackagePrefixes": ["github.com/grafov/m3u8"], "enabled": false } ], "postUpdateOptions": [ "gomodTidy" ], "schedule": ["on sunday"], "updateNotScheduled": false } ================================================ FILE: template-default/.gitignore ================================================ # Logs logs *.log npm-debug.log* yarn-debug.log* yarn-error.log* pnpm-debug.log* lerna-debug.log* node_modules build dist-ssr *.local # Editor directories and files .vscode/* !.vscode/extensions.json .idea .DS_Store *.suo *.ntvs* *.njsproj *.sln *.sw? ================================================ FILE: template-default/.prettierrc ================================================ { "singleQuote": true, "trailingComma": "all", "semi": true, "tabWidth": 2, "printWidth": 100, "plugins": [], "pluginSearchDirs": ["."] } ================================================ FILE: template-default/README.md ================================================ # Default LiveKit Recording Templates This repo contains the default recording template used with LiveKit Egress. 
The templates are deployed alongside and served by the egress service. See docs [here](https://docs.livekit.io/guides/egress/room-composite/#default-layouts) ================================================ FILE: template-default/eslint.config.js ================================================ import js from '@eslint/js' import globals from 'globals' import reactHooks from 'eslint-plugin-react-hooks' import reactRefresh from 'eslint-plugin-react-refresh' import tseslint from 'typescript-eslint' import { globalIgnores } from 'eslint/config' export default tseslint.config([ globalIgnores(['build']), { files: ['**/*.{ts,tsx}'], extends: [ js.configs.recommended, tseslint.configs.recommended, reactHooks.configs['recommended-latest'], reactRefresh.configs.vite, ], languageOptions: { ecmaVersion: 2020, globals: globals.browser, }, }, ]) ================================================ FILE: template-default/index.html ================================================ LiveKit Egress
================================================ FILE: template-default/package.json ================================================ { "name": "livekit-egress-web", "homepage": "https://livekit.io", "description": "Default templates for RoomComposite egress", "version": "0.2.1", "private": true, "dependencies": { "@livekit/components-core": "^0.11.11", "@livekit/components-react": "^2.9.14", "@livekit/components-styles": "^1.1.6", "@livekit/egress-sdk": "^0.2.1", "livekit-client": "^2.15.6", "react": "^19.1.1", "react-dom": "^19.1.1" }, "scripts": { "dev": "vite", "build": "vite build", "lint": "eslint .", "preview": "vite preview" }, "devDependencies": { "@eslint/js": "^9.33.0", "@types/react": "^19.1.10", "@types/react-dom": "^19.1.7", "@vitejs/plugin-react": "^5.0.0", "eslint": "^9.39.1", "eslint-plugin-react-hooks": "^5.2.0", "eslint-plugin-react-refresh": "^0.4.20", "globals": "^16.3.0", "typescript": "~5.8.3", "typescript-eslint": "^8.39.1", "vite": "^7.2.2" } } ================================================ FILE: template-default/public/manifest.json ================================================ { "short_name": "livekit-egress-web", "name": "Web template for LiveKit Egress", "icons": [ { "src": "favicon.ico", "sizes": "64x64 32x32 24x24 16x16", "type": "image/x-icon" }, { "src": "logo.png", "type": "image/png", "sizes": "150x150" } ], "start_url": ".", "display": "standalone", "theme_color": "#000000", "background_color": "#ffffff" } ================================================ FILE: template-default/public/robots.txt ================================================ # https://www.robotstxt.org/robotstxt.html User-agent: * Disallow: ================================================ FILE: template-default/src/App.css ================================================ /** * Copyright 2023 LiveKit, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ body { padding: 0; font-family: Avenir, -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen', 'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue', sans-serif; -webkit-font-smoothing: antialiased; -moz-osx-font-smoothing: grayscale; background: black; color: rgb(211, 210, 210); box-sizing: border-box; margin: 0; height: 100vh; font-size: 12px; overflow: hidden; } .light { background: white; } .roomContainer { height: 100vh; } .error { color: red; } .lk-grid-layout-wrapper { height: 100%; } .lk-focus-layout { height: 100%; } /* things like name, connection quality, etc make less sense in a recording, hide for now */ .lk-participant-metadata { display: none; } ================================================ FILE: template-default/src/App.tsx ================================================ /** * Copyright 2023 LiveKit, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ import '@livekit/components-styles'; import '@livekit/components-styles/prefabs'; import EgressHelper from '@livekit/egress-sdk'; import './App.css'; import RoomPage from './Room'; function App() { return (
); } export default App; ================================================ FILE: template-default/src/Room.tsx ================================================ /** * Copyright 2023 LiveKit, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import { GridLayout, LiveKitRoom, ParticipantTile, RoomAudioRenderer, useRoomContext, useTracks, } from '@livekit/components-react'; import EgressHelper from '@livekit/egress-sdk'; import { ConnectionState, Track } from 'livekit-client'; import { ReactElement, useEffect, useState } from 'react'; import SingleSpeakerLayout from './SingleSpeakerLayout'; import SpeakerLayout from './SpeakerLayout'; const FRAME_DECODE_TIMEOUT = 5000; interface RoomPageProps { url: string; token: string; layout: string; } export default function RoomPage({ url, token, layout }: RoomPageProps) { const [error, setError] = useState(); if (!url || !token) { return
missing required params url and token
; } return ( {error ?
{error.message}
: }
); } interface CompositeTemplateProps { layout: string; } function CompositeTemplate({ layout: initialLayout }: CompositeTemplateProps) { const room = useRoomContext(); const [layout] = useState(initialLayout); const [hasScreenShare, setHasScreenShare] = useState(false); const screenshareTracks = useTracks([Track.Source.ScreenShare], { onlySubscribed: true, }); EgressHelper.setRoom(room); useEffect(() => { // determines when to start recording // the algorithm used is: // * if there are video tracks published, wait for frames to be decoded // * if there are no video tracks published, start immediately // * if it's been more than 10s, record as long as there are tracks subscribed const startTime = Date.now(); const interval = setInterval(async () => { let shouldStartRecording = false; let hasVideoTracks = false; let hasSubscribedTracks = false; let hasDecodedFrames = false; for (const p of Array.from(room.remoteParticipants.values())) { for (const pub of Array.from(p.trackPublications.values())) { if (pub.isSubscribed) { hasSubscribedTracks = true; } if (pub.kind === Track.Kind.Video) { hasVideoTracks = true; if (pub.videoTrack) { const stats = await pub.videoTrack.getRTCStatsReport(); if (stats) { hasDecodedFrames = Array.from(stats).some( (item) => item[1].type === 'inbound-rtp' && item[1].framesDecoded > 0, ); } } } } } const timeDelta = Date.now() - startTime; if (hasDecodedFrames) { shouldStartRecording = true; } else if (!hasVideoTracks && hasSubscribedTracks && timeDelta > 500) { // adding a small timeout to ensure video tracks has a chance to be published shouldStartRecording = true; } else if (timeDelta > FRAME_DECODE_TIMEOUT && hasSubscribedTracks) { shouldStartRecording = true; } if (shouldStartRecording) { EgressHelper.startRecording(); clearInterval(interval); } }, 100); /* eslint-disable-next-line react-hooks/exhaustive-deps */ }, []); useEffect(() => { if (screenshareTracks.length > 0 && screenshareTracks[0].publication) { setHasScreenShare(true); } 
else { setHasScreenShare(false); } }, [screenshareTracks]); const allTracks = useTracks( [Track.Source.Camera, Track.Source.ScreenShare, Track.Source.Unknown], { onlySubscribed: true, }, ); const filteredTracks = allTracks.filter( (tr) => tr.publication.kind === Track.Kind.Video && tr.participant.identity !== room.localParticipant.identity, ); let interfaceStyle = 'dark'; if (layout.endsWith('-light')) { interfaceStyle = 'light'; } let containerClass = 'roomContainer'; if (interfaceStyle) { containerClass += ` ${interfaceStyle}`; } // determine layout to use let main: ReactElement = <>; let effectiveLayout = layout; if (hasScreenShare && layout.startsWith('grid')) { effectiveLayout = layout.replace('grid', 'speaker'); } if (room.state !== ConnectionState.Disconnected) { if (effectiveLayout.startsWith('speaker')) { main = ; } else if (effectiveLayout.startsWith('single-speaker')) { main = ; } else { main = ( ); } } return (
{main}
); } ================================================ FILE: template-default/src/SingleSpeakerLayout.tsx ================================================ /** * Copyright 2023 LiveKit, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import { TrackReference, useVisualStableUpdate, VideoTrack } from '@livekit/components-react'; import { LayoutProps } from './common'; const SingleSpeakerLayout = ({ tracks: references }: LayoutProps) => { const sortedReferences = useVisualStableUpdate(references, 1); if (sortedReferences.length === 0) { return null; } return ; }; export default SingleSpeakerLayout; ================================================ FILE: template-default/src/SpeakerLayout.tsx ================================================ /** * Copyright 2023 LiveKit, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ import { TrackReference } from '@livekit/components-core'; import { CarouselLayout, FocusLayout, ParticipantTile, VideoTrack, useVisualStableUpdate, } from '@livekit/components-react'; import { LayoutProps } from './common'; const SpeakerLayout = ({ tracks: references }: LayoutProps) => { const sortedTracks = useVisualStableUpdate(references, 1); const mainTrack = sortedTracks.shift(); const remainingTracks = useVisualStableUpdate(sortedTracks, 3); if (!mainTrack) { return <>; } else if (remainingTracks.length === 0) { const trackRef = mainTrack as TrackReference; return ; } return (
); }; export default SpeakerLayout; ================================================ FILE: template-default/src/common.ts ================================================ /** * Copyright 2023 LiveKit, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import { TrackReference } from '@livekit/components-core'; export interface LayoutProps { tracks: TrackReference[]; } ================================================ FILE: template-default/src/index.css ================================================ /** * Copyright 2023 LiveKit, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ body { margin: 0; font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen', 'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue', sans-serif; -webkit-font-smoothing: antialiased; -moz-osx-font-smoothing: grayscale; } code { font-family: source-code-pro, Menlo, Monaco, Consolas, 'Courier New', monospace; } ================================================ FILE: template-default/src/index.tsx ================================================ /** * Copyright 2023 LiveKit, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ import React from 'react'; import { createRoot } from 'react-dom/client'; import App from './App'; const container = document.getElementById('root'); if (!container) throw new Error('Failed to find the root element'); const root = createRoot(container); root.render( , ); ================================================ FILE: template-default/src/vite-env.d.ts ================================================ /// ================================================ FILE: template-default/tsconfig.app.json ================================================ { "compilerOptions": { "tsBuildInfoFile": "./node_modules/.tmp/tsconfig.app.tsbuildinfo", "target": "ES2022", "useDefineForClassFields": true, "lib": ["ES2022", "DOM", "DOM.Iterable"], "module": "ESNext", "skipLibCheck": true, /* Bundler mode */ "moduleResolution": "bundler", "allowImportingTsExtensions": true, "moduleDetection": "force", "noEmit": true, "jsx": "react-jsx", /* Linting */ "strict": true, "noUnusedLocals": true, "noUnusedParameters": true, "erasableSyntaxOnly": true, "noFallthroughCasesInSwitch": true, "noUncheckedSideEffectImports": true }, "include": ["src"] } ================================================ FILE: template-default/tsconfig.json ================================================ { "files": [], "references": [ { "path": "./tsconfig.app.json" }, { "path": "./tsconfig.node.json" } ] } ================================================ FILE: template-default/tsconfig.node.json ================================================ { "compilerOptions": { "tsBuildInfoFile": "./node_modules/.tmp/tsconfig.node.tsbuildinfo", "target": "ES2023", "lib": ["ES2023"], "module": "ESNext", "skipLibCheck": true, /* Bundler mode */ "moduleResolution": "bundler", "allowImportingTsExtensions": true, "moduleDetection": "force", "noEmit": true, /* Linting */ "strict": true, "noUnusedLocals": true, "noUnusedParameters": true, "erasableSyntaxOnly": true, "noFallthroughCasesInSwitch": true, 
"noUncheckedSideEffectImports": true }, "include": ["vite.config.ts"] } ================================================ FILE: template-default/vite.config.ts ================================================ import { defineConfig } from 'vite' import react from '@vitejs/plugin-react' // https://vite.dev/config/ export default defineConfig({ plugins: [react()], build: { sourcemap: "hidden", outDir: "build" } }) ================================================ FILE: template-sdk/.gitignore ================================================ node_modules/ dist/ ================================================ FILE: template-sdk/.npmignore ================================================ .github node_modules tsconfig.json .prettierrc ================================================ FILE: template-sdk/.prettierrc ================================================ { "singleQuote": true, "trailingComma": "all", "semi": true, "tabWidth": 2, "printWidth": 100, "plugins": [], "pluginSearchDirs": ["."] } ================================================ FILE: template-sdk/README.md ================================================ # Egress Recording Template SDK This lightweight SDK makes it simple to build your own Room Composite templates. 
## Docs See [custom egress template docs](https://docs.livekit.io/guides/egress/custom-template/) ================================================ FILE: template-sdk/package.json ================================================ { "name": "@livekit/egress-sdk", "version": "0.2.1", "description": "A lightweight SDK for developing RoomComposite templates", "main": "dist/index.js", "types": "dist/index.d.ts", "source": "src/index.ts", "repository": "https://github.com/livekit/egress", "author": "David Zhao ", "license": "Apache-2.0", "scripts": { "build": "tsc" }, "devDependencies": { "livekit-client": "^2.12.0", "prettier": "^2.8.8", "typescript": "^5.8.3" }, "peerDependencies": { "livekit-client": "^1.15.13 || ^2.7.5" } } ================================================ FILE: template-sdk/src/index.ts ================================================ /** * Copyright 2023 LiveKit, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import { ParticipantEvent, Room, RoomEvent } from 'livekit-client'; const EgressHelper = { /** * RoomComposite will pass URL to your livekit's server instance. * @returns */ getLiveKitURL(): string { const url = getURLParam('url'); if (!url) { throw new Error('url is not found in query string'); } return url; }, /** * * @returns access token to pass to `Room.connect` */ getAccessToken(): string { const token = getURLParam('token'); if (!token) { throw new Error('token is not found in query string'); } return token; }, /** * the current desired layout. 
layout can be changed dynamically with [Egress.UpdateLayout](https://github.com/livekit/protocol/blob/main/livekit_egress.proto#L15) * @returns */ getLayout(): string { if (state.layout) { return state.layout; } const layout = getURLParam('layout'); return layout ?? ''; }, /** * Call when successfully connected to the room * @param room */ setRoom(room: Room) { if (currentRoom) { currentRoom.off(RoomEvent.Disconnected, EgressHelper.endRecording); } currentRoom = room; currentRoom.localParticipant.on(ParticipantEvent.ParticipantMetadataChanged, onMetadataChanged); currentRoom.on(RoomEvent.Disconnected, EgressHelper.endRecording); onMetadataChanged(); }, /** * Starts recording the room that's passed in */ startRecording() { console.log('START_RECORDING'); }, /** * Finishes recording the room, by default, it'll end automatically finish * when all other participants have left the room. */ endRecording() { currentRoom = undefined; console.log('END_RECORDING'); }, /** * Registers a callback to listen to layout changes. 
* @param f */ onLayoutChanged(f: (layout: string) => void) { layoutChangedCallback = f; }, }; let currentRoom: Room | undefined; let layoutChangedCallback: (layout: string) => void | undefined; let state: TemplateState = { layout: '', }; interface TemplateState { layout: string; } function onMetadataChanged() { // for recorder, metadata is a JSON object containing layout const metadata = currentRoom?.localParticipant.metadata; if (metadata) { const newState: TemplateState = JSON.parse(metadata); if (newState && newState.layout !== state.layout) { state = newState; layoutChangedCallback(state.layout); } } } function getURLParam(name: string): string | null { const query = new URLSearchParams(window.location.search); return query.get(name); } export default EgressHelper; ================================================ FILE: template-sdk/tsconfig.json ================================================ { "compilerOptions": { "target": "es2015", /* Specify ECMAScript target version: 'ES3' (default), 'ES5', 'ES2015', 'ES2016', 'ES2017', 'ES2018', 'ES2019', 'ES2020', or 'ESNEXT'. */ "module": "commonjs", /* Specify module code generation: 'none', 'commonjs', 'amd', 'system', 'umd', 'es2015', 'es2020', or 'ESNext'. */ "outDir": "dist", "declaration": true, "sourceMap": true, "strict": true, /* Enable all strict type-checking options. */ "esModuleInterop": true, /* Enables emit interoperability between CommonJS and ES Modules via creation of namespace objects for all imports. Implies 'allowSyntheticDefaultImports'. */ "skipLibCheck": true, /* Skip type checking of declaration files. */ "noUnusedLocals": true, "forceConsistentCasingInFileNames": true /* Disallow inconsistently-cased references to the same file. 
*/ }, "include": [ "src/**/*", ] } ================================================ FILE: test/agents/.gitignore ================================================ venv/ .env ================================================ FILE: test/agents/guest.py ================================================ from dotenv import load_dotenv from livekit.agents import ( Agent, AgentSession, JobContext, WorkerOptions, cli, ) from livekit.plugins import deepgram, elevenlabs, openai, silero load_dotenv(dotenv_path=".env", override=True) async def entrypoint(ctx: JobContext): await ctx.connect() agent = Agent( instructions="You are a guest on a podcast." " The audio from this conversation will be streamed to live listeners." " Choose a field of study and expertise, and talk about recent developments in that field.", turn_detection="vad", ) session = AgentSession( vad=silero.VAD.load(), stt=deepgram.STT(model="nova-3"), llm=openai.LLM(model="gpt-4o-mini"), tts=elevenlabs.TTS(), ) await session.start(agent=agent, room=ctx.room) if __name__ == "__main__": cli.run_app(WorkerOptions(entrypoint_fnc=entrypoint, agent_name="egress-integration-guest")) ================================================ FILE: test/agents/host.py ================================================ from dotenv import load_dotenv from livekit.agents import ( Agent, AgentSession, JobContext, WorkerOptions, cli, ) from livekit.plugins import deepgram, elevenlabs, openai, silero load_dotenv(dotenv_path=".env", override=True) async def entrypoint(ctx: JobContext): await ctx.connect() agent = Agent( instructions="You are hosting a podcast, and you will be having a conversation with your guest." " The audio from this conversation will be streamed to live listeners." 
" Ask engaging questions and keep the conversation flowing.", turn_detection="vad" ) session = AgentSession( vad=silero.VAD.load(), stt=deepgram.STT(model="nova-3"), llm=openai.LLM(model="gpt-4o-mini"), tts=elevenlabs.TTS(), ) await session.start(agent=agent, room=ctx.room) await session.generate_reply(instructions="Greet your guest and ask them about their field of study.") if __name__ == "__main__": cli.run_app(WorkerOptions(entrypoint_fnc=entrypoint, agent_name="egress-integration-host")) ================================================ FILE: test/agents/requirements.txt ================================================ livekit-agents>=1.0.0 livekit-plugins-deepgram>=1.0.0 livekit-plugins-elevenlabs>=1.0.0 livekit-plugins-openai>=1.0.0 livekit-plugins-cartesia>=1.0.0 livekit-plugins-silero>=1.0.0 livekit-plugins-turn-detector>=1.0.0 python-dotenv~=1.0 ================================================ FILE: test/agents.go ================================================ // Copyright 2025 LiveKit, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
//go:build integration package test import ( "context" "os" "os/exec" "testing" "github.com/stretchr/testify/require" "github.com/livekit/protocol/livekit" lksdk "github.com/livekit/server-sdk-go/v2" ) func (r *Runner) launchAgents(t *testing.T) { cmd := exec.Command("python3", "guest.py", "dev") cmd.Dir = "/agents" cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr require.NoError(t, cmd.Start()) cmd = exec.Command("python3", "host.py", "dev") cmd.Dir = "/agents" cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr require.NoError(t, cmd.Start()) agentsClient := lksdk.NewAgentDispatchServiceClient(r.WsUrl, r.ApiKey, r.ApiSecret) guest, err := agentsClient.CreateDispatch(context.Background(), &livekit.CreateAgentDispatchRequest{ AgentName: "egress-integration-guest", Room: r.RoomName, }) require.NoError(t, err) host, err := agentsClient.CreateDispatch(context.Background(), &livekit.CreateAgentDispatchRequest{ AgentName: "egress-integration-host", Room: r.RoomName, }) require.NoError(t, err) t.Cleanup(func() { _, _ = agentsClient.DeleteDispatch(context.Background(), &livekit.DeleteAgentDispatchRequest{ DispatchId: host.Id, Room: r.RoomName, }) _, _ = agentsClient.DeleteDispatch(context.Background(), &livekit.DeleteAgentDispatchRequest{ DispatchId: guest.Id, Room: r.RoomName, }) }) } ================================================ FILE: test/builder.go ================================================ // Copyright 2024 LiveKit, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
//go:build integration package test import ( "path" "testing" "time" "github.com/livekit/egress/pkg/types" "github.com/livekit/protocol/egress" "github.com/livekit/protocol/livekit" "github.com/livekit/protocol/rpc" "github.com/livekit/protocol/utils" ) const ( webUrl = "https://download.blender.org/peach/bigbuckbunny_movies/BigBuckBunny_320x180.mp4" setAtRuntime = "set-at-runtime" ) type testCase struct { name string requestType types.RequestType publishOptions // encoding options encodingOptions *livekit.EncodingOptions encodingPreset livekit.EncodingOptionsPreset *fileOptions *streamOptions *segmentOptions *imageOptions *v2OutputOptions multi bool custom func(*testing.T, *testCase) contentCheck func(t *testing.T, path string, info *FFProbeInfo) } type publishOptions struct { audioCodec types.MimeType audioDelay time.Duration audioUnpublish time.Duration audioRepublish time.Duration audioOnly bool audioMixing livekit.AudioMixing audioTrackID string videoCodec types.MimeType videoDelay time.Duration videoUnpublish time.Duration videoRepublish time.Duration videoOnly bool videoTrackID string layout string // v2 Media source fields mediaVideoTrackID string mediaParticipantVideo *livekit.ParticipantVideo audioRoutes []*livekit.AudioRoute // v2 Template source fields templateCustomBaseUrl string } type fileOptions struct { filename string fileType livekit.EncodedFileType outputType types.OutputType } type streamOptions struct { streamUrls []string rawFileName string websocketUrl string outputType types.OutputType } type segmentOptions struct { prefix string playlist string livePlaylist string suffix livekit.SegmentedFileSuffix } type imageOptions struct { prefix string suffix livekit.ImageFileSuffix } type v2OutputOptions struct { outputs []*livekit.Output storage *livekit.StorageConfig } func (r *Runner) build(test *testCase) *rpc.StartEgressRequest { switch test.requestType { case types.RequestTypeRoomComposite: room := &livekit.RoomCompositeEgressRequest{ RoomName: 
r.RoomName, Layout: test.layout, AudioOnly: test.audioOnly, AudioMixing: test.audioMixing, VideoOnly: test.videoOnly, } if test.encodingOptions != nil { room.Options = &livekit.RoomCompositeEgressRequest_Advanced{ Advanced: test.encodingOptions, } } else if test.encodingPreset != 0 { room.Options = &livekit.RoomCompositeEgressRequest_Preset{ Preset: test.encodingPreset, } } if test.fileOptions != nil { room.FileOutputs = r.buildFileOutputs(test.fileOptions) } if test.streamOptions != nil { room.StreamOutputs = r.buildStreamOutputs(test.streamOptions) } if test.segmentOptions != nil { room.SegmentOutputs = r.buildSegmentOutputs(test.segmentOptions) } if test.imageOptions != nil { room.ImageOutputs = r.buildImageOutputs(test.imageOptions) } return &rpc.StartEgressRequest{ EgressId: utils.NewGuid(utils.EgressPrefix), Request: &rpc.StartEgressRequest_RoomComposite{RoomComposite: room}, } case types.RequestTypeWeb: web := &livekit.WebEgressRequest{ Url: webUrl, AudioOnly: test.audioOnly, VideoOnly: test.videoOnly, } if test.encodingOptions != nil { web.Options = &livekit.WebEgressRequest_Advanced{ Advanced: test.encodingOptions, } } else if test.encodingPreset != 0 { web.Options = &livekit.WebEgressRequest_Preset{ Preset: test.encodingPreset, } } if test.fileOptions != nil { web.FileOutputs = r.buildFileOutputs(test.fileOptions) } if test.streamOptions != nil { web.StreamOutputs = r.buildStreamOutputs(test.streamOptions) } if test.segmentOptions != nil { web.SegmentOutputs = r.buildSegmentOutputs(test.segmentOptions) } if test.imageOptions != nil { web.ImageOutputs = r.buildImageOutputs(test.imageOptions) } return &rpc.StartEgressRequest{ EgressId: utils.NewGuid(utils.EgressPrefix), Request: &rpc.StartEgressRequest_Web{Web: web}, } case types.RequestTypeParticipant: participant := &livekit.ParticipantEgressRequest{ RoomName: r.RoomName, Identity: r.room.LocalParticipant.Identity(), } if test.encodingOptions != nil { participant.Options = 
&livekit.ParticipantEgressRequest_Advanced{ Advanced: test.encodingOptions, } } else if test.encodingPreset != 0 { participant.Options = &livekit.ParticipantEgressRequest_Preset{ Preset: test.encodingPreset, } } if test.fileOptions != nil { participant.FileOutputs = r.buildFileOutputs(test.fileOptions) } if test.streamOptions != nil { participant.StreamOutputs = r.buildStreamOutputs(test.streamOptions) } if test.segmentOptions != nil { participant.SegmentOutputs = r.buildSegmentOutputs(test.segmentOptions) } if test.imageOptions != nil { participant.ImageOutputs = r.buildImageOutputs(test.imageOptions) } return &rpc.StartEgressRequest{ EgressId: utils.NewGuid(utils.EgressPrefix), Request: &rpc.StartEgressRequest_Participant{Participant: participant}, } case types.RequestTypeTrackComposite: trackComposite := &livekit.TrackCompositeEgressRequest{ RoomName: r.RoomName, AudioTrackId: test.audioTrackID, VideoTrackId: test.videoTrackID, } if test.encodingOptions != nil { trackComposite.Options = &livekit.TrackCompositeEgressRequest_Advanced{ Advanced: test.encodingOptions, } } else if test.encodingPreset != 0 { trackComposite.Options = &livekit.TrackCompositeEgressRequest_Preset{ Preset: test.encodingPreset, } } if test.fileOptions != nil { trackComposite.FileOutputs = r.buildFileOutputs(test.fileOptions) } if test.streamOptions != nil { trackComposite.StreamOutputs = r.buildStreamOutputs(test.streamOptions) } if test.segmentOptions != nil { trackComposite.SegmentOutputs = r.buildSegmentOutputs(test.segmentOptions) } if test.imageOptions != nil { trackComposite.ImageOutputs = r.buildImageOutputs(test.imageOptions) } return &rpc.StartEgressRequest{ EgressId: utils.NewGuid(utils.EgressPrefix), Request: &rpc.StartEgressRequest_TrackComposite{TrackComposite: trackComposite}, } case types.RequestTypeTrack: trackID := test.audioTrackID if trackID == "" { trackID = test.videoTrackID } track := &livekit.TrackEgressRequest{ RoomName: r.RoomName, TrackId: trackID, } if 
test.fileOptions != nil { track.Output = &livekit.TrackEgressRequest_File{ File: &livekit.DirectFileOutput{ Filepath: path.Join(r.FilePrefix, test.filename), }, } } else if test.streamOptions != nil { track.Output = &livekit.TrackEgressRequest_WebsocketUrl{ WebsocketUrl: test.websocketUrl, } } return &rpc.StartEgressRequest{ EgressId: utils.NewGuid(utils.EgressPrefix), Request: &rpc.StartEgressRequest_Track{Track: track}, } } panic("unknown request type") } func (r *Runner) buildFileOutputs(o *fileOptions) []*livekit.EncodedFileOutput { if u := r.getUploadConfig(); u != nil { output := &livekit.EncodedFileOutput{ FileType: o.fileType, Filepath: path.Join(uploadPrefix, o.filename), } switch conf := u.(type) { case *livekit.S3Upload: output.Output = &livekit.EncodedFileOutput_S3{S3: conf} case *livekit.GCPUpload: output.Output = &livekit.EncodedFileOutput_Gcp{Gcp: conf} case *livekit.AzureBlobUpload: output.Output = &livekit.EncodedFileOutput_Azure{Azure: conf} } return []*livekit.EncodedFileOutput{output} } return []*livekit.EncodedFileOutput{{ FileType: o.fileType, Filepath: path.Join(r.FilePrefix, o.filename), }} } func (r *Runner) buildStreamOutputs(o *streamOptions) []*livekit.StreamOutput { var protocol livekit.StreamProtocol switch o.outputType { case types.OutputTypeRTMP: protocol = livekit.StreamProtocol_RTMP case types.OutputTypeSRT: protocol = livekit.StreamProtocol_SRT default: protocol = livekit.StreamProtocol_DEFAULT_PROTOCOL } return []*livekit.StreamOutput{{ Protocol: protocol, Urls: o.streamUrls, }} } func (r *Runner) buildSegmentOutputs(o *segmentOptions) []*livekit.SegmentedFileOutput { if u := r.getUploadConfig(); u != nil { output := &livekit.SegmentedFileOutput{ FilenamePrefix: path.Join(uploadPrefix, o.prefix), PlaylistName: o.playlist, LivePlaylistName: o.livePlaylist, FilenameSuffix: o.suffix, } switch conf := u.(type) { case *livekit.S3Upload: output.Output = &livekit.SegmentedFileOutput_S3{S3: conf} case *livekit.GCPUpload: output.Output = 
&livekit.SegmentedFileOutput_Gcp{Gcp: conf} case *livekit.AzureBlobUpload: output.Output = &livekit.SegmentedFileOutput_Azure{Azure: conf} } return []*livekit.SegmentedFileOutput{output} } return []*livekit.SegmentedFileOutput{{ FilenamePrefix: path.Join(r.FilePrefix, o.prefix), PlaylistName: o.playlist, LivePlaylistName: o.livePlaylist, FilenameSuffix: o.suffix, }} } func (r *Runner) buildImageOutputs(o *imageOptions) []*livekit.ImageOutput { return []*livekit.ImageOutput{{ CaptureInterval: 5, Width: 1280, Height: 720, FilenamePrefix: path.Join(r.FilePrefix, o.prefix), FilenameSuffix: o.suffix, }} } func (r *Runner) getUploadConfig() interface{} { configs := make([]interface{}, 0) if r.S3Upload != nil { configs = append(configs, r.S3Upload) } if r.GCPUpload != nil { configs = append(configs, r.GCPUpload) } if r.AzureUpload != nil { configs = append(configs, r.AzureUpload) } if len(configs) == 0 { return nil } return configs[r.testNumber%len(configs)] } func (test *testCase) isV2() bool { switch test.requestType { case types.RequestTypeTemplate, types.RequestTypeMedia: return true case types.RequestTypeWeb: return test.v2OutputOptions != nil default: return false } } func (r *Runner) buildRequest(test *testCase) *rpc.StartEgressRequest { if test.isV2() { return r.buildV2(test) } return r.build(test) } func (r *Runner) getV2StorageConfig() *livekit.StorageConfig { u := r.getUploadConfig() if u == nil { return nil } switch conf := u.(type) { case *livekit.S3Upload: return &livekit.StorageConfig{Provider: &livekit.StorageConfig_S3{S3: conf}} case *livekit.GCPUpload: return &livekit.StorageConfig{Provider: &livekit.StorageConfig_Gcp{Gcp: conf}} case *livekit.AzureBlobUpload: return &livekit.StorageConfig{Provider: &livekit.StorageConfig_Azure{Azure: conf}} case *livekit.AliOSSUpload: return &livekit.StorageConfig{Provider: &livekit.StorageConfig_AliOSS{AliOSS: conf}} } return nil } func (r *Runner) buildV2Outputs(test *testCase) []*livekit.Output { if 
test.v2OutputOptions != nil && len(test.outputs) > 0 { return test.outputs } storage := r.getV2StorageConfig() var prefix string if storage != nil { prefix = uploadPrefix } else { prefix = r.FilePrefix } var outputs []*livekit.Output if test.fileOptions != nil { outputs = append(outputs, &livekit.Output{ Config: &livekit.Output_File{ File: &livekit.FileOutput{ FileType: test.fileType, Filepath: path.Join(prefix, test.filename), }, }, Storage: storage, }) } if test.streamOptions != nil { var protocol livekit.StreamProtocol switch test.streamOptions.outputType { case types.OutputTypeRTMP: protocol = livekit.StreamProtocol_RTMP case types.OutputTypeSRT: protocol = livekit.StreamProtocol_SRT default: protocol = livekit.StreamProtocol_DEFAULT_PROTOCOL } outputs = append(outputs, &livekit.Output{ Config: &livekit.Output_Stream{ Stream: &livekit.StreamOutput{ Protocol: protocol, Urls: test.streamUrls, }, }, }) } if test.segmentOptions != nil { outputs = append(outputs, &livekit.Output{ Config: &livekit.Output_Segments{ Segments: &livekit.SegmentedFileOutput{ FilenamePrefix: path.Join(prefix, test.segmentOptions.prefix), PlaylistName: test.playlist, LivePlaylistName: test.livePlaylist, FilenameSuffix: test.segmentOptions.suffix, }, }, Storage: storage, }) } if test.imageOptions != nil { outputs = append(outputs, &livekit.Output{ Config: &livekit.Output_Images{ Images: &livekit.ImageOutput{ CaptureInterval: 5, Width: 1280, Height: 720, FilenamePrefix: path.Join(r.FilePrefix, test.imageOptions.prefix), FilenameSuffix: test.imageOptions.suffix, }, }, }) } return outputs } func (r *Runner) buildV2(test *testCase) *rpc.StartEgressRequest { replayReq := &livekit.ExportReplayRequest{ ReplayId: "test-replay-id", Outputs: r.buildV2Outputs(test), } // Source switch test.requestType { case types.RequestTypeTemplate: replayReq.Source = &livekit.ExportReplayRequest_Template{ Template: &livekit.TemplateSource{ Layout: test.layout, AudioOnly: test.audioOnly, VideoOnly: test.videoOnly, 
CustomBaseUrl: test.templateCustomBaseUrl, }, } case types.RequestTypeWeb: replayReq.Source = &livekit.ExportReplayRequest_Web{ Web: &livekit.WebSource{ Url: webUrl, AudioOnly: test.audioOnly, VideoOnly: test.videoOnly, }, } case types.RequestTypeMedia: media := &livekit.MediaSource{} // video - use explicit mediaVideoTrackID, or fall back to published videoTrackID videoTrackID := test.mediaVideoTrackID if videoTrackID == "" && test.videoCodec != "" { videoTrackID = test.videoTrackID } if videoTrackID != "" { media.Video = &livekit.MediaSource_VideoTrackId{ VideoTrackId: videoTrackID, } } else if test.mediaParticipantVideo != nil { pv := test.mediaParticipantVideo if pv.Identity == setAtRuntime { pv = &livekit.ParticipantVideo{ Identity: string(r.room.LocalParticipant.Identity()), PreferScreenShare: pv.PreferScreenShare, } } media.Video = &livekit.MediaSource_ParticipantVideo{ ParticipantVideo: pv, } } // audio - replace placeholder track IDs with actual published IDs if len(test.audioRoutes) > 0 { routes := make([]*livekit.AudioRoute, len(test.audioRoutes)) for i, route := range test.audioRoutes { routes[i] = route if tr, ok := route.Match.(*livekit.AudioRoute_TrackId); ok && tr.TrackId == setAtRuntime { routes[i] = &livekit.AudioRoute{ Match: &livekit.AudioRoute_TrackId{TrackId: test.audioTrackID}, Channel: route.Channel, } } if pi, ok := route.Match.(*livekit.AudioRoute_ParticipantIdentity); ok && pi.ParticipantIdentity == setAtRuntime { routes[i] = &livekit.AudioRoute{ Match: &livekit.AudioRoute_ParticipantIdentity{ParticipantIdentity: string(r.room.LocalParticipant.Identity())}, Channel: route.Channel, } } } media.Audio = &livekit.AudioConfig{Routes: routes} } replayReq.Source = &livekit.ExportReplayRequest_Media{ Media: media, } } // Encoding if test.encodingOptions != nil { replayReq.Encoding = &livekit.ExportReplayRequest_Advanced{ Advanced: test.encodingOptions, } } else if test.encodingPreset != 0 { replayReq.Encoding = 
&livekit.ExportReplayRequest_Preset{ Preset: test.encodingPreset, } } // Global storage if test.v2OutputOptions != nil && test.storage != nil { replayReq.Storage = test.storage } // build token since we don't pass a room name egressID := utils.NewGuid(utils.EgressPrefix) token, _ := egress.BuildEgressToken(egressID, r.ApiKey, r.ApiSecret, r.RoomName) return &rpc.StartEgressRequest{ EgressId: egressID, Request: &rpc.StartEgressRequest_Replay{Replay: replayReq}, Token: token, WsUrl: r.WsUrl, } } ================================================ FILE: test/config-sample.yaml ================================================ log_level: error redis: address: 192.168.65.2:6379 api_key: '****' api_secret: '****' ws_url: 'wss://your.livekit.url' file_prefix: /out/output s3: access_key: '****' secret: '****' region: us-east-1 bucket: mybucket room_name: egress-test room_only: false track_composite_only: false track_only: false file_only: false stream_only: false segments_only: false muting: false ================================================ FILE: test/content_checks.go ================================================ // Copyright 2023 LiveKit, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
//go:build integration package test import ( "context" "encoding/csv" "fmt" "io" "os/exec" "strconv" "strings" "testing" "time" "github.com/livekit/protocol/logger" "github.com/stretchr/testify/require" ) func (r *Runner) fullContentCheck(t *testing.T, file string, _ *FFProbeInfo) { if r.Muting { // TODO: support for content check on muted tracks to be added later return } // TODO: enable after fixing the issue with missing beeps // dur, err := parseFFProbeDuration(info.Format.Duration) //require.NoError(t, err) flashes, err := extractFlashTimestamps(file, r.FilePrefix) require.NoError(t, err) beeps, err := extractBeepTimestamps(file, testSampleBeepLevel, r.FilePrefix) require.NoError(t, err) silenceRanges, err := detectSilence(file, testSampleSilenceLevel, time.Millisecond*100) if len(silenceRanges) > 0 || err != nil { logger.Errorw("silence ranges not empty", err, "silenceRanges", silenceRanges) } // require.InDelta(t, len(flashes), len(beeps), 3) // require.InDelta(t, len(flashes), dur.Round(time.Second).Seconds(), 3) // avgFlashSpacing, err := averageSpacing(flashes) // require.NoError(t, err) // 200ms is still pretty generous, should be tighter // requireDurationInDelta(t, avgFlashSpacing, time.Second, time.Millisecond*200) // avgBeepSpacing, err := averageSpacing(beeps) // require.NoError(t, err) // requireDurationInDelta(t, avgBeepSpacing, time.Second, time.Millisecond*200) logger.Debugw("beeps", "beeps", beeps) logger.Debugw("flashes", "flashes", flashes) } func (r *Runner) videoOnlyContentCheck(t *testing.T, file string, info *FFProbeInfo) { if r.Muting { // TODO: support for content check on muted tracks to be added later return } flashes, err := extractFlashTimestamps(file, r.FilePrefix) require.NoError(t, err) dur, err := parseFFProbeDuration(info.Format.Duration) require.NoError(t, err) require.InDelta(t, len(flashes), dur.Round(time.Second).Seconds(), 3) avgFlashSpacing, err := averageSpacing(flashes) require.NoError(t, err) // 200ms is still pretty 
generous, should be tighter requireDurationInDelta(t, avgFlashSpacing, time.Second, time.Millisecond*200) } func (r *Runner) audioOnlyContentCheck(t *testing.T, file string, _ *FFProbeInfo) { if r.Muting { // TODO: support for content check on muted tracks to be added later return } //TODO: enable after fixing the issue with missing beeps //dur, err := parseFFProbeDuration(info.Format.Duration) //require.NoError(t, err) beeps, err := extractBeepTimestamps(file, testSampleBeepLevel, r.FilePrefix) require.NoError(t, err) silenceRanges, err := detectSilence(file, testSampleSilenceLevel, time.Millisecond*100) if len(silenceRanges) > 0 || err != nil { logger.Errorw("silence ranges not empty", err, "silenceRanges", silenceRanges) } // require.NoError(t, err) // // sometimes the silence range is at the end of the file, ignore it // require.True(t, len(silenceRanges) == 0 || silenceRanges[0].start > dur-time.Second*2, // fmt.Sprintf("unexpected silence ranges: %v", silenceRanges)) // require.InDelta(t, len(beeps), dur.Round(time.Second).Seconds(), 3) // avgBeepSpacing, err := averageSpacing(beeps) // require.NoError(t, err) // requireDurationInDelta(t, avgBeepSpacing, time.Second, time.Millisecond*200) logger.Debugw("beeps", "beeps", beeps) } func (r *Runner) fullContentCheckWithVideoUnpublishAt10AndRepublishAt20(t *testing.T, file string, info *FFProbeInfo) { if r.Muting { // TODO: support for content check on muted to be added later return } flashes, err := extractFlashTimestamps(file, r.FilePrefix) require.NoError(t, err) dur, err := parseFFProbeDuration(info.Format.Duration) require.NoError(t, err) gapLength := time.Second * 10 require.InDelta( t, float64(len(flashes))+gapLength.Seconds(), dur.Round(time.Second).Seconds(), 5.0, "flashes+gap ~= duration (±3s)", ) gapsFound := 0 for i := 1; i < len(flashes); i++ { if flashes[i]-flashes[i-1] > gapLength-time.Millisecond*500 { gapsFound++ requireDurationInDelta(t, flashes[i], time.Second*20, time.Second*2) } else { // all 
other flashes should be within 1 second of the previous flash requireDurationInDelta(t, flashes[i], flashes[i-1], time.Second+time.Millisecond*200) } } require.Equal(t, gapsFound, 1) r.audioOnlyContentCheck(t, file, info) } func (r *Runner) streamKeyframeContentCheck(expectedInterval float64) func(t *testing.T, target string, _ *FFProbeInfo) { return func(t *testing.T, target string, _ *FFProbeInfo) { requireKeyframeInterval(t, target, expectedInterval) } } // ensures input is read long enough to get sufficient keyframes for spacing check func requireKeyframeInterval(t *testing.T, input string, expectedInterval float64) { t.Helper() if expectedInterval <= 0 { return } timestamps, err := ffprobeKeyframeTimestamps(input, expectedInterval) require.NoError(t, err) require.GreaterOrEqual(t, len(timestamps), 2, "ffprobe returned less than two keyframes for %s", input) tolerance := 0.020 // 20ms prev := timestamps[0] found := false for _, ts := range timestamps[1:] { if ts <= prev { prev = ts continue } found = true require.InDelta(t, expectedInterval, ts-prev, tolerance, "keyframe spacing mismatch for %s", input) prev = ts } require.True(t, found, "no increasing keyframe timestamps found for %s", input) } func ffprobeKeyframeTimestamps(input string, expectedInterval float64) ([]float64, error) { timestamps := []float64{} var err error // ensure at least 3 keyframes are read readSeconds := expectedInterval*4 + 1 args := []string{ "-v", "error", "-fflags", "nobuffer", "-rw_timeout", "5000000", "-select_streams", "v:0", "-show_packets", "-show_entries", "packet=pts_time,dts_time,flags,stream_index,size,pos", "-of", "csv=p=0", input, } timeout := time.Duration(readSeconds) * time.Second ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() cmd := exec.CommandContext(ctx, "ffprobe", args...) 
stdout, err := cmd.StdoutPipe() if err != nil { return nil, fmt.Errorf("stdout pipe: %w", err) } if err = cmd.Start(); err != nil { return nil, fmt.Errorf("start ffprobe: %w", err) } defer cmd.Wait() csvReader := csv.NewReader(stdout) for { record, e := csvReader.Read() if e != nil { // ignore context && EOF errors, we could be canceling the context after readSeconds if ctx.Err() == nil && e != io.EOF { err = fmt.Errorf("read csv: %w", e) } break } if len(record) != 6 { err = fmt.Errorf("unexpected record length: %d", len(record)) break } pts, e := strconv.ParseFloat(record[1], 64) if e != nil { err = fmt.Errorf("parse pts: %w", e) break } if strings.Contains(record[5], "K") { timestamps = append(timestamps, pts) } } if err != nil { return nil, err } return timestamps, nil } ================================================ FILE: test/download.go ================================================ // Copyright 2023 LiveKit, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
//go:build integration

package test

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/url"
	"os"
	"testing"

	"cloud.google.com/go/storage"
	"github.com/Azure/azure-storage-blob-go/azblob"
	"github.com/aws/aws-sdk-go-v2/aws"
	awsConfig "github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/credentials"
	"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/googleapis/gax-go/v2"
	"github.com/stretchr/testify/require"
	"google.golang.org/api/option"

	"github.com/livekit/egress/pkg/config"
	"github.com/livekit/protocol/logger"
	lkstorage "github.com/livekit/storage"
)

// loadManifest downloads the manifest at storageFilepath from the configured
// storage backend, unmarshals it, and removes the temporary local copy.
func loadManifest(t *testing.T, c *config.StorageConfig, localFilepath, storageFilepath string) *config.Manifest {
	download(t, c, localFilepath, storageFilepath, false)
	defer os.Remove(localFilepath)

	b, err := os.ReadFile(localFilepath)
	require.NoError(t, err)

	m := &config.Manifest{}
	err = json.Unmarshal(b, m)
	require.NoError(t, err)
	return m
}

// download fetches storageFilepath to localFilepath using whichever storage
// backend is configured (S3, GCP, or Azure, checked in that order), optionally
// deleting the remote object afterwards. No-op when c is nil.
func download(t *testing.T, c *config.StorageConfig, localFilepath, storageFilepath string, delete bool) {
	if c != nil {
		if c.S3 != nil {
			logger.Debugw("s3 download", "localFilepath", localFilepath, "storageFilepath", storageFilepath)
			downloadS3(t, c.S3, localFilepath, storageFilepath, delete)
		} else if c.GCP != nil {
			logger.Debugw("gcp download", "localFilepath", localFilepath, "storageFilepath", storageFilepath)
			downloadGCP(t, c.GCP, localFilepath, storageFilepath, delete)
		} else if c.Azure != nil {
			logger.Debugw("azure download", "localFilepath", localFilepath, "storageFilepath", storageFilepath)
			downloadAzure(t, c.Azure, localFilepath, storageFilepath, delete)
		}
	}
}

// downloadS3 downloads an object from S3 using static credentials from conf,
// optionally deleting the object after a successful download.
func downloadS3(t *testing.T, conf *lkstorage.S3Config, localFilepath, storageFilepath string, delete bool) {
	file, err := os.Create(localFilepath)
	require.NoError(t, err)
	defer file.Close()

	awsConf, err := awsConfig.LoadDefaultConfig(context.Background(), func(o *awsConfig.LoadOptions) error {
		o.Region = conf.Region
		o.Credentials = credentials.StaticCredentialsProvider{
			Value: aws.Credentials{
				AccessKeyID:     conf.AccessKey,
				SecretAccessKey: conf.Secret,
				SessionToken:    conf.SessionToken,
			},
		}
		return nil
	})
	require.NoError(t, err)

	s3Client := s3.NewFromConfig(awsConf)
	_, err = manager.NewDownloader(s3Client).Download(
		context.Background(),
		file,
		&s3.GetObjectInput{
			Bucket: aws.String(conf.Bucket),
			Key:    aws.String(storageFilepath),
		},
	)
	require.NoError(t, err)

	if delete {
		_, err = s3Client.DeleteObject(context.Background(), &s3.DeleteObjectInput{
			Bucket: aws.String(conf.Bucket),
			Key:    aws.String(storageFilepath),
		})
		require.NoError(t, err)
	}
}

// downloadAzure downloads a blob via shared-key auth with exponential retry
// (maxRetries/maxDelay defined in ffprobe.go), optionally deleting it after.
func downloadAzure(t *testing.T, conf *lkstorage.AzureConfig, localFilepath, storageFilepath string, delete bool) {
	credential, err := azblob.NewSharedKeyCredential(
		conf.AccountName,
		conf.AccountKey,
	)
	require.NoError(t, err)

	pipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{
		Retry: azblob.RetryOptions{
			Policy:        azblob.RetryPolicyExponential,
			MaxTries:      maxRetries,
			MaxRetryDelay: maxDelay,
		},
	})
	sUrl := fmt.Sprintf("https://%s.blob.core.windows.net/%s", conf.AccountName, conf.ContainerName)
	azUrl, err := url.Parse(sUrl)
	require.NoError(t, err)

	containerURL := azblob.NewContainerURL(*azUrl, pipeline)
	blobURL := containerURL.NewBlobURL(storageFilepath)

	file, err := os.Create(localFilepath)
	require.NoError(t, err)
	defer file.Close()

	err = azblob.DownloadBlobToFile(context.Background(), blobURL, 0, 0, file, azblob.DownloadFromBlobOptions{
		BlockSize:   4 * 1024 * 1024,
		Parallelism: 16,
		RetryReaderOptionsPerBlock: azblob.RetryReaderOptions{
			MaxRetryRequests: 3,
		},
	})
	require.NoError(t, err)

	if delete {
		_, err = blobURL.Delete(context.Background(), azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{})
		require.NoError(t, err)
	}
}

// downloadGCP downloads an object from GCS with backoff retry, using inline
// credentials JSON when provided and ambient credentials otherwise.
// Optionally deletes the object after download.
func downloadGCP(t *testing.T, conf *lkstorage.GCPConfig, localFilepath, storageFilepath string, delete bool) {
	ctx := context.Background()
	var client *storage.Client
	var err error
	if conf.CredentialsJSON != "" {
		client, err = storage.NewClient(ctx, option.WithCredentialsJSON([]byte(conf.CredentialsJSON)))
	} else {
		client, err = storage.NewClient(ctx)
	}
	require.NoError(t, err)
	defer client.Close()

	file, err := os.Create(localFilepath)
	require.NoError(t, err)
	defer file.Close()

	rc, err := client.Bucket(conf.Bucket).Object(storageFilepath).Retryer(
		storage.WithBackoff(
			gax.Backoff{
				Initial:    minDelay,
				Max:        maxDelay,
				Multiplier: 2,
			}),
		storage.WithPolicy(storage.RetryAlways),
	).NewReader(ctx)
	require.NoError(t, err)

	_, err = io.Copy(file, rc)
	// close the reader before asserting on the copy error; close error is
	// intentionally ignored (best-effort cleanup)
	_ = rc.Close()
	require.NoError(t, err)

	if delete {
		err = client.Bucket(conf.Bucket).Object(storageFilepath).Delete(context.Background())
		require.NoError(t, err)
	}
}



================================================
FILE: test/edge.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build integration

package test

import (
	"context"
	"fmt"
	"math/rand"
	"os"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/livekit/egress/pkg/config"
	"github.com/livekit/egress/pkg/types"
	"github.com/livekit/protocol/livekit"
	lksdk "github.com/livekit/server-sdk-go/v2"
)

// testEdgeCases runs a table of failure-mode and unusual-lifecycle egress
// scenarios, each with a custom driver function instead of the standard flow.
func (r *Runner) testEdgeCases(t *testing.T) {
	if !r.should(runEdge) {
		return
	}

	t.Run("EdgeCases", func(t *testing.T) {
		for _, test := range []*testCase{
			// RoomComposite with a late-joining participant (audio only).
			// Verifies that file duration reflects wall-clock time, not
			// inflated by the late track's PTS offset.
			{
				name:        "RoomCompositeLateTrackDuration",
				requestType: types.RequestTypeRoomComposite,
				publishOptions: publishOptions{
					audioCodec: types.MimeTypeOpus,
					audioOnly:  true,
				},
				fileOptions: &fileOptions{
					filename: "room_composite_late_track_{time}",
					fileType: livekit.EncodedFileType_OGG,
				},
				custom: r.testRoomCompositeLateTrackDuration,
			},
			// Agents with room composite audio only
			{
				name:        "Agents",
				requestType: types.RequestTypeRoomComposite,
				fileOptions: &fileOptions{
					filename: "agents_{time}",
				},
				custom: r.testAgents,
			},
			// RoomComposite audio mixing
			{
				name:        "AudioMixing",
				requestType: types.RequestTypeRoomComposite,
				publishOptions: publishOptions{
					audioOnly:   true,
					audioMixing: livekit.AudioMixing_DUAL_CHANNEL_AGENT,
				},
				fileOptions: &fileOptions{
					filename: "audio_mixing_{time}",
				},
				custom: r.testAudioMixing,
			},
			// ParticipantComposite where the participant never publishes
			{
				name:        "ParticipantNoPublish",
				requestType: types.RequestTypeParticipant,
				fileOptions: &fileOptions{
					filename: "participant_no_publish_{time}.mp4",
				},
				custom: r.testParticipantNoPublish,
			},
			// Test that the egress continues if a user leaves
			{
				name:        "RoomCompositeStaysOpen",
				requestType: types.RequestTypeRoomComposite,
				fileOptions: &fileOptions{
					filename: "room_composite_stays_open_{time}.mp4",
				},
				custom: r.testRoomCompositeStaysOpen,
			},
			// Room composite where all participants leave and the server
			// eventually disconnects the egress. Verifies that the reported
			// duration includes the silence tail between participant departure
			// and server-initiated leave.
			{
				name:        "RoomCompositeDisconnectDuration",
				requestType: types.RequestTypeRoomComposite,
				publishOptions: publishOptions{
					audioCodec: types.MimeTypeOpus,
					audioOnly:  true,
				},
				fileOptions: &fileOptions{
					filename: "room_composite_disconnect_duration_{time}",
					fileType: livekit.EncodedFileType_OGG,
				},
				custom: r.testRoomCompositeDisconnectDuration,
			},
			// RTMP output with no valid urls
			{
				name:        "RtmpFailure",
				requestType: types.RequestTypeRoomComposite,
				publishOptions: publishOptions{
					audioCodec: types.MimeTypeOpus,
					videoCodec: types.MimeTypeH264,
				},
				streamOptions: &streamOptions{
					streamUrls: []string{badRtmpUrl1},
					outputType: types.OutputTypeRTMP,
				},
				custom: r.testRtmpFailure,
			},
			// SRT output with no valid urls
			{
				name:        "SrtFailure",
				requestType: types.RequestTypeWeb,
				streamOptions: &streamOptions{
					streamUrls: []string{badSrtUrl1},
					outputType: types.OutputTypeSRT,
				},
				custom: r.testSrtFailure,
			},
			// Track composite with data loss due to a disconnection
			{
				name:        "TrackDisconnection",
				requestType: types.RequestTypeTrackComposite,
				publishOptions: publishOptions{
					audioCodec: types.MimeTypeOpus,
				},
				fileOptions: &fileOptions{
					filename: "track_disconnection_{time}.mp4",
					fileType: livekit.EncodedFileType_MP4,
				},
				custom: r.testTrackDisconnection,
			},
			// Stream output with no urls
			{
				name:        "EmptyStreamBin",
				requestType: types.RequestTypeRoomComposite,
				publishOptions: publishOptions{
					audioCodec: types.MimeTypeOpus,
					videoCodec: types.MimeTypeVP8,
				},
				streamOptions: &streamOptions{
					streamUrls: []string{rtmpUrl4, badRtmpUrl1},
					outputType: types.OutputTypeRTMP,
				},
				segmentOptions: &segmentOptions{
					prefix:   "empty_stream_{time}",
					playlist: "empty_stream_{time}",
				},
				custom: r.testEmptyStreamBin,
			},
			// File storage limit reached
			{
				name:        "FileStorageLimit",
				requestType: types.RequestTypeRoomComposite,
				publishOptions: publishOptions{
					audioCodec: types.MimeTypeOpus,
					videoCodec: types.MimeTypeVP8,
				},
				fileOptions: &fileOptions{
					filename: "storage_limit_{time}.mp4",
					fileType: livekit.EncodedFileType_MP4,
				},
				custom: r.testStorageLimit,
			},
		} {
			// r.run returns false to signal the suite should stop early
			if !r.run(t, test, test.custom) {
				return
			}
		}
	})
}

// testRoomCompositeLateTrackDuration verifies that a participant joining
// mid-egress does not inflate the reported file duration.
func (r *Runner) testRoomCompositeLateTrackDuration(t *testing.T, test *testCase) {
	// First participant is already connected (r.room) and publishes audio immediately.
	// Start egress, wait for it to become active, then connect a second participant
	// after a delay. Stop egress and verify that the reported file duration is close
	// to wall-clock time and not inflated by the late track's synchronizer offset.
	req := r.build(test)

	testStart := time.Now()
	egressID := r.startEgress(t, req)

	// Second participant joins several seconds after egress is active
	time.Sleep(time.Second * 5)
	p2, err := lksdk.ConnectToRoom(r.WsUrl, lksdk.ConnectInfo{
		APIKey:              r.ApiKey,
		APISecret:           r.ApiSecret,
		RoomName:            r.RoomName,
		ParticipantName:     "egress-late-joiner",
		ParticipantIdentity: fmt.Sprintf("late-joiner-%d", rand.Intn(100)),
	}, lksdk.NewRoomCallback())
	require.NoError(t, err)
	t.Cleanup(p2.Disconnect)

	r.publish(t, p2.LocalParticipant, types.MimeTypeOpus, make(chan struct{}))

	// Let the late track record for a few seconds
	time.Sleep(time.Second * 7)

	// Stop and verify
	res := r.stopEgress(t, egressID)
	wallClock := time.Since(testStart)

	// prefer the deprecated singular field, fall back to FileResults
	fileRes := res.GetFile() //nolint:staticcheck
	if fileRes == nil {
		require.Len(t, res.FileResults, 1)
		fileRes = res.FileResults[0]
	}

	reportedDuration := time.Duration(fileRes.Duration)
	t.Logf("reported duration: %s, wall-clock: %s, startedAt: %d, endedAt: %d",
		reportedDuration, wallClock, fileRes.StartedAt, fileRes.EndedAt)

	// Reported duration must not exceed wall-clock time. It can legitimately be
	// shorter (pipeline startup delay between testStart and first packet), but
	// should never be longer.
	// +3.0s of slack for reporting granularity
	require.LessOrEqual(t, reportedDuration.Seconds(), wallClock.Seconds()+3.0,
		"file duration should not exceed wall-clock duration (inflated by late track offset)")
}

// testAgents runs a standard file test with agents launched into the room.
// Skipped unless the agents env file is present on this host.
func (r *Runner) testAgents(t *testing.T, test *testCase) {
	_, err := os.Stat("/agents/.env")
	if err != nil {
		t.Skip("skipping agents test; missing env file")
	}

	r.launchAgents(t)
	time.Sleep(time.Second * 5)
	r.runFileTest(t, test)
}

// testAudioMixing publishes audio from two regular participants plus one
// agent participant, then runs the standard file test over the mixed output.
func (r *Runner) testAudioMixing(t *testing.T, test *testCase) {
	p1, err := lksdk.ConnectToRoom(r.WsUrl, lksdk.ConnectInfo{
		APIKey:              r.ApiKey,
		APISecret:           r.ApiSecret,
		RoomName:            r.RoomName,
		ParticipantName:     "egress-sample-1",
		ParticipantIdentity: fmt.Sprintf("sample-1-%d", rand.Intn(100)),
	}, lksdk.NewRoomCallback())
	require.NoError(t, err)
	t.Cleanup(p1.Disconnect)
	r.publish(t, p1.LocalParticipant, types.MimeTypeOpus, make(chan struct{}))

	// agent participant (ParticipantKind) — relevant for DUAL_CHANNEL_AGENT mixing
	agent, err := lksdk.ConnectToRoom(r.WsUrl, lksdk.ConnectInfo{
		APIKey:              r.ApiKey,
		APISecret:           r.ApiSecret,
		RoomName:            r.RoomName,
		ParticipantName:     "egress-sample",
		ParticipantIdentity: fmt.Sprintf("agent-%d", rand.Intn(100)),
		ParticipantKind:     lksdk.ParticipantAgent,
	}, lksdk.NewRoomCallback())
	require.NoError(t, err)
	t.Cleanup(agent.Disconnect)
	r.publish(t, agent.LocalParticipant, types.MimeTypeOpus, make(chan struct{}))

	p2, err := lksdk.ConnectToRoom(r.WsUrl, lksdk.ConnectInfo{
		APIKey:              r.ApiKey,
		APISecret:           r.ApiSecret,
		RoomName:            r.RoomName,
		ParticipantName:     "egress-sample",
		ParticipantIdentity: fmt.Sprintf("sample-2-%d", rand.Intn(100)),
	}, lksdk.NewRoomCallback())
	require.NoError(t, err)
	t.Cleanup(p2.Disconnect)
	r.publish(t, p2.LocalParticipant, types.MimeTypeOpus, make(chan struct{}))

	r.runFileTest(t, test)
}

// testParticipantNoPublish starts a participant egress for a participant that
// never publishes, then disconnects the room and expects EGRESS_ABORTED.
func (r *Runner) testParticipantNoPublish(t *testing.T, test *testCase) {
	identity := r.room.LocalParticipant.Identity()

	req := r.build(test)
	info := r.sendRequest(t, req)

	time.Sleep(time.Second * 15)
	r.room.Disconnect()

	time.Sleep(time.Second * 30)
	info = r.getUpdate(t, info.EgressId)
	require.Equal(t, livekit.EgressStatus_EGRESS_ABORTED.String(), info.Status.String())

	// reconnect the publisher to the room
	room, err := lksdk.ConnectToRoom(r.WsUrl, lksdk.ConnectInfo{
		APIKey:              r.ApiKey,
		APISecret:           r.ApiSecret,
		RoomName:            r.RoomName,
		ParticipantName:     "egress-sample",
		ParticipantIdentity: identity,
	}, lksdk.NewRoomCallback())
	require.NoError(t, err)
	r.room = room
}

// testRoomCompositeStaysOpen verifies the egress remains ACTIVE across a
// publisher leaving and rejoining the room.
func (r *Runner) testRoomCompositeStaysOpen(t *testing.T, test *testCase) {
	req := r.build(test)
	info := r.sendRequest(t, req)
	time.Sleep(time.Second * 10)

	identity := r.room.LocalParticipant.Identity()
	r.room.Disconnect()
	time.Sleep(time.Second * 10)

	// reconnect the publisher to the room
	room, err := lksdk.ConnectToRoom(r.WsUrl, lksdk.ConnectInfo{
		APIKey:              r.ApiKey,
		APISecret:           r.ApiSecret,
		RoomName:            r.RoomName,
		ParticipantName:     "egress-sample",
		ParticipantIdentity: identity,
	}, lksdk.NewRoomCallback())
	require.NoError(t, err)
	r.room = room

	r.publishSample(t, types.MimeTypeOpus, 0, 0, false)
	r.publishSample(t, types.MimeTypeVP8, 0, 0, false)
	time.Sleep(time.Second * 10)

	r.checkUpdate(t, info.EgressId, livekit.EgressStatus_EGRESS_ACTIVE)
	r.stopEgress(t, info.EgressId)
}

// testRoomCompositeDisconnectDuration verifies that the reported file duration
// includes the silence recorded between the last participant leaving and the
// server disconnecting the egress.
func (r *Runner) testRoomCompositeDisconnectDuration(t *testing.T, test *testCase) {
	// Start egress, record for a while, then disconnect all participants.
	// The server will eventually disconnect the egress after departure_timeout.
	// The file will contain silence during that gap, so endedAt must
	// reflect the full file content including the silence tail.
	const departureTimeout = 20 // seconds

	// Create the room with an explicit departure_timeout so the silence
	// gap is predictable regardless of server defaults.
	roomClient := lksdk.NewRoomServiceClient(r.WsUrl, r.ApiKey, r.ApiSecret)
	_, err := roomClient.CreateRoom(context.Background(), &livekit.CreateRoomRequest{
		Name:             r.RoomName,
		DepartureTimeout: departureTimeout,
	})
	require.NoError(t, err)

	req := r.build(test)
	egressID := r.startEgress(t, req)

	// Record with active audio for 10 seconds
	time.Sleep(time.Second * 10)

	// Disconnect all participants — the room becomes empty, but the
	// egress stays connected until the server kicks it out.
	disconnectTime := time.Now()
	identity := r.room.LocalParticipant.Identity()
	r.room.Disconnect()

	// Reconnect the publisher on exit so subsequent tests have a room
	defer func() {
		room, err := lksdk.ConnectToRoom(r.WsUrl, lksdk.ConnectInfo{
			APIKey:              r.ApiKey,
			APISecret:           r.ApiSecret,
			RoomName:            r.RoomName,
			ParticipantName:     "egress-sample",
			ParticipantIdentity: identity,
		}, lksdk.NewRoomCallback())
		require.NoError(t, err)
		r.room = room
	}()

	// Wait for the egress to complete on its own (server-initiated leave).
	// Drain updates until we see EGRESS_COMPLETE or EGRESS_FAILED.
	var res *livekit.EgressInfo
	deadline := time.After(90 * time.Second)
	for res == nil {
		select {
		case info := <-r.updates:
			if info.EgressId != egressID {
				continue
			}
			switch info.Status {
			case livekit.EgressStatus_EGRESS_COMPLETE:
				res = info
			case livekit.EgressStatus_EGRESS_FAILED:
				t.Fatalf("egress failed: %s", info.Error)
			}
		case <-deadline:
			t.Fatal("timed out waiting for egress to complete after room disconnect")
		}
	}

	silenceGap := time.Since(disconnectTime)
	t.Logf("silence gap after disconnect: %s", silenceGap)

	// prefer the deprecated singular field, fall back to FileResults
	fileRes := res.GetFile() //nolint:staticcheck
	if fileRes == nil {
		require.Len(t, res.FileResults, 1)
		fileRes = res.FileResults[0]
	}

	reportedDuration := time.Duration(fileRes.Duration)
	t.Logf("reported duration: %s, startedAt: %d, endedAt: %d",
		reportedDuration, fileRes.StartedAt, fileRes.EndedAt)

	// The reported duration should include the silence tail.
	// The room was
	// created with departure_timeout=20s, so the server disconnects the
	// egress ~20s after the last participant leaves. We allow 5s of slack
	// for pipeline startup/teardown.
	minExpected := 10*time.Second + silenceGap - 5*time.Second
	require.GreaterOrEqual(t, reportedDuration, minExpected,
		"file duration should include silence tail after participants left")
}

// testStorageLimit lowers the file output size limit so recording hits the
// storage cap quickly, then expects an EGRESS_LIMIT_REACHED update.
func (r *Runner) testStorageLimit(t *testing.T, test *testCase) {
	origLimit := r.FileOutputMaxSize
	r.FileOutputMaxSize = 300000 // ~300KB to trigger quickly
	t.Cleanup(func() { r.FileOutputMaxSize = origLimit })

	req := r.build(test)
	info := r.sendRequest(t, req)
	egressID := info.EgressId

	deadline := time.After(45 * time.Second)
	for {
		// non-blocking deadline check before waiting on the next update
		select {
		case <-deadline:
			t.Fatal("timed out waiting for storage limit")
		default:
		}

		update := r.getUpdate(t, egressID)
		switch update.Status {
		//nolint:revive // EGRESS_ACTIVE explicitly listed for readability
		case livekit.EgressStatus_EGRESS_ACTIVE:
			continue
		case livekit.EgressStatus_EGRESS_LIMIT_REACHED:
			file := update.GetFile() //nolint:staticcheck // keep deprecated field for older clients
			if file == nil && len(update.FileResults) > 0 {
				file = update.FileResults[0]
			}
			require.NotNil(t, file)
			require.Contains(t, update.Details, livekit.EndReasonLimitReached)
			require.NotEmpty(t, update.Error)
			return
		case livekit.EgressStatus_EGRESS_FAILED:
			t.Fatalf("egress failed: %s", update.Error)
		default:
			continue
		}
	}
}

// testRtmpFailure verifies that an egress whose only stream url is invalid
// reports stream failure (which must never revert) and ends EGRESS_FAILED.
func (r *Runner) testRtmpFailure(t *testing.T, test *testCase) {
	req := r.build(test)

	info, err := r.StartEgress(context.Background(), req)
	require.NoError(t, err)
	require.Empty(t, info.Error)
	require.NotEmpty(t, info.EgressId)
	require.Equal(t, r.RoomName, info.RoomName)
	require.Equal(t, livekit.EgressStatus_EGRESS_STARTING, info.Status)

	// check updates
	time.Sleep(time.Second * 5)
	info = r.getUpdate(t, info.EgressId)

	streamFailed := false
	for info.Status == livekit.EgressStatus_EGRESS_ACTIVE {
		if !streamFailed && info.StreamResults[0].Status == livekit.StreamInfo_FAILED {
			streamFailed = true
		}
		if streamFailed {
			// make sure this never reverts in subsequent updates
			require.Equal(t, livekit.StreamInfo_FAILED, info.StreamResults[0].Status)
		}
		info = r.getUpdate(t, info.EgressId)
	}

	require.Equal(t, livekit.EgressStatus_EGRESS_FAILED, info.Status)
	require.NotEmpty(t, info.Error)
	require.Equal(t, livekit.StreamInfo_FAILED, info.StreamResults[0].Status)
	require.NotEmpty(t, info.StreamResults[0].Error)
}

// testSrtFailure verifies that an SRT egress with an invalid url ends in
// EGRESS_FAILED (either directly or after a brief ACTIVE phase).
func (r *Runner) testSrtFailure(t *testing.T, test *testCase) {
	req := r.build(test)

	info, err := r.StartEgress(context.Background(), req)
	require.NoError(t, err)
	require.Empty(t, info.Error)
	require.NotEmpty(t, info.EgressId)
	require.Equal(t, livekit.EgressStatus_EGRESS_STARTING, info.Status)

	// check update
	time.Sleep(time.Second * 5)
	info = r.getUpdate(t, info.EgressId)
	if info.Status == livekit.EgressStatus_EGRESS_ACTIVE {
		r.checkUpdate(t, info.EgressId, livekit.EgressStatus_EGRESS_FAILED)
	} else {
		require.Equal(t, livekit.EgressStatus_EGRESS_FAILED, info.Status)
	}
}

// testTrackDisconnection runs a file test over a video track published with a
// simulated mid-stream disconnection.
func (r *Runner) testTrackDisconnection(t *testing.T, test *testCase) {
	test.videoTrackID = r.publishSampleWithDisconnection(t, types.MimeTypeVP8)
	r.runFileTest(t, test)
}

// testEmptyStreamBin starts with one good and one bad RTMP url, removes the
// good one so no live outputs remain, and verifies segments are still written.
func (r *Runner) testEmptyStreamBin(t *testing.T, test *testCase) {
	req := r.build(test)
	info := r.sendRequest(t, req)
	egressID := info.EgressId
	time.Sleep(time.Second * 15)

	// get params
	p, err := config.GetValidatedPipelineConfig(r.ServiceConfig, req)
	require.NoError(t, err)

	r.checkStreamUpdate(t, egressID, map[string]livekit.StreamInfo_Status{
		rtmpUrl4Redacted:    livekit.StreamInfo_ACTIVE,
		badRtmpUrl1Redacted: livekit.StreamInfo_FAILED,
	})

	_, err = r.client.UpdateStream(context.Background(), egressID, &livekit.UpdateStreamRequest{
		EgressId:         egressID,
		RemoveOutputUrls: []string{rtmpUrl4},
	})
	require.NoError(t, err)

	r.checkStreamUpdate(t, egressID, map[string]livekit.StreamInfo_Status{
		rtmpUrl4Redacted:    livekit.StreamInfo_FINISHED,
		badRtmpUrl1Redacted: livekit.StreamInfo_FAILED,
	})

	time.Sleep(time.Second * 10)
	res := r.stopEgress(t, egressID)
	r.verifySegments(t, test, p, livekit.SegmentedFileSuffix_INDEX, res, false)
}



================================================
FILE: test/ffprobe.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build integration

package test

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"math"
	"os"
	"os/exec"
	"regexp"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/llehouerou/go-mp3/lameinfo"
	"github.com/stretchr/testify/require"

	"github.com/livekit/egress/pkg/config"
	"github.com/livekit/egress/pkg/types"
	"github.com/livekit/protocol/livekit"
)

// shared retry/backoff parameters, also used by the download helpers
const (
	maxRetries = 5
	minDelay   = time.Millisecond * 100
	maxDelay   = time.Second * 5
)

var (
	// matches segment filenames ending in a 14+3 digit timestamp, e.g. _20230101120000123.ts
	segmentTimeRegexp = regexp.MustCompile(`_(\d{14})(\d{3})\.ts`)
)

// FFProbeInfo mirrors the JSON output of `ffprobe -show_format -show_streams`.
type FFProbeInfo struct {
	Streams []struct {
		CodecName string `json:"codec_name"`
		CodecType string `json:"codec_type"`
		Profile   string `json:"profile"`

		// audio
		SampleRate    string `json:"sample_rate"`
		Channels      int    `json:"channels"`
		ChannelLayout string `json:"channel_layout"`

		// video
		Width        int32  `json:"width"`
		Height       int32  `json:"height"`
		RFrameRate   string `json:"r_frame_rate"`
		AvgFrameRate string `json:"avg_frame_rate"`
		BitRate      string `json:"bit_rate"`
	} `json:"streams"`
	Format struct {
		Filename   string `json:"filename"`
		FormatName string `json:"format_name"`
		Duration   string `json:"duration"`
		Size       string `json:"size"`
		ProbeScore int
	`json:"probe_score"`
		Tags       struct {
			Encoder string `json:"encoder"`
		} `json:"tags"`
	} `json:"format"`
}

// ffprobe runs ffprobe on input (with raw-PCM hints for .raw files) and
// decodes its JSON output, failing after a 15s timeout.
func ffprobe(input string) (*FFProbeInfo, error) {
	args := []string{
		"-v", "quiet",
		"-hide_banner",
		"-show_format",
		"-show_streams",
		"-print_format", "json",
	}

	if strings.HasSuffix(input, ".raw") {
		// raw files carry no headers; tell ffprobe the sample format
		args = append(args,
			"-f", "s16le",
			"-ac", "2",
			"-ar", "48k",
		)
	}

	args = append(args, input)

	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()
	cmd := exec.CommandContext(ctx, "ffprobe", args...)
	out, err := cmd.Output()
	if err != nil {
		if errors.Is(ctx.Err(), context.DeadlineExceeded) {
			return nil, fmt.Errorf("ffprobe timeout after 15s")
		}
		return nil, err
	}

	info := &FFProbeInfo{}
	err = json.Unmarshal(out, info)
	return info, err
}

// verify probes the egress output at `in` and asserts that its container,
// duration, codecs, dimensions, bitrates, and framerate all match the pipeline
// configuration and the reported EgressInfo. Returns the probe result.
func verify(t *testing.T, in string, p *config.PipelineConfig, res *livekit.EgressInfo, egressType types.EgressType, withMuting bool, sourceFramerate float64, live bool) *FFProbeInfo {
	info, err := ffprobe(in)
	require.NoError(t, err)

	// Check source type
	if res != nil {
		if (p.RequestType == types.RequestTypeRoomComposite || p.RequestType == types.RequestTypeTemplate) && (p.VideoEnabled || p.Layout != "") {
			require.Equal(t, livekit.EgressSourceType_EGRESS_SOURCE_TYPE_WEB, res.SourceType)
		} else if p.RequestType == types.RequestTypeWeb {
			require.Equal(t, livekit.EgressSourceType_EGRESS_SOURCE_TYPE_WEB, res.SourceType)
		} else {
			require.Equal(t, livekit.EgressSourceType_EGRESS_SOURCE_TYPE_SDK, res.SourceType)
		}
	}

	switch egressType {
	case types.EgressTypeFile:
		// size
		require.NotEqual(t, "0", info.Format.Size)

		// duration
		fileRes := res.GetFile() //nolint:staticcheck
		if fileRes == nil {
			fileRes = res.FileResults[0]
		}
		expected := float64(fileRes.Duration) / 1e9
		actual, err := strconv.ParseFloat(info.Format.Duration, 64)
		require.NoError(t, err)

		// file duration can be different from egress duration based on keyframes, muting, and latency
		delta := 5.0
		switch p.RequestType {
		case types.RequestTypeRoomComposite, types.RequestTypeTemplate, types.RequestTypeWeb:
			require.InDelta(t, expected, actual, delta)
		case types.RequestTypeTrack:
			if p.AudioEnabled {
				if withMuting {
					delta = 6
				}
				require.InDelta(t, expected, actual, delta)
			}
		}

	case types.EgressTypeSegments:
		actual, err := strconv.ParseFloat(info.Format.Duration, 64)
		require.NoError(t, err)

		require.Len(t, res.GetSegmentResults(), 1)
		segments := res.GetSegmentResults()[0]
		if live {
			require.InDelta(t, float64(5*p.GetSegmentConfig().SegmentDuration), actual, float64(p.GetSegmentConfig().SegmentDuration))
		} else {
			// segment count should match total duration / segment duration
			expected := int64(math.Ceil(actual / float64(p.GetSegmentConfig().SegmentDuration)))
			require.InDelta(t, expected, segments.SegmentCount, 1)
		}

	case types.EgressTypeWebsocket:
		size, err := strconv.Atoi(info.Format.Size)
		require.NoError(t, err)
		require.Greater(t, size, 6300000)

		expected := float64(res.StreamResults[0].Duration) / 1e9
		actual, err := strconv.ParseFloat(info.Format.Duration, 64)
		require.NoError(t, err)
		require.InDelta(t, expected, actual, 4.1)
	}

	// verify Xing/Info header for MP3 files
	if egressType == types.EgressTypeFile && p.AudioOutCodec == types.MimeTypeMP3 {
		fpDuration, _ := strconv.ParseFloat(info.Format.Duration, 64)
		verifyXingHeader(t, in, int(p.AudioFrequency), fpDuration)
	}

	// check stream info
	var hasAudio, hasVideo bool
	for _, stream := range info.Streams {
		switch stream.CodecType {
		case "audio":
			hasAudio = true

			// codec
			switch p.AudioOutCodec {
			case types.MimeTypeAAC:
				require.Equal(t, "aac", stream.CodecName)
				require.Equal(t, fmt.Sprint(p.AudioFrequency), stream.SampleRate)
				require.Equal(t, "stereo", stream.ChannelLayout)
			case types.MimeTypeOpus:
				require.Equal(t, "opus", stream.CodecName)
				require.Equal(t, "48000", stream.SampleRate)
				require.Equal(t, "stereo", stream.ChannelLayout)
			case types.MimeTypeMP3:
				require.Equal(t, "mp3", stream.CodecName)
				require.Equal(t, fmt.Sprint(p.AudioFrequency), stream.SampleRate)
				require.Equal(t, "stereo", stream.ChannelLayout)
				// verify CBR: stream bitrate should match configured bitrate
				bitrate, err := strconv.Atoi(stream.BitRate)
				require.NoError(t, err)
				require.InDelta(t, int(p.AudioBitrate)*1000, bitrate, 5000,
					"MP3 bitrate %d bps not close to configured %d kbps", bitrate, p.AudioBitrate)
			case types.MimeTypeRawAudio:
				require.Equal(t, "pcm_s16le", stream.CodecName)
				require.Equal(t, "48000", stream.SampleRate)
			}

			// channels
			require.Equal(t, 2, stream.Channels)

			// audio bitrate
			if p.Outputs[egressType][0].GetOutputType() == types.OutputTypeMP4 {
				bitrate, err := strconv.Atoi(stream.BitRate)
				require.NoError(t, err)
				require.NotZero(t, bitrate)
			}

		case "video":
			hasVideo = true

			// codec and profile
			switch p.VideoOutCodec {
			case types.MimeTypeH264:
				require.Equal(t, "h264", stream.CodecName)
				if p.VideoEncoding {
					switch p.VideoProfile {
					case types.ProfileBaseline:
						require.Equal(t, "Constrained Baseline", stream.Profile)
					case types.ProfileMain:
						require.Equal(t, "Main", stream.Profile)
					case types.ProfileHigh:
						require.Equal(t, "High", stream.Profile)
					}
				}
			case types.MimeTypeVP8:
				require.Equal(t, "vp8", stream.CodecName)
			case types.MimeTypeVP9:
				require.Equal(t, "vp9", stream.CodecName)
			}

			if p.VideoEncoding {
				// dimensions
				require.Equal(t, p.Width, stream.Width)
				require.Equal(t, p.Height, stream.Height)
			}

			switch p.Outputs[egressType][0].GetOutputType() {
			case types.OutputTypeIVF:
				require.Equal(t, "vp8", stream.CodecName)
			case types.OutputTypeMP4:
				require.Equal(t, "h264", stream.CodecName)
				if p.VideoEncoding {
					// bitrate, not available for HLS or WebM
					bitrate, err := strconv.Atoi(stream.BitRate)
					require.NoError(t, err)
					require.NotZero(t, bitrate)
					require.Less(t, int32(bitrate), p.VideoBitrate*1050)

					// framerate
					frac := strings.Split(stream.AvgFrameRate, "/")
					require.Len(t, frac, 2)
					n, err := strconv.ParseFloat(frac[0], 64)
					require.NoError(t, err)
					d, err := strconv.ParseFloat(frac[1], 64)
					require.NoError(t, err)
					require.NotZero(t, d)
					require.Less(t, n/d, float64(p.Framerate)*1.5)
					require.Greater(t, n/d, float64(sourceFramerate)*0.8)
				}
			case types.OutputTypeHLS:
				require.Equal(t, "h264", stream.CodecName)
			}

		default:
			t.Fatalf("unrecognized stream type %s", stream.CodecType)
		}
	}

	if p.AudioEnabled {
		require.True(t, hasAudio)
		require.NotEmpty(t, p.AudioOutCodec)
	}
	if p.VideoEnabled {
		require.True(t, hasVideo)
		require.NotEmpty(t, p.VideoOutCodec)
	}

	return info
}

// parseFFProbeDuration supports either "123.456" (seconds) or "HH:MM:SS.mmm"
func parseFFProbeDuration(s string) (time.Duration, error) {
	s = strings.TrimSpace(s)
	if s == "" {
		return 0, errors.New("empty duration")
	}
	if strings.Contains(s, ":") {
		// HH:MM:SS(.frac)
		parts := strings.Split(s, ":")
		if len(parts) != 3 {
			return 0, fmt.Errorf("invalid H:M:S format: %q", s)
		}
		h, err := strconv.ParseFloat(parts[0], 64)
		if err != nil {
			return 0, fmt.Errorf("invalid h part: %w", err)
		}
		m, err := strconv.ParseFloat(parts[1], 64)
		if err != nil {
			return 0, fmt.Errorf("invalid m part: %w", err)
		}
		sec, err := strconv.ParseFloat(parts[2], 64)
		if err != nil {
			return 0, fmt.Errorf("invalid s part: %w", err)
		}
		total := h*3600 + m*60 + sec
		return time.Duration(total * float64(time.Second)), nil
	}

	// Plain seconds (stringified float)
	f, err := strconv.ParseFloat(s, 64)
	if err != nil {
		return 0, fmt.Errorf("invalid seconds format %q: %w", s, err)
	}
	return time.Duration(f * float64(time.Second)), nil
}

// verifyXingHeader checks that an MP3 file contains a valid Xing/Info header
// and that the duration derived from its frame count matches ffprobe.
func verifyXingHeader(t *testing.T, filepath string, sampleRate int, ffprobeDuration float64) {
	t.Helper()

	f, err := os.Open(filepath)
	require.NoError(t, err)
	defer f.Close()

	xi, err := lameinfo.ParseFromReader(f)
	require.NoError(t, err, "MP3 file missing Xing/Info header")
	require.True(t, xi.HasFrameCount(), "Xing header missing frame count")
	require.NotZero(t, xi.FrameCount, "Xing header has zero frame count")
	require.True(t, xi.HasTOC(), "Xing header missing TOC seek table")

	// MPEG1 Layer 3: 1152 samples per frame.
	// Cross-check Xing frame count against ffprobe duration.
	const samplesPerFrame = 1152
	xingDuration := float64(xi.FrameCount) * samplesPerFrame / float64(sampleRate)
	require.InDelta(t, ffprobeDuration, xingDuration, 0.1,
		"Xing duration (%0.3fs from %d frames) does not match ffprobe duration (%0.3fs)",
		xingDuration, xi.FrameCount, ffprobeDuration)
}



================================================
FILE: test/file.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build integration

package test

import (
	"path"
	"strings"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/livekit/egress/pkg/config"
	"github.com/livekit/egress/pkg/types"
	"github.com/livekit/protocol/livekit"
)

// testFile runs the file-output test matrix: one testCase per request type
// (room composite, web, participant, track composite, track, template, web v2,
// media) and output flavor (mp4/ogg/mp3/webm, audio-only, video-only, audio
// routing). Skipped entirely unless the runFile flag is set on the runner.
// Each case is executed through r.run, which handles publishing and room setup,
// then calls runFileTest below.
func (r *Runner) testFile(t *testing.T) {
	if !r.should(runFile) {
		return
	}

	t.Run("File", func(t *testing.T) {
		for _, test := range []*testCase{
			// ---- Room Composite -----
			{
				name:        "RoomComposite/Base",
				requestType: types.RequestTypeRoomComposite,
				publishOptions: publishOptions{
					audioCodec: types.MimeTypeOpus,
					videoCodec: types.MimeTypeH264,
					layout:     "speaker",
				},
				fileOptions: &fileOptions{
					// {room_name}/{time} templating is verified in verifyFile
					// (the final path must contain no unexpanded "{").
					filename: "r_{room_name}_{time}.mp4",
				},
				contentCheck: r.fullContentCheck,
			},
			{
				name:        "RoomComposite/VideoOnly",
				requestType: types.RequestTypeRoomComposite,
				publishOptions: publishOptions{
					videoCodec: types.MimeTypeH264,
					videoOnly:  true,
					layout:     "speaker",
				},
				encodingOptions: &livekit.EncodingOptions{
					VideoCodec: livekit.VideoCodec_H264_HIGH,
				},
				fileOptions: &fileOptions{
					filename: "r_{room_name}_video_{time}.mp4",
				},
				contentCheck: r.videoOnlyContentCheck,
			},
			{
				name:        "RoomComposite/AudioOnly",
				requestType: types.RequestTypeRoomComposite,
				publishOptions: publishOptions{
					audioCodec: types.MimeTypeOpus,
					audioOnly:  true,
				},
				encodingOptions: &livekit.EncodingOptions{
					AudioCodec: livekit.AudioCodec_OPUS,
				},
				fileOptions: &fileOptions{
					// no extension: it is derived from the OGG file type
					filename: "r_{room_name}_audio_{time}",
					fileType: livekit.EncodedFileType_OGG,
				},
				contentCheck: r.audioOnlyContentCheck,
			},
			{
				name:        "RoomComposite/AudioOnlyMP3",
				requestType: types.RequestTypeRoomComposite,
				publishOptions: publishOptions{
					audioCodec: types.MimeTypeOpus,
					audioOnly:  true,
				},
				fileOptions: &fileOptions{
					filename: "r_{room_name}_audio_mp3_{time}",
					fileType: livekit.EncodedFileType_MP3,
				},
				contentCheck: r.audioOnlyContentCheck,
			},
			// ---------- Web ----------
			{
				name: "Web",
				publishOptions: publishOptions{
					videoOnly: true,
				},
				requestType: types.RequestTypeWeb,
				fileOptions: &fileOptions{
					filename: "web_{time}",
				},
			},
			// ------ Participant ------
			{
				name:        "ParticipantComposite/VP8",
				requestType: types.RequestTypeParticipant,
				publishOptions: publishOptions{
					audioCodec: types.MimeTypeOpus,
					// exercises late publish, unpublish, and republish of the
					// audio track during the recording
					audioDelay:     time.Second * 8,
					audioUnpublish: time.Second * 14,
					audioRepublish: time.Second * 20,
					videoCodec:     types.MimeTypeVP8,
				},
				fileOptions: &fileOptions{
					filename: "participant_{publisher_identity}_vp8_{time}.mp4",
					fileType: livekit.EncodedFileType_MP4,
				},
			},
			{
				name:        "ParticipantComposite/H264",
				requestType: types.RequestTypeParticipant,
				publishOptions: publishOptions{
					audioCodec:     types.MimeTypeOpus,
					videoCodec:     types.MimeTypeH264,
					videoUnpublish: time.Second * 10,
					videoRepublish: time.Second * 20,
				},
				fileOptions: &fileOptions{
					filename: "participant_{room_name}_h264_{time}.mp4",
					fileType: livekit.EncodedFileType_MP4,
				},
				contentCheck: r.fullContentCheckWithVideoUnpublishAt10AndRepublishAt20,
			},
			{
				name:        "ParticipantComposite/AudioOnly",
				requestType: types.RequestTypeParticipant,
				publishOptions: publishOptions{
					audioCodec:     types.MimeTypeOpus,
					audioUnpublish: time.Second * 10,
					audioRepublish: time.Second * 15,
				},
				fileOptions: &fileOptions{
					filename: "participant_{room_name}_{time}.mp4",
					fileType: livekit.EncodedFileType_MP4,
				},
			},
			// ---- Track Composite ----
			{
				name:        "TrackComposite/VP8",
				requestType: types.RequestTypeTrackComposite,
				publishOptions: publishOptions{
					audioCodec: types.MimeTypeOpus,
					videoCodec: types.MimeTypeVP8,
				},
				fileOptions: &fileOptions{
					filename: "tc_{publisher_identity}_vp8_{time}.mp4",
					fileType: livekit.EncodedFileType_MP4,
				},
				contentCheck: r.fullContentCheck,
			},
			{
				name:        "TrackComposite/VideoOnly",
				requestType: types.RequestTypeTrackComposite,
				publishOptions: publishOptions{
					videoCodec: types.MimeTypeH264,
					videoOnly:  true,
				},
				fileOptions: &fileOptions{
					filename: "tc_{room_name}_video_{time}.mp4",
					fileType: livekit.EncodedFileType_MP4,
				},
				contentCheck: r.videoOnlyContentCheck,
			},
			{
				name:        "TrackComposite/AudioOnlyMP3",
				requestType: types.RequestTypeTrackComposite,
				publishOptions: publishOptions{
					audioCodec: types.MimeTypeOpus,
					audioOnly:  true,
				},
				fileOptions: &fileOptions{
					filename: "tc_{room_name}_audio_mp3_{time}",
					fileType: livekit.EncodedFileType_MP3,
					// outputType is used by runFileTest when the pipeline
					// reports OutputTypeUnknownFile
					outputType: types.OutputTypeMP3,
				},
				contentCheck: r.audioOnlyContentCheck,
			},
			{
				name:        "TrackComposite/AudioOnlyPCMU",
				requestType: types.RequestTypeTrackComposite,
				publishOptions: publishOptions{
					// PCMU/PCMA cases trigger a codec-specific room in
					// ensureRoomForTest (integration.go)
					audioCodec: types.MimeTypePCMU,
					audioOnly:  true,
				},
				fileOptions: &fileOptions{
					filename:   "tc_{room_name}_audio_pcmu_{time}.mp4",
					fileType:   livekit.EncodedFileType_MP4,
					outputType: types.OutputTypeMP4,
				},
				contentCheck: r.audioOnlyContentCheck,
			},
			{
				name:        "TrackComposite/AudioOnlyPCMA",
				requestType: types.RequestTypeTrackComposite,
				publishOptions: publishOptions{
					audioCodec: types.MimeTypePCMA,
					audioOnly:  true,
				},
				fileOptions: &fileOptions{
					filename:   "tc_{room_name}_audio_pcma_{time}.mp4",
					fileType:   livekit.EncodedFileType_MP4,
					outputType: types.OutputTypeMP4,
				},
				contentCheck: r.audioOnlyContentCheck,
			},
			// --------- Track ---------
			{
				name:        "Track/Opus",
				requestType: types.RequestTypeTrack,
				publishOptions: publishOptions{
					audioCodec: types.MimeTypeOpus,
					audioOnly:  true,
				},
				fileOptions: &fileOptions{
					filename:   "t_{track_source}_{time}.ogg",
					outputType: types.OutputTypeOGG,
				},
				contentCheck: r.audioOnlyContentCheck,
			},
			{
				name:        "Track/PCMU",
				requestType: types.RequestTypeTrack,
				publishOptions: publishOptions{
					audioCodec: types.MimeTypePCMU,
					audioOnly:  true,
				},
				fileOptions: &fileOptions{
					filename:   "t_{track_source}_pcmu_{time}.ogg",
					outputType: types.OutputTypeOGG,
				},
				contentCheck: r.audioOnlyContentCheck,
			},
			{
				name:        "Track/PCMA",
				requestType: types.RequestTypeTrack,
				publishOptions: publishOptions{
					audioCodec: types.MimeTypePCMA,
					audioOnly:  true,
				},
				fileOptions: &fileOptions{
					filename:   "t_{track_source}_pcma_{time}.ogg",
					outputType: types.OutputTypeOGG,
				},
				contentCheck: r.audioOnlyContentCheck,
			},
			{
				name:        "Track/H264",
				requestType: types.RequestTypeTrack,
				publishOptions: publishOptions{
					videoCodec: types.MimeTypeH264,
					videoOnly:  true,
				},
				fileOptions: &fileOptions{
					filename:   "t_{track_id}_{time}.mp4",
					outputType: types.OutputTypeMP4,
				},
				contentCheck: r.videoOnlyContentCheck,
			},
			{
				name:        "Track/VP8",
				requestType: types.RequestTypeTrack,
				publishOptions: publishOptions{
					videoCodec: types.MimeTypeVP8,
					videoOnly:  true,
				},
				fileOptions: &fileOptions{
					filename:   "t_{track_type}_{time}.webm",
					outputType: types.OutputTypeWebM,
				},
				contentCheck: r.videoOnlyContentCheck,
			},
			// VP9 track egress is currently disabled:
			// {
			// 	name: "Track/VP9",
			// 	videoOnly: true,
			// 	videoCodec: types.MimeTypeVP9,
			// 	outputType: types.OutputTypeWebM,
			// 	filename: "t_{track_type}_{time}.webm",
			// },
			// -------- Template --------
			{
				name:        "Template/AudioOnly",
				requestType: types.RequestTypeTemplate,
				publishOptions: publishOptions{
					audioCodec: types.MimeTypeOpus,
					audioOnly:  true,
				},
				encodingOptions: &livekit.EncodingOptions{
					AudioCodec: livekit.AudioCodec_OPUS,
				},
				fileOptions: &fileOptions{
					filename: "template_audio_{time}",
					fileType: livekit.EncodedFileType_OGG,
				},
				contentCheck: r.audioOnlyContentCheck,
			},
			{
				name:        "Template/VideoOnly",
				requestType: types.RequestTypeTemplate,
				publishOptions: publishOptions{
					videoCodec: types.MimeTypeH264,
					videoOnly:  true,
					layout:     "speaker",
				},
				fileOptions: &fileOptions{
					filename: "template_video_{time}.mp4",
				},
				contentCheck: r.videoOnlyContentCheck,
			},
			{
				name:        "Template/Base",
				requestType: types.RequestTypeTemplate,
				publishOptions: publishOptions{
					audioCodec: types.MimeTypeOpus,
					videoCodec: types.MimeTypeH264,
					layout:     "speaker",
				},
				fileOptions: &fileOptions{
					filename: "template_{time}.mp4",
				},
				contentCheck: r.fullContentCheck,
			},
			// --------- Web V2 --------
			{
				name:        "WebV2/AudioOnly",
				requestType: types.RequestTypeWeb,
				publishOptions: publishOptions{
					audioOnly: true,
				},
				encodingOptions: &livekit.EncodingOptions{
					AudioCodec: livekit.AudioCodec_OPUS,
				},
				fileOptions: &fileOptions{
					filename: "webv2_audio_{time}",
					fileType: livekit.EncodedFileType_OGG,
				},
				// presence of v2OutputOptions switches request building to the
				// v2 output shape — see buildRequest
				v2OutputOptions: &v2OutputOptions{},
				contentCheck:    r.audioOnlyContentCheck,
			},
			{
				name:        "WebV2/VideoOnly",
				requestType: types.RequestTypeWeb,
				publishOptions: publishOptions{
					videoOnly: true,
				},
				fileOptions: &fileOptions{
					filename: "webv2_video_{time}.mp4",
				},
				v2OutputOptions: &v2OutputOptions{},
			},
			{
				name:        "WebV2/Base",
				requestType: types.RequestTypeWeb,
				publishOptions: publishOptions{
					audioCodec: types.MimeTypeOpus,
					videoCodec: types.MimeTypeH264,
				},
				fileOptions: &fileOptions{
					filename: "webv2_{time}.mp4",
				},
				v2OutputOptions: &v2OutputOptions{},
			},
			// -------- Media ----------
			{
				name:        "Media/AudioOnly",
				requestType: types.RequestTypeMedia,
				publishOptions: publishOptions{
					audioCodec: types.MimeTypeOpus,
					audioOnly:  true,
					// "set-at-runtime" placeholders are replaced with real
					// track/participant IDs once publishing has happened
					audioRoutes: []*livekit.AudioRoute{{
						Match: &livekit.AudioRoute_TrackId{TrackId: "set-at-runtime"},
					}},
				},
				encodingOptions: &livekit.EncodingOptions{
					AudioCodec: livekit.AudioCodec_OPUS,
				},
				fileOptions: &fileOptions{
					filename: "media_audio_{time}",
					fileType: livekit.EncodedFileType_OGG,
				},
				contentCheck: r.audioOnlyContentCheck,
			},
			{
				name:        "Media/VideoOnly",
				requestType: types.RequestTypeMedia,
				publishOptions: publishOptions{
					videoCodec: types.MimeTypeH264,
					videoOnly:  true,
				},
				fileOptions: &fileOptions{
					filename: "media_video_{time}.mp4",
				},
				contentCheck: r.videoOnlyContentCheck,
			},
			{
				name:        "Media/Base",
				requestType: types.RequestTypeMedia,
				publishOptions: publishOptions{
					audioCodec: types.MimeTypeOpus,
					videoCodec: types.MimeTypeH264,
					audioRoutes: []*livekit.AudioRoute{{
						Match: &livekit.AudioRoute_TrackId{TrackId: "set-at-runtime"},
					}},
				},
				fileOptions: &fileOptions{
					filename: "media_{time}.mp4",
				},
				contentCheck: r.fullContentCheck,
			},
			// ---- Media Audio Routing ----
			{
				name:        "Media/AudioRouteByTrackID",
				requestType: types.RequestTypeMedia,
				publishOptions: publishOptions{
					audioCodec: types.MimeTypeOpus,
					videoCodec: types.MimeTypeH264,
					audioRoutes: []*livekit.AudioRoute{{
						Match:   &livekit.AudioRoute_TrackId{TrackId: "set-at-runtime"},
						Channel: livekit.AudioChannel_AUDIO_CHANNEL_LEFT,
					}},
				},
				fileOptions: &fileOptions{
					filename: "media_route_trackid_{time}.mp4",
				},
			},
			{
				name:        "Media/AudioRouteByParticipantIdentity",
				requestType: types.RequestTypeMedia,
				publishOptions: publishOptions{
					audioCodec: types.MimeTypeOpus,
					videoCodec: types.MimeTypeH264,
					audioRoutes: []*livekit.AudioRoute{{
						Match:   &livekit.AudioRoute_ParticipantIdentity{ParticipantIdentity: "set-at-runtime"},
						Channel: livekit.AudioChannel_AUDIO_CHANNEL_BOTH,
					}},
				},
				fileOptions: &fileOptions{
					filename: "media_route_identity_{time}.mp4",
				},
			},
			{
				name:        "Media/AudioRouteByParticipantKind",
				requestType: types.RequestTypeMedia,
				publishOptions: publishOptions{
					audioCodec: types.MimeTypeOpus,
					videoCodec: types.MimeTypeH264,
					audioRoutes: []*livekit.AudioRoute{{
						Match:   &livekit.AudioRoute_ParticipantKind{ParticipantKind: livekit.ParticipantInfo_STANDARD},
						Channel: livekit.AudioChannel_AUDIO_CHANNEL_BOTH,
					}},
				},
				fileOptions: &fileOptions{
					filename: "media_route_kind_{time}.mp4",
				},
			},
			{
				name:        "Media/MultiRoute",
				requestType: types.RequestTypeMedia,
				publishOptions: publishOptions{
					audioCodec: types.MimeTypeOpus,
					videoCodec: types.MimeTypeH264,
					// one source per stereo channel
					audioRoutes: []*livekit.AudioRoute{
						{
							Match:   &livekit.AudioRoute_TrackId{TrackId: "set-at-runtime"},
							Channel: livekit.AudioChannel_AUDIO_CHANNEL_LEFT,
						},
						{
							Match:   &livekit.AudioRoute_ParticipantIdentity{ParticipantIdentity: "set-at-runtime"},
							Channel: livekit.AudioChannel_AUDIO_CHANNEL_RIGHT,
						},
					},
				},
				fileOptions: &fileOptions{
					filename: "media_multiroute_{time}.mp4",
				},
			},
			{
				name:        "Media/ParticipantVideo",
				requestType: types.RequestTypeMedia,
				publishOptions: publishOptions{
					audioCodec: types.MimeTypeOpus,
					videoCodec: types.MimeTypeH264,
					mediaParticipantVideo: &livekit.ParticipantVideo{
						Identity: "set-at-runtime",
					},
					audioRoutes: []*livekit.AudioRoute{{
						Match: &livekit.AudioRoute_TrackId{TrackId: "set-at-runtime"},
					}},
				},
				fileOptions: &fileOptions{
					filename: "media_participant_video_{time}.mp4",
				},
				contentCheck: r.fullContentCheck,
			},
		} {
			// r.run returns false when running in Short mode, in which case
			// only the first selected case is executed
			if !r.run(t, test, r.runFileTest) {
				return
			}
		}
	})
}

// runFileTest records for ~25s (10s before the optional pipeline dot-file dump,
// 15s after), stops the egress, rebuilds the pipeline config from the original
// request, and hands off to verifyFile.
func (r *Runner) runFileTest(t *testing.T, test *testCase) {
	req := r.buildRequest(test)

	// start
	egressID := r.startEgress(t, req)

	time.Sleep(time.Second * 10)
	if r.Dotfiles {
		r.createDotFile(t, egressID)
	}

	// stop
	time.Sleep(time.Second * 15)
	res := r.stopEgress(t, egressID)

	// get params
	p, err := config.GetValidatedPipelineConfig(r.ServiceConfig, req)
	require.NoError(t, err)
	if p.GetFileConfig().OutputType == types.OutputTypeUnknownFile {
		// fall back to the output type declared by the test case
		p.GetFileConfig().OutputType = test.fileOptions.outputType
	}

	// video encoding is expected for everything except track egress and
	// audio-only cases
	require.Equal(t, test.requestType != types.RequestTypeTrack && !test.audioOnly, p.VideoEncoding)

	// verify
	r.verifyFile(t, test, p, res)
}

// verifyFile checks the final EgressInfo, downloads the recorded file and its
// manifest from storage, runs the generic media verification, and finally the
// test case's optional content check against the downloaded file.
func (r *Runner) verifyFile(t *testing.T, tc *testCase, p *config.PipelineConfig, res *livekit.EgressInfo) {
	// egress info: an error must be present iff the egress failed
	require.Equal(t, res.Error == "", res.Status != livekit.EgressStatus_EGRESS_FAILED)
	require.NotZero(t, res.StartedAt)
	require.NotZero(t, res.EndedAt)

	// file info: prefer the deprecated singular field, fall back to FileResults
	fileRes := res.GetFile() //nolint:staticcheck
	if fileRes == nil {
		require.Len(t, res.FileResults, 1)
		fileRes = res.FileResults[0]
	}

	require.NotEmpty(t, fileRes.Location)
	require.Greater(t, fileRes.Size, int64(0))
	require.Greater(t, fileRes.Duration, int64(0))

	storagePath := fileRes.Filename
	require.NotEmpty(t, storagePath)
	// all filename template placeholders must have been expanded
	require.False(t, strings.Contains(storagePath, "{"))
	storageFilename := path.Base(storagePath)

	// download from cloud storage
	localPath := path.Join(r.FilePrefix, storageFilename)
	download(t, p.GetFileConfig().StorageConfig, localPath, storagePath, false)

	// the manifest is stored alongside the file as <egress_id>.json
	manifestLocal := path.Join(path.Dir(localPath), res.EgressId+".json")
	manifestStorage := path.Join(path.Dir(storagePath), res.EgressId+".json")
	manifest := loadManifest(t, p.GetFileConfig().StorageConfig, manifestLocal, manifestStorage)
	require.NotNil(t, manifest)

	// verify
	info := verify(t, localPath, p, res, types.EgressTypeFile, r.Muting, r.sourceFramerate, false)
	if tc.contentCheck != nil && info != nil {
		tc.contentCheck(t, localPath, info)
	}
}



================================================
FILE: test/flags.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build integration

package test

import "github.com/livekit/egress/pkg/types"

// Test selection is a single bitmask: request-type flags occupy the low bits
// (0-6), output-type flags occupy the high bits (26-31). A test runs only when
// both its request-type bit and its output-type bit are set.
const (
	runRoom           = 0b1 << 0
	runWeb            = 0b1 << 1
	runParticipant    = 0b1 << 2
	runTrackComposite = 0b1 << 3
	runTrack          = 0b1 << 4
	runTemplate       = 0b1 << 5
	runMedia          = 0b1 << 6
	runAllRequests    = 0b1111111 // bits 0-6

	runFile       = 0b1 << 31
	runStream     = 0b1 << 30
	runSegments   = 0b1 << 29
	runImages     = 0b1 << 28
	runMulti      = 0b1 << 27
	runEdge       = 0b1 << 26
	runAllOutputs = 0b111111 << 26 // bits 26-31
)

// runRequestType maps each request type to its selection bit.
var runRequestType = map[types.RequestType]uint{
	types.RequestTypeRoomComposite:  runRoom,
	types.RequestTypeWeb:            runWeb,
	types.RequestTypeParticipant:    runParticipant,
	types.RequestTypeTrackComposite: runTrackComposite,
	types.RequestTypeTrack:          runTrack,
	types.RequestTypeTemplate:       runTemplate,
	types.RequestTypeMedia:          runMedia,
}

// updateFlagset translates the runner's *TestsOnly booleans into shouldRun
// bits. Each switch honors only the first matching flag; with no flag set,
// all request types / all output types are enabled.
func (r *Runner) updateFlagset() {
	switch {
	case r.RoomTestsOnly:
		r.shouldRun |= runRoom
	case r.ParticipantTestsOnly:
		r.shouldRun |= runParticipant
	case r.WebTestsOnly:
		r.shouldRun |= runWeb
	case r.TrackCompositeTestsOnly:
		r.shouldRun |= runTrackComposite
	case r.TrackTestsOnly:
		r.shouldRun |= runTrack
	case r.TemplateTestsOnly:
		r.shouldRun |= runTemplate
	case r.MediaTestsOnly:
		r.shouldRun |= runMedia
	default:
		r.shouldRun |= runAllRequests
	}

	switch {
	case r.FileTestsOnly:
		r.shouldRun |= runFile
	case r.StreamTestsOnly:
		r.shouldRun |= runStream
	case r.SegmentTestsOnly:
		r.shouldRun |= runSegments
	case r.ImageTestsOnly:
		r.shouldRun |= runImages
	case r.MultiTestsOnly:
		r.shouldRun |= runMulti
	case r.EdgeCasesOnly:
		r.shouldRun |= runEdge
	default:
		r.shouldRun |= runAllOutputs
	}
}

// should reports whether any bit of runFlag is enabled in the runner's mask.
func (r *Runner) should(runFlag uint) bool {
	return r.shouldRun&runFlag > 0
}



================================================
FILE: test/images.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build integration

package test

import (
	"fmt"
	"path"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/livekit/egress/pkg/config"
	"github.com/livekit/egress/pkg/types"
	"github.com/livekit/protocol/livekit"
)

// testImages runs the image-output (snapshot) test matrix: room composite,
// track composite, and media sources. Skipped unless the runImages flag is set.
func (r *Runner) testImages(t *testing.T) {
	if !r.should(runImages) {
		return
	}

	t.Run("Images", func(t *testing.T) {
		for _, test := range []*testCase{
			// ---- Room Composite -----
			{
				name:        "RoomComposite",
				requestType: types.RequestTypeRoomComposite,
				publishOptions: publishOptions{
					audioCodec: types.MimeTypeOpus,
					videoCodec: types.MimeTypeH264,
					layout:     "speaker",
				},
				encodingOptions: &livekit.EncodingOptions{
					Width:  640,
					Height: 360,
				},
				imageOptions: &imageOptions{
					prefix: "r_{room_name}_{time}",
					suffix: livekit.ImageFileSuffix_IMAGE_SUFFIX_TIMESTAMP,
				},
			},
			// ---- Track Composite ----
			{
				name:        "TrackComposite/H264",
				requestType: types.RequestTypeTrackComposite,
				publishOptions: publishOptions{
					audioCodec: types.MimeTypeOpus,
					videoCodec: types.MimeTypeH264,
				},
				imageOptions: &imageOptions{
					// no suffix option: uses the default suffix behavior
					prefix: "tc_{publisher_identity}_h264",
				},
			},
			// -------- Media ----------
			{
				name:        "Media",
				requestType: types.RequestTypeMedia,
				publishOptions: publishOptions{
					videoCodec: types.MimeTypeH264,
					videoOnly:  true,
				},
				imageOptions: &imageOptions{
					prefix: "media_{room_name}_{time}",
					suffix: livekit.ImageFileSuffix_IMAGE_SUFFIX_TIMESTAMP,
				},
			},
		} {
			if !r.run(t, test, r.runImagesTest) {
				return
			}
		}
	})
}

// runImagesTest captures snapshots for ~25s (10s before the optional dot-file
// dump, 15s after), stops the egress, and verifies the produced images.
func (r *Runner) runImagesTest(t *testing.T, test *testCase) {
	req := r.buildRequest(test)

	egressID := r.startEgress(t, req)
	time.Sleep(time.Second * 10)
	if r.Dotfiles {
		r.createDotFile(t, egressID)
	}

	// stop
	time.Sleep(time.Second * 15)
	res := r.stopEgress(t, egressID)

	// get params
	p, err := config.GetValidatedPipelineConfig(r.ServiceConfig, req)
	require.NoError(t, err)

	r.verifyImages(t, p, res)
}

// verifyImages checks the final EgressInfo and downloads every captured image,
// reconstructing each storage path as <prefix>_%05d<ext> from the reported
// image count.
func (r *Runner) verifyImages(t *testing.T, p *config.PipelineConfig, res *livekit.EgressInfo) {
	// egress info: an error must be present iff the egress failed
	require.Equal(t, res.Error == "", res.Status != livekit.EgressStatus_EGRESS_FAILED)
	require.NotZero(t, res.StartedAt)
	require.NotZero(t, res.EndedAt)

	// image info
	require.Len(t, res.GetImageResults(), 1)
	images := res.GetImageResults()[0]
	require.Greater(t, images.ImageCount, int64(0))

	imageConfig := p.GetImageConfigs()[0]
	for i := range images.ImageCount {
		storagePath := fmt.Sprintf("%s_%05d%s", images.FilenamePrefix, i, imageConfig.ImageExtension)
		localPath := path.Join(r.FilePrefix, path.Base(storagePath))
		download(t, imageConfig.StorageConfig, localPath, storagePath, true)
	}
}



================================================
FILE: test/integration.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build integration

package test

import (
	"context"
	"encoding/json"
	"fmt"
	"os"
	"strings"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/livekit/egress/pkg/types"
	"github.com/livekit/protocol/livekit"
	"github.com/livekit/protocol/logger"
	"github.com/livekit/protocol/rpc"
)

// uploadPrefix groups uploaded artifacts by run date.
var uploadPrefix = fmt.Sprintf("integration/%s", time.Now().Format("2006-01-02"))

// RunTests executes every test suite in order; each suite self-selects via the
// shouldRun flag mask.
func (r *Runner) RunTests(t *testing.T) {
	// run tests
	r.testFile(t)
	r.testStream(t)
	r.testSegments(t)
	r.testImages(t)
	r.testMulti(t)
	r.testEdgeCases(t)
}

// run executes a single test case: it skips cases whose request type isn't
// selected, sets the expected source framerate, waits for the service and room
// to be idle, publishes the case's audio/video samples (with optional delay,
// unpublish, republish, and muting), then invokes f. Returns false when the
// runner is in Short mode, signaling the caller to stop after this case.
func (r *Runner) run(t *testing.T, test *testCase, f func(*testing.T, *testCase)) bool {
	if !r.should(runRequestType[test.requestType]) {
		return true
	}

	// composite/web sources render at 30fps; sample-file-driven sources play at
	// the sample's native 23.97fps
	switch test.requestType {
	case types.RequestTypeRoomComposite, types.RequestTypeWeb:
		r.sourceFramerate = 30
	case types.RequestTypeParticipant, types.RequestTypeTrackComposite, types.RequestTypeTrack, types.RequestTypeMedia:
		r.sourceFramerate = 23.97
	case types.RequestTypeTemplate:
		if test.audioOnly && test.layout == "" && test.templateCustomBaseUrl == "" {
			r.sourceFramerate = 23.97
		} else {
			r.sourceFramerate = 30
		}
	}

	r.awaitIdle(t)
	r.ensureRoomForTest(t, test)

	r.testNumber++
	t.Run(fmt.Sprintf("%d/%s", r.testNumber, test.name), func(t *testing.T) {
		audioMuting := r.Muting
		// video muting is only exercised when there is no audio track
		videoMuting := r.Muting && test.audioCodec == ""
		test.audioTrackID = r.publishSample(t, test.audioCodec, test.audioDelay, test.audioUnpublish, audioMuting)
		if test.audioRepublish != 0 {
			r.publishSample(t, test.audioCodec, test.audioRepublish, 0, audioMuting)
		}
		test.videoTrackID = r.publishSample(t, test.videoCodec, test.videoDelay, test.videoUnpublish, videoMuting)
		if test.videoRepublish != 0 {
			r.publishSample(t, test.videoCodec, test.videoRepublish, 0, videoMuting)
		}

		logger.Infow("test publish summary",
			"test", test.name,
			"room", r.RoomName,
			"audioCodec", test.audioCodec,
			"audioTrackID", test.audioTrackID,
			"videoCodec", test.videoCodec,
			"videoTrackID", test.videoTrackID,
		)

		f(t, test)
	})

	return !r.Short
}

// ensureRoomForTest switches to a codec-specific room (suffix -pcmu/-pcma with
// a restricted codec list) for PCMU/PCMA cases, and back to the base room
// otherwise. No-op when already connected to the desired room.
func (r *Runner) ensureRoomForTest(t *testing.T, test *testCase) {
	desiredRoom := r.RoomBaseName
	var codecs []livekit.Codec
	switch test.audioCodec {
	case types.MimeTypePCMU:
		desiredRoom = fmt.Sprintf("%s-pcmu", r.RoomBaseName)
		codecs = []livekit.Codec{{
			Mime: string(types.MimeTypePCMU),
		}}
	case types.MimeTypePCMA:
		desiredRoom = fmt.Sprintf("%s-pcma", r.RoomBaseName)
		codecs = []livekit.Codec{{
			Mime: string(types.MimeTypePCMA),
		}}
	}

	if desiredRoom == "" || desiredRoom == r.RoomName {
		return
	}

	r.connectRoom(t, desiredRoom, codecs)
}

// awaitIdle kills any running egress processes, then polls for up to 30s until
// the service is idle and all local tracks are unpublished; fails the test
// otherwise.
func (r *Runner) awaitIdle(t *testing.T) {
	r.svc.KillAll()
	for i := 0; i < 30; i++ {
		if r.svc.IsIdle() && len(r.room.LocalParticipant.TrackPublications()) == 0 {
			return
		}
		time.Sleep(time.Second)
	}
	if !r.svc.IsIdle() {
		t.Fatal("service not idle after 30s")
	} else if len(r.room.LocalParticipant.TrackPublications()) != 0 {
		t.Fatal("room still has tracks after 30s")
	}
}

// startEgress sends the start request, optionally checks the health endpoint
// for the new egress ID, waits 5s, and asserts the first status update is
// EGRESS_ACTIVE. Returns the egress ID.
func (r *Runner) startEgress(t *testing.T, req *rpc.StartEgressRequest) string {
	info := r.sendRequest(t, req)

	// check status
	if r.HealthPort != 0 {
		status := r.getStatus(t)
		require.Contains(t, status, info.EgressId)
	}

	// wait
	time.Sleep(time.Second * 5)

	// check active update
	r.checkUpdate(t, info.EgressId, livekit.EgressStatus_EGRESS_ACTIVE)

	return info.EgressId
}

// sendRequest issues the StartEgress RPC and validates the immediately
// returned EgressInfo: no error, an ID, the expected room name (empty for web
// and web-replay sources), and STARTING status.
func (r *Runner) sendRequest(t *testing.T, req *rpc.StartEgressRequest) *livekit.EgressInfo {
	// send start request
	info, err := r.StartEgress(context.Background(), req)

	// check returned egress info
	require.NoError(t, err)
	require.Empty(t, info.Error)
	require.NotEmpty(t, info.EgressId)
	switch req.Request.(type) {
	case *rpc.StartEgressRequest_Web:
		require.Empty(t, info.RoomName)
	case *rpc.StartEgressRequest_Replay:
		replayReq := req.Request.(*rpc.StartEgressRequest_Replay).Replay
		if _, ok := replayReq.Source.(*livekit.ExportReplayRequest_Web); ok {
			require.Empty(t, info.RoomName)
		}
	default:
		require.Equal(t, r.RoomName, info.RoomName)
	}
	require.Equal(t, livekit.EgressStatus_EGRESS_STARTING.String(), info.Status.String())

	return info
}

// checkUpdate consumes the next update for egressID and asserts it carries the
// expected status (and that Error is set iff the status is FAILED).
func (r *Runner) checkUpdate(t *testing.T, egressID string, status livekit.EgressStatus) *livekit.EgressInfo {
	info := r.getUpdate(t, egressID)

	require.Equal(t, status.String(), info.Status.String(), info.Error)
	require.Equal(t, info.Status == livekit.EgressStatus_EGRESS_FAILED, info.Error != "")

	return info
}

// checkStreamUpdate polls updates until every stream in expected has reached
// (at least) its expected status; statuses that are still behind the expected
// value trigger another poll rather than a failure.
func (r *Runner) checkStreamUpdate(t *testing.T, egressID string, expected map[string]livekit.StreamInfo_Status) {
	for {
		info := r.getUpdate(t, egressID)
		if len(expected) != len(info.StreamResults) {
			// update predates the stream list change; wait for the next one
			continue
		}
		require.Equal(t, len(expected), len(info.StreamResults))

		checkNext := false
		for _, s := range info.StreamResults {
			require.Equal(t, s.Status == livekit.StreamInfo_FAILED, s.Error != "")
			if expected[s.Url] > s.Status {
				// stream hasn't progressed to the expected status yet
				logger.Debugw(fmt.Sprintf("stream status %s, expecting %s", s.Status.String(), expected[s.Url].String()))
				checkNext = true
				continue
			}
			require.Equal(t, expected[s.Url], s.Status)
		}

		if !checkNext {
			return
		}
	}
}

// getUpdate blocks until an update for egressID arrives on the updates channel,
// discarding updates for other egresses. After 30s without one it dumps the
// pipeline dot file and fails the test.
func (r *Runner) getUpdate(t *testing.T, egressID string) *livekit.EgressInfo {
	for {
		select {
		case info := <-r.updates:
			if info.EgressId == egressID {
				return info
			}

		case <-time.After(time.Second * 30):
			r.createDotFile(t, egressID)
			t.Fatal("no update from results channel")
			return nil
		}
	}
}

// getStatus fetches and JSON-decodes the service's health/status payload.
func (r *Runner) getStatus(t *testing.T) map[string]interface{} {
	b, err := r.svc.Status()
	require.NoError(t, err)

	status := make(map[string]interface{})
	err = json.Unmarshal(b, &status)
	require.NoError(t, err)

	return status
}

// createDotFile writes the GStreamer pipeline dot graph for egressID into
// FilePrefix, named after the subtest (with the leading 11-char test prefix —
// presumably "TestEgress/" — stripped and slashes replaced).
func (r *Runner) createDotFile(t *testing.T, egressID string) {
	dot, err := r.svc.GetGstPipelineDotFile(egressID)
	require.NoError(t, err)

	filename := strings.ReplaceAll(t.Name()[11:], "/", "_")
	filepath := fmt.Sprintf("%s/%s.dot", r.FilePrefix, filename)
	f, err := os.Create(filepath)
	require.NoError(t, err)
	defer f.Close()
	_, err = f.WriteString(dot)
	require.NoError(t, err)
}

// stopEgress sends StopEgress, asserts the synchronous response is ENDING,
// then consumes the ENDING and COMPLETE updates and optionally re-checks the
// health endpoint. Returns the final (COMPLETE) EgressInfo.
func (r *Runner) stopEgress(t *testing.T, egressID string) *livekit.EgressInfo {
	// send stop request
	info, err := r.client.StopEgress(context.Background(), egressID, &livekit.StopEgressRequest{
		EgressId: egressID,
	})

	// check returned egress info
	require.NoError(t, err)
	require.Empty(t, info.Error)
	require.NotEmpty(t, info.StartedAt)
	require.Equal(t, livekit.EgressStatus_EGRESS_ENDING.String(), info.Status.String())

	// check ending update
	r.checkUpdate(t, egressID, livekit.EgressStatus_EGRESS_ENDING)

	// get final info
	res := r.checkUpdate(t, egressID, livekit.EgressStatus_EGRESS_COMPLETE)

	// check status
	if r.HealthPort != 0 {
		status := r.getStatus(t)
		require.Len(t, status, 1)
	}

	return res
}



================================================
FILE: test/integration_test.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build integration

package test

import (
	"embed"
	"io/fs"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/livekit/egress/pkg/info"
	"github.com/livekit/egress/pkg/server"
	"github.com/livekit/protocol/redis"
	"github.com/livekit/psrpc"
)

var (
	// embedded template assets served to the egress service during tests
	//go:embed templates
	templateEmbedFs embed.FS
)

// TestEgress is the integration suite entry point: it builds the runner from
// config, wires a Redis-backed psrpc message bus, session reporter, and egress
// server, starts the service with the embedded templates, and runs all suites.
func TestEgress(t *testing.T) {
	r := NewRunner(t)

	rfs, err := fs.Sub(templateEmbedFs, "templates")
	require.NoError(t, err)

	// rpc client and server
	rc, err := redis.GetRedisClient(r.Redis)
	require.NoError(t, err)
	bus := psrpc.NewRedisMessageBus(rc)

	ioClient, err := info.NewSessionReporter(&r.BaseConfig, bus)
	require.NoError(t, err)

	svc, err := server.NewServer(r.ServiceConfig, bus, ioClient)
	require.NoError(t, err)

	r.StartServer(t, svc, bus, rfs)
	r.RunTests(t)
}



================================================
FILE: test/ioserver.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build integration

package test

import (
	"context"

	"google.golang.org/protobuf/types/known/emptypb"

	"github.com/livekit/protocol/livekit"
	"github.com/livekit/protocol/logger"
	"github.com/livekit/protocol/rpc"
)

// ioTestServer is a stub IOInfo RPC server used by the integration tests.
// It forwards every egress update onto the runner's updates channel so tests
// can assert on the status transitions; everything else is a logged no-op.
type ioTestServer struct {
	rpc.IOInfoServerImpl // embedded default implementation
	server               rpc.IOInfoServer
	updates              chan *livekit.EgressInfo
}

// newIOTestServer registers an ioTestServer on the given message bus,
// delivering UpdateEgress calls to the provided updates channel.
func newIOTestServer(bus psrpc.MessageBus, updates chan *livekit.EgressInfo) (*ioTestServer, error) {
	s := &ioTestServer{
		updates: updates,
	}
	server, err := rpc.NewIOInfoServer(s, bus)
	if err != nil {
		return nil, err
	}
	s.server = server
	return s, nil
}

// CreateEgress only logs the creation; tests don't track created egresses here.
func (s *ioTestServer) CreateEgress(_ context.Context, info *livekit.EgressInfo) (*emptypb.Empty, error) {
	logger.Infow("egress created", "egressID", info.EgressId)
	return &emptypb.Empty{}, nil
}

// UpdateEgress logs the update and pushes it to the updates channel for the
// test runner to consume (see Runner.getUpdate).
func (s *ioTestServer) UpdateEgress(_ context.Context, info *livekit.EgressInfo) (*emptypb.Empty, error) {
	logger.Infow("egress updated", "egressID", info.EgressId, "status", info.Status)
	s.updates <- info
	return &emptypb.Empty{}, nil
}

// UpdateMetrics is ignored in tests.
func (s *ioTestServer) UpdateMetrics(_ context.Context, _ *rpc.UpdateMetricsRequest) (*emptypb.Empty, error) {
	return &emptypb.Empty{}, nil
}



================================================
FILE: test/multi.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build integration

package test

import (
	"context"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/livekit/egress/pkg/config"
	"github.com/livekit/egress/pkg/types"
	"github.com/livekit/protocol/livekit"
)

// testMulti runs test cases that combine multiple output types (file + images,
// file + segments, file + stream + segments, stream + segments) in a single
// egress. Skipped unless the runMulti flag is set.
func (r *Runner) testMulti(t *testing.T) {
	if !r.should(runMulti) {
		return
	}

	t.Run("Multi", func(t *testing.T) {
		for _, test := range []*testCase{
			// ---- Room Composite -----
			{
				name:        "RoomComposite",
				requestType: types.RequestTypeRoomComposite,
				publishOptions: publishOptions{
					audioCodec: types.MimeTypeOpus,
					videoCodec: types.MimeTypeVP8,
				},
				fileOptions: &fileOptions{
					filename: "rc_multiple_{time}",
				},
				imageOptions: &imageOptions{
					prefix: "rc_image",
				},
				multi: true,
			},
			// ---------- Web ----------
			{
				name:        "Web",
				requestType: types.RequestTypeWeb,
				fileOptions: &fileOptions{
					filename: "web_multiple_{time}",
				},
				segmentOptions: &segmentOptions{
					prefix:   "web_multiple_{time}",
					playlist: "web_multiple_{time}.m3u8",
				},
				multi: true,
			},
			// ------ Participant ------
			{
				name:        "ParticipantComposite",
				requestType: types.RequestTypeParticipant,
				publishOptions: publishOptions{
					audioCodec:     types.MimeTypeOpus,
					audioUnpublish: time.Second * 20,
					videoCodec:     types.MimeTypeVP8,
					videoDelay:     time.Second * 5,
				},
				fileOptions: &fileOptions{
					filename: "participant_{publisher_identity}_multi_{time}.mp4",
				},
				streamOptions: &streamOptions{
					outputType: types.OutputTypeRTMP,
				},
				segmentOptions: &segmentOptions{
					prefix:   "participant_{publisher_identity}_multi_{time}",
					playlist: "participant_{publisher_identity}_multi_{time}.m3u8",
				},
				multi: true,
			},
			// ---- Track Composite ----
			{
				name:        "TrackComposite",
				requestType: types.RequestTypeTrackComposite,
				publishOptions: publishOptions{
					audioCodec: types.MimeTypeOpus,
					videoCodec: types.MimeTypeVP8,
				},
				streamOptions: &streamOptions{
					outputType: types.OutputTypeRTMP,
				},
				segmentOptions: &segmentOptions{
					prefix:   "tc_multiple_{time}",
					playlist: "tc_multiple_{time}.m3u8",
				},
				multi: true,
			},
		} {
			if !r.run(t, test, r.runMultiTest) {
				return
			}
		}
	})
}

// runMultiTest starts a multi-output egress, and — when the case includes a
// stream output — adds an RTMP url mid-session via UpdateStream and verifies
// the stream goes active before stopping. It then verifies each configured
// output (file, segments, images) from the final result.
func (r *Runner) runMultiTest(t *testing.T, test *testCase) {
	req := r.build(test)

	egressID := r.startEgress(t, req)

	time.Sleep(time.Second * 10)

	// get params
	p, err := config.GetValidatedPipelineConfig(r.ServiceConfig, req)
	require.NoError(t, err)

	if test.streamOptions != nil {
		// add a stream output while the egress is running
		_, err = r.client.UpdateStream(context.Background(), egressID, &livekit.UpdateStreamRequest{
			EgressId:      egressID,
			AddOutputUrls: []string{rtmpUrl3},
		})
		require.NoError(t, err)

		time.Sleep(time.Second * 10)
		r.verifyStreams(t, nil, p, rtmpUrl3)
		// updates report the redacted form of the url
		r.checkStreamUpdate(t, egressID, map[string]livekit.StreamInfo_Status{
			rtmpUrl3Redacted: livekit.StreamInfo_ACTIVE,
		})
		time.Sleep(time.Second * 10)
	} else {
		time.Sleep(time.Second * 20)
	}

	res := r.stopEgress(t, egressID)

	if test.fileOptions != nil {
		r.verifyFile(t, test, p, res)
	}

	if test.segmentOptions != nil {
		require.Len(t, res.GetSegmentResults(), 1)
		segments := res.GetSegmentResults()[0]
		require.Greater(t, segments.Size, int64(0))
		// playlist name templating must be fully expanded
		require.NotContains(t, segments.PlaylistName, "{")
		require.NotContains(t, segments.PlaylistLocation, "{")
		if segments.LivePlaylistName != "" {
			require.NotContains(t, segments.LivePlaylistName, "{")
		}
		if segments.LivePlaylistLocation != "" {
			require.NotContains(t, segments.LivePlaylistLocation, "{")
		}
		r.verifySegments(t, test, p, test.segmentOptions.suffix, res, false)
	}

	if test.imageOptions != nil {
		r.verifyImages(t, p, res)
	}
}



================================================
FILE: test/publish.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build integration

package test

import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/livekit/egress/pkg/types"
	lksdk "github.com/livekit/server-sdk-go/v2"
)

var (
	// samples maps each publishable codec to its on-disk media sample.
	samples = map[types.MimeType]string{
		types.MimeTypeOpus: "/media-samples/avsync_minmotion_livekit_audio_48k_120s.ogg",
		types.MimeTypeH264: "/media-samples/avsync_minmotion_livekit_video_1080p25_120s.h264",
		types.MimeTypeVP8:  "/media-samples/avsync_minmotion_livekit_1080p24_vp8.ivf",
		types.MimeTypeVP9:  "/media-samples/avsync_minmotion_livekit_1080p24_vp9.ivf",
		types.MimeTypePCMU: "/media-samples/avsync_minmotion_livekit_audio_8k_120s_pcmu.wav",
		types.MimeTypePCMA: "/media-samples/avsync_minmotion_livekit_audio_8k_120s_pcma.wav",
	}

	// frameDurations maps codecs to the per-frame pacing passed to the
	// file-track reader; codecs absent here use the reader's default.
	frameDurations = map[types.MimeType]time.Duration{
		types.MimeTypeH264: time.Microsecond * 41667,
		types.MimeTypeVP8:  time.Microsecond * 41667,
		types.MimeTypeVP9:  time.Microsecond * 41667,
		types.MimeTypePCMU: time.Millisecond * 20,
		types.MimeTypePCMA: time.Millisecond * 20,
	}
)

// publishSample schedules publication of the sample track for codec after
// publishAfter, optionally toggling mute every 10s (after a 15s warmup) and
// unpublishing at unpublishAfter. Returns the track SID when publishAfter is
// zero (publication happens synchronously via the buffered channel);
// otherwise returns the placeholder "TBD" since the SID is not yet known.
func (r *Runner) publishSample(t *testing.T, codec types.MimeType, publishAfter, unpublishAfter time.Duration, withMuting bool) string {
	if codec == "" {
		return ""
	}

	trackID := make(chan string, 1)
	time.AfterFunc(publishAfter, func() {
		done := make(chan struct{})
		unpublished := make(chan struct{})

		pub := r.publish(t, r.room.LocalParticipant, codec, done)
		trackID <- pub.SID()
		if withMuting {
			// toggle mute on a 10s cadence until the track is unpublished
			// or the sample finishes playing
			go func() {
				muted := false
				time.Sleep(time.Second * 15)
				for {
					select {
					case <-unpublished:
						return
					case <-done:
						return
					default:
						pub.SetMuted(!muted)
						muted = !muted
						time.Sleep(time.Second * 10)
					}
				}
			}()
		}
		if unpublishAfter != 0 {
			// unpublishAfter is measured from test start, so subtract the
			// publish delay to get the remaining wait
			time.AfterFunc(unpublishAfter-publishAfter, func() {
				select {
				case <-done:
					return
				default:
					close(unpublished)
					_ = r.room.LocalParticipant.UnpublishTrack(pub.SID())
				}
			})
		}
	})
	if publishAfter == 0 {
		return <-trackID
	}
	return "TBD"
}

// publishSampleWithDisconnection publishes a sample track immediately and
// simulates a 10s network disconnection starting 10s in. Returns the track SID.
func (r *Runner) publishSampleWithDisconnection(t *testing.T, codec types.MimeType) string {
	pub := r.publish(t, r.room.LocalParticipant, codec, make(chan struct{}))
	trackID := pub.SID()
	time.AfterFunc(time.Second*10, func() {
		pub.SimulateDisconnection(time.Second * 10)
	})
	return trackID
}

// publish creates a local file-backed track for codec and publishes it to the
// room. done is closed when the sample has been fully written, at which point
// the track is also unpublished. Cleanup unpublishes on test exit regardless.
func (r *Runner) publish(t *testing.T, p *lksdk.LocalParticipant, codec types.MimeType, done chan struct{}) *lksdk.LocalTrackPublication {
	filename := samples[codec]
	frameDuration := frameDurations[codec]

	var pub *lksdk.LocalTrackPublication
	opts := []lksdk.ReaderSampleProviderOption{
		lksdk.ReaderTrackWithOnWriteComplete(func() {
			close(done)
			if pub != nil {
				_ = p.UnpublishTrack(pub.SID())
			}
		}),
	}
	if frameDuration != 0 {
		opts = append(opts, lksdk.ReaderTrackWithFrameDuration(frameDuration))
	}

	track, err := lksdk.NewLocalFileTrack(filename, opts...)
	require.NoError(t, err)

	pub, err = p.PublishTrack(track, &lksdk.TrackPublicationOptions{Name: filename})
	require.NoError(t, err)
	trackID := pub.SID()
	t.Cleanup(func() {
		_ = p.UnpublishTrack(trackID)
	})

	return pub
}

================================================
FILE: test/runner.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build integration

package test

import (
	"context"
	"encoding/json"
	"fmt"
	"io/fs"
	"math/rand"
	"os"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	"gopkg.in/yaml.v3"

	"github.com/livekit/protocol/livekit"
	"github.com/livekit/protocol/logger"
	"github.com/livekit/protocol/rpc"
	"github.com/livekit/psrpc"
	lksdk "github.com/livekit/server-sdk-go/v2"

	"github.com/livekit/egress/pkg/config"
)

// Runner drives the integration suite: it embeds the service config parsed
// from EGRESS_CONFIG_STRING/EGRESS_CONFIG_FILE yaml, holds live handles to the
// egress service, psrpc client, and LiveKit room, and carries the flagset that
// selects which test groups run.
type Runner struct {
	// StartEgress submits a request to the service under test; wired up in StartServer.
	StartEgress func(ctx context.Context, request *rpc.StartEgressRequest) (*livekit.EgressInfo, error) `yaml:"-"`

	svc             Server                   `yaml:"-"`
	client          rpc.EgressClient         `yaml:"-"`
	room            *lksdk.Room              `yaml:"-"`
	updates         chan *livekit.EgressInfo `yaml:"-"`
	sourceFramerate float64                  `yaml:"-"`
	testNumber      int                      `yaml:"-"`

	// service config
	*config.ServiceConfig `yaml:",inline"`
	S3Upload              *livekit.S3Upload        `yaml:"-"`
	GCPUpload             *livekit.GCPUpload       `yaml:"-"`
	AzureUpload           *livekit.AzureBlobUpload `yaml:"-"`

	// testing config
	FilePrefix   string `yaml:"file_prefix"`
	RoomName     string `yaml:"room_name"`
	RoomBaseName string `yaml:"-"`
	Muting       bool   `yaml:"muting"`
	Dotfiles     bool   `yaml:"dot_files"`
	Short        bool   `yaml:"short"`

	// flagset used to determine which tests to run
	shouldRun               uint `yaml:"-"`
	RoomTestsOnly           bool `yaml:"room_only"`
	WebTestsOnly            bool `yaml:"web_only"`
	ParticipantTestsOnly    bool `yaml:"participant_only"`
	TrackCompositeTestsOnly bool `yaml:"track_composite_only"`
	TrackTestsOnly          bool `yaml:"track_only"`
	TemplateTestsOnly       bool `yaml:"template_only"`
	MediaTestsOnly          bool `yaml:"media_only"`
	EdgeCasesOnly           bool `yaml:"edge_cases_only"`
	FileTestsOnly           bool `yaml:"file_only"`
	StreamTestsOnly         bool `yaml:"stream_only"`
	SegmentTestsOnly        bool `yaml:"segments_only"`
	ImageTestsOnly          bool `yaml:"images_only"`
	MultiTestsOnly          bool `yaml:"multi_only"`
}

// Server is the subset of the egress service surface the runner needs;
// implemented by the service under test.
type Server interface {
	StartTemplatesServer(fs.FS) error
	Run() error
	Status() ([]byte, error)
	GetGstPipelineDotFile(string) (string, error)
	IsIdle() bool
	KillAll()
	Shutdown(bool, bool)
	Drain()
}

// NewRunner builds a Runner from the yaml config in EGRESS_CONFIG_STRING (or
// the file named by EGRESS_CONFIG_FILE), applies the INTEGRATION_TYPE test
// selection, validates required credentials/redis, and reads optional cloud
// upload configs from S3_UPLOAD/GCP_UPLOAD/AZURE_UPLOAD env vars (JSON).
func NewRunner(t *testing.T) *Runner {
	confString := os.Getenv("EGRESS_CONFIG_STRING")
	if confString == "" {
		confFile := os.Getenv("EGRESS_CONFIG_FILE")
		require.NotEmpty(t, confFile)
		b, err := os.ReadFile(confFile)
		require.NoError(t, err)
		confString = string(b)
	}

	r := &Runner{}
	err := yaml.Unmarshal([]byte(confString), r)
	require.NoError(t, err)

	// INTEGRATION_TYPE selects a test group; the room name is randomized per
	// run to avoid collisions between concurrent CI jobs
	switch os.Getenv("INTEGRATION_TYPE") {
	case "room":
		r.RoomTestsOnly = true
		r.RoomName = fmt.Sprintf("room-integration-%d", rand.Intn(100))

	case "web":
		r.WebTestsOnly = true
		r.RoomName = fmt.Sprintf("web-integration-%d", rand.Intn(100))

	case "participant":
		r.ParticipantTestsOnly = true
		r.RoomName = fmt.Sprintf("participant-integration-%d", rand.Intn(100))

	case "track_composite":
		r.TrackCompositeTestsOnly = true
		r.RoomName = fmt.Sprintf("track-composite-integration-%d", rand.Intn(100))

	case "track":
		r.TrackTestsOnly = true
		r.RoomName = fmt.Sprintf("track-integration-%d", rand.Intn(100))

	case "template":
		r.TemplateTestsOnly = true
		r.RoomName = fmt.Sprintf("template-integration-%d", rand.Intn(100))

	case "media":
		r.MediaTestsOnly = true
		r.RoomName = fmt.Sprintf("media-integration-%d", rand.Intn(100))

	case "file-room":
		r.shouldRun = runFile | runRoom | runWeb | runTemplate
		r.RoomName = fmt.Sprintf("file-room-integration-%d", rand.Intn(100))

	case "file-track":
		r.shouldRun = runFile | runTrackComposite | runTrack
		r.RoomName = fmt.Sprintf("file-track-integration-%d", rand.Intn(100))

	case "file-media":
		r.shouldRun = runFile | runMedia | runParticipant
		r.RoomName = fmt.Sprintf("file-media-integration-%d", rand.Intn(100))

	case "file":
		r.FileTestsOnly = true
		r.RoomName = fmt.Sprintf("file-integration-%d", rand.Intn(100))

	case "stream":
		r.StreamTestsOnly = true
		r.RoomName = fmt.Sprintf("stream-integration-%d", rand.Intn(100))

	case "segments":
		r.SegmentTestsOnly = true
		r.RoomName = fmt.Sprintf("segments-integration-%d", rand.Intn(100))

	case "images":
		r.ImageTestsOnly = true
		r.RoomName = fmt.Sprintf("images-integration-%d", rand.Intn(100))

	case "multi":
		r.MultiTestsOnly = true
		r.RoomName = fmt.Sprintf("multi-integration-%d", rand.Intn(100))

	case "edge":
		r.EdgeCasesOnly = true
		r.RoomName = fmt.Sprintf("edge-integration-%d", rand.Intn(100))

	default:
		if r.RoomName == "" {
			r.RoomName = fmt.Sprintf("egress-integration-%d", rand.Intn(100))
		}
	}

	conf, err := config.NewServiceConfig(confString)
	require.NoError(t, err)
	r.ServiceConfig = conf

	if conf.ApiKey == "" || conf.ApiSecret == "" || conf.WsUrl == "" {
		t.Fatal("api key, secret, and ws url required")
	}
	if conf.Redis == nil {
		t.Fatal("redis required")
	}

	if s3 := os.Getenv("S3_UPLOAD"); s3 != "" {
		logger.Infow("using s3 uploads")
		r.S3Upload = &livekit.S3Upload{}
		require.NoError(t, json.Unmarshal([]byte(s3), r.S3Upload))
	} else {
		logger.Infow("no s3 config supplied")
	}

	if gcp := os.Getenv("GCP_UPLOAD"); gcp != "" {
		logger.Infow("using gcp uploads")
		r.GCPUpload = &livekit.GCPUpload{}
		require.NoError(t, json.Unmarshal([]byte(gcp), r.GCPUpload))
	} else {
		logger.Infow("no gcp config supplied")
	}

	if azure := os.Getenv("AZURE_UPLOAD"); azure != "" {
		logger.Infow("using azure uploads")
		r.AzureUpload = &livekit.AzureBlobUpload{}
		require.NoError(t, json.Unmarshal([]byte(azure), r.AzureUpload))
	} else {
		logger.Infow("no azure config supplied")
	}

	if r.RoomBaseName == "" {
		r.RoomBaseName = r.RoomName
	}

	if r.shouldRun == 0 {
		r.updateFlagset()
	}

	return r
}

// connectRoom (re)connects the runner's sample publisher to roomName,
// disconnecting any previous room first. codecs, when non-empty, restricts
// the codecs the SDK will negotiate.
func (r *Runner) connectRoom(t *testing.T, roomName string, codecs []livekit.Codec) {
	if r.room != nil {
		r.room.Disconnect()
	}
	opts := []lksdk.ConnectOption{}
	if len(codecs) > 0 {
		opts = append(opts, lksdk.WithCodecs(codecs))
	}
	room, err := lksdk.ConnectToRoom(r.WsUrl, lksdk.ConnectInfo{
		APIKey:              r.ApiKey,
		APISecret:           r.ApiSecret,
		RoomName:            roomName,
		ParticipantName:     "egress-sample",
		ParticipantIdentity: fmt.Sprintf("sample-%d", rand.Intn(100)),
	}, lksdk.NewRoomCallback(), opts...)
	require.NoError(t, err)
	r.room = room
	r.RoomName = roomName
}

// StartServer wires the runner to the service under test: connects the room,
// creates the psrpc egress client, starts the templates server and the
// service itself, subscribes to egress info updates, and sanity-checks the
// health endpoint when configured.
func (r *Runner) StartServer(t *testing.T, svc Server, bus psrpc.MessageBus, templateFs fs.FS) {
	r.svc = svc
	t.Cleanup(func() {
		if r.room != nil {
			r.room.Disconnect()
		}
		r.svc.Shutdown(false, true)
	})

	r.connectRoom(t, r.RoomName, nil)

	psrpcClient, err := rpc.NewEgressClient(rpc.ClientParams{Bus: bus})
	require.NoError(t, err)
	r.StartEgress = func(ctx context.Context, req *rpc.StartEgressRequest) (*livekit.EgressInfo, error) {
		return psrpcClient.StartEgress(ctx, "", req)
	}

	// start templates handler
	err = r.svc.StartTemplatesServer(templateFs)
	require.NoError(t, err)

	go r.svc.Run()
	time.Sleep(time.Second * 3)

	// subscribe to update channel
	psrpcUpdates := make(chan *livekit.EgressInfo, 100)
	_, err = newIOTestServer(bus, psrpcUpdates)
	require.NoError(t, err)

	// update test config
	r.client = psrpcClient
	r.updates = psrpcUpdates

	// check status
	if r.HealthPort != 0 {
		status := r.getStatus(t)
		require.Len(t, status, 1)
		require.Contains(t, status, "CpuLoad")
	}
}

================================================
FILE: test/segments.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build integration

package test

import (
	"os"
	"path"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/livekit/egress/pkg/config"
	"github.com/livekit/egress/pkg/pipeline/sink/m3u8"
	"github.com/livekit/egress/pkg/types"
	"github.com/livekit/protocol/livekit"
)

// testSegments runs the HLS/segmented output matrix across request types,
// codecs, playlist suffix modes, and live-playlist configurations.
func (r *Runner) testSegments(t *testing.T) {
	if !r.should(runSegments) {
		return
	}

	t.Run("Segments", func(t *testing.T) {
		for _, test := range []*testCase{
			// ---- Room Composite -----
			{
				name:        "RoomComposite",
				requestType: types.RequestTypeRoomComposite,
				publishOptions: publishOptions{
					audioCodec: types.MimeTypeOpus,
					videoCodec: types.MimeTypeVP8,
					layout:     "speaker",
				},
				encodingOptions: &livekit.EncodingOptions{
					AudioCodec:   livekit.AudioCodec_AAC,
					VideoCodec:   livekit.VideoCodec_H264_BASELINE,
					Width:        1920,
					Height:       1080,
					VideoBitrate: 4500,
				},
				segmentOptions: &segmentOptions{
					prefix:       "r_{room_name}_{time}",
					playlist:     "r_{room_name}_{time}.m3u8",
					livePlaylist: "r_live_{room_name}_{time}.m3u8",
					suffix:       livekit.SegmentedFileSuffix_INDEX,
				},
				contentCheck: r.fullContentCheck,
			},
			{
				name:        "RoomComposite/AudioOnly",
				requestType: types.RequestTypeRoomComposite,
				publishOptions: publishOptions{
					audioCodec: types.MimeTypeOpus,
					audioOnly:  true,
				},
				encodingOptions: &livekit.EncodingOptions{
					AudioCodec: livekit.AudioCodec_AAC,
				},
				segmentOptions: &segmentOptions{
					prefix:   "r_{room_name}_audio_{time}",
					playlist: "r_{room_name}_audio_{time}.m3u8",
					suffix:   livekit.SegmentedFileSuffix_TIMESTAMP,
				},
				contentCheck: r.audioOnlyContentCheck,
			},

			// ---------- Web ----------
			{
				name:        "Web",
				requestType: types.RequestTypeWeb,
				segmentOptions: &segmentOptions{
					prefix:   "web_{time}",
					playlist: "web_{time}.m3u8",
				},
			},

			// ------ Participant ------
			{
				name:        "ParticipantComposite/VP8",
				requestType: types.RequestTypeParticipant,
				publishOptions: publishOptions{
					audioCodec: types.MimeTypeOpus,
					videoCodec: types.MimeTypeVP8,
					// videoDelay:     time.Second * 10,
					// videoUnpublish: time.Second * 20,
				},
				segmentOptions: &segmentOptions{
					prefix:   "participant_{publisher_identity}_vp8_{time}",
					playlist: "participant_{publisher_identity}_vp8_{time}.m3u8",
				},
				contentCheck: r.fullContentCheck,
			},
			{
				name:        "ParticipantComposite/H264",
				requestType: types.RequestTypeParticipant,
				publishOptions: publishOptions{
					audioCodec:     types.MimeTypeOpus,
					audioDelay:     time.Second * 10,
					audioUnpublish: time.Second * 20,
					videoCodec:     types.MimeTypeH264,
				},
				segmentOptions: &segmentOptions{
					prefix:   "participant_{room_name}_h264_{time}",
					playlist: "participant_{room_name}_h264_{time}.m3u8",
				},
			},

			// ---- Track Composite ----
			{
				name:        "TrackComposite/H264",
				requestType: types.RequestTypeTrackComposite,
				publishOptions: publishOptions{
					audioCodec: types.MimeTypeOpus,
					videoCodec: types.MimeTypeH264,
				},
				segmentOptions: &segmentOptions{
					prefix:       "tcs_{room_name}_h264_{time}",
					playlist:     "tcs_{room_name}_h264_{time}.m3u8",
					livePlaylist: "tcs_live_{room_name}_h264_{time}.m3u8",
				},
				contentCheck: r.fullContentCheck,
			},
			{
				name:        "TrackComposite/AudioOnly",
				requestType: types.RequestTypeTrackComposite,
				publishOptions: publishOptions{
					audioCodec: types.MimeTypeOpus,
					audioOnly:  true,
				},
				segmentOptions: &segmentOptions{
					prefix:   "tcs_{room_name}_audio_{time}",
					playlist: "tcs_{room_name}_audio_{time}.m3u8",
				},
				contentCheck: r.audioOnlyContentCheck,
			},

			// --------- Web V2 --------
			{
				name:        "WebV2",
				requestType: types.RequestTypeWeb,
				segmentOptions: &segmentOptions{
					prefix:   "webv2_{time}",
					playlist: "webv2_{time}.m3u8",
				},
				v2OutputOptions: &v2OutputOptions{},
			},
		} {
			if !r.run(t, test, r.runSegmentsTest) {
				return
			}
		}
	})
}

// runSegmentsTest starts a segmented egress, lets it run ~25s, stops it, and
// verifies the produced playlists and segments.
func (r *Runner) runSegmentsTest(t *testing.T, test *testCase) {
	req := r.buildRequest(test)

	egressID := r.startEgress(t, req)
	time.Sleep(time.Second * 10)
	if r.Dotfiles {
		r.createDotFile(t, egressID)
	}

	// stop
	time.Sleep(time.Second * 15)
	res := r.stopEgress(t, egressID)

	// get params
	p, err := config.GetValidatedPipelineConfig(r.ServiceConfig, req)
	require.NoError(t, err)
	require.Equal(t, !test.audioOnly, p.VideoEncoding)

	r.verifySegments(t, test, p, test.segmentOptions.suffix, res, test.livePlaylist != "")
}

// verifySegments checks the egress-level result, then verifies the main
// (EVENT) playlist and, when enabled, the rolling live playlist.
func (r *Runner) verifySegments(
	t *testing.T,
	tc *testCase,
	p *config.PipelineConfig,
	filenameSuffix livekit.SegmentedFileSuffix,
	res *livekit.EgressInfo,
	enableLivePlaylist bool,
) {
	// egress info
	require.Equal(t, res.Error == "", res.Status != livekit.EgressStatus_EGRESS_FAILED)
	require.NotZero(t, res.StartedAt)
	require.NotZero(t, res.EndedAt)

	// segments info
	require.Len(t, res.GetSegmentResults(), 1)
	segments := res.GetSegmentResults()[0]
	require.Greater(t, segments.Size, int64(0))
	require.Greater(t, segments.Duration, int64(0))

	r.verifySegmentOutput(t, tc, p, filenameSuffix, segmentPlaylist{
		name:         segments.PlaylistName,
		location:     segments.PlaylistLocation,
		segmentCount: int(segments.SegmentCount),
		playlistType: m3u8.PlaylistTypeEvent,
	}, res)

	if enableLivePlaylist {
		// live playlist keeps a fixed window of the 5 most recent segments
		r.verifySegmentOutput(t, tc, p, filenameSuffix, segmentPlaylist{
			name:         segments.LivePlaylistName,
			location:     segments.LivePlaylistLocation,
			segmentCount: 5,
			playlistType: m3u8.PlaylistTypeLive,
		}, res)
	}
}

// segmentPlaylist describes one playlist (main or live) to be verified.
type segmentPlaylist struct {
	name         string
	location     string
	segmentCount int
	playlistType m3u8.PlaylistType
}

// verifySegmentOutput downloads a playlist (and, for EVENT playlists, the
// manifest plus every referenced segment) from storage, checks program date
// times, and runs full media verification plus any per-test content check.
func (r *Runner) verifySegmentOutput(
	t *testing.T,
	tc *testCase,
	p *config.PipelineConfig,
	filenameSuffix livekit.SegmentedFileSuffix,
	pl segmentPlaylist,
	res *livekit.EgressInfo,
) {
	require.NotEmpty(t, pl.name)
	require.NotEmpty(t, pl.location)

	storedPlaylistPath := pl.name

	// download from cloud storage
	localPlaylistPath := path.Join(r.FilePrefix, path.Base(storedPlaylistPath))
	download(t, p.GetSegmentConfig().StorageConfig, localPlaylistPath, storedPlaylistPath, false)
	if pl.playlistType == m3u8.PlaylistTypeEvent {
		// the manifest lists every segment uploaded; fetch each one so the
		// verifier can inspect local files
		manifestLocal := path.Join(path.Dir(localPlaylistPath), res.EgressId+".json")
		manifestStorage := path.Join(path.Dir(storedPlaylistPath), res.EgressId+".json")
		manifest := loadManifest(t, p.GetSegmentConfig().StorageConfig, manifestLocal, manifestStorage)

		for _, playlist := range manifest.Playlists {
			require.Equal(t, pl.segmentCount, len(playlist.Segments))
			for _, segment := range playlist.Segments {
				localPath := path.Join(r.FilePrefix, path.Base(segment.Filename))
				download(t, p.GetSegmentConfig().StorageConfig, localPath, segment.Filename, false)
			}
		}
	}

	verifyPlaylistProgramDateTime(t, filenameSuffix, localPlaylistPath, pl.playlistType)

	// verify
	info := verify(t, localPlaylistPath, p, res, types.EgressTypeSegments, r.Muting, r.sourceFramerate, pl.playlistType == m3u8.PlaylistTypeLive)

	if tc.contentCheck != nil && info != nil {
		tc.contentCheck(t, localPlaylistPath, info)
	}
}

// verifyPlaylistProgramDateTime parses a downloaded playlist and checks that
// each segment's EXT-X-PROGRAM-DATE-TIME is recent, matches the TIMESTAMP
// filename suffix when used, and is consistent with segment durations.
func verifyPlaylistProgramDateTime(t *testing.T, filenameSuffix livekit.SegmentedFileSuffix, localPlaylistPath string, plType m3u8.PlaylistType) {
	p, err := readPlaylist(localPlaylistPath)
	require.NoError(t, err)
	require.Equal(t, string(plType), p.MediaType)
	require.True(t, p.Closed)

	now := time.Now()
	for i, s := range p.Segments {
		const leeway = 50 * time.Millisecond

		// Make sure the program date time is current, ie not more than 2 min in the past
		// NOTE(review): InDelta is symmetric, so this actually allows up to
		// 120s in either direction — confirm intent
		require.InDelta(t, now.Unix(), s.ProgramDateTime.Unix(), 120)

		if filenameSuffix == livekit.SegmentedFileSuffix_TIMESTAMP {
			// filename encodes the segment start time; it must agree with the
			// playlist's program date time to within 1ms
			m := segmentTimeRegexp.FindStringSubmatch(s.Filename)
			require.Equal(t, 3, len(m))

			tm, err := time.Parse("20060102150405", m[1])
			require.NoError(t, err)
			ms, err := strconv.Atoi(m[2])
			require.NoError(t, err)
			tm = tm.Add(time.Duration(ms) * time.Millisecond)

			require.InDelta(t, s.ProgramDateTime.UnixNano(), tm.UnixNano(), float64(time.Millisecond))
		}

		// NOTE(review): -2 skips the gap check for the final pair of
		// segments, presumably because the last segment may be truncated —
		// confirm whether -1 was intended
		if i < len(p.Segments)-2 {
			nextSegmentStartDate := p.Segments[i+1].ProgramDateTime
			dateDuration := nextSegmentStartDate.Sub(s.ProgramDateTime)
			require.InDelta(t, time.Duration(s.Duration*float64(time.Second)), dateDuration, float64(leeway))
		}
	}
}

// Playlist is a minimal parsed m3u8 playlist used only for verification.
type Playlist struct {
	Version        int
	MediaType      string
	TargetDuration int
	Segments       []*Segment
	Closed         bool
}

// Segment is one #EXTINF entry of a parsed playlist.
type Segment struct {
	ProgramDateTime time.Time
	Duration        float64
	Filename        string
}

// readPlaylist parses an m3u8 file produced by the egress writer. It assumes
// the writer's exact fixed layout: header lines in a known order, then
// repeating 3-line groups of PROGRAM-DATE-TIME / EXTINF / filename, ending
// with #EXT-X-ENDLIST. Parse errors on individual fields are ignored and
// yield zero values.
func readPlaylist(filename string) (*Playlist, error) {
	b, err := os.ReadFile(filename)
	if err != nil {
		return nil, err
	}

	var segmentLineStart = 5
	var i = 1
	lines := strings.Split(string(b), "\n")
	version, _ := strconv.Atoi(strings.Split(lines[i], ":")[1])
	i++
	var mediaType string
	if strings.Contains(string(b), "#EXT-X-PLAYLIST-TYPE") {
		mediaType = strings.Split(lines[i], ":")[1]
		segmentLineStart++
		i++
	}
	i++ // #EXT-X-ALLOW-CACHE:NO hardcoded
	targetDuration, _ := strconv.Atoi(strings.Split(lines[i], ":")[1])

	p := &Playlist{
		Version:        version,
		MediaType:      mediaType,
		TargetDuration: targetDuration,
		Segments:       make([]*Segment, 0),
	}

	for i = segmentLineStart; i < len(lines)-3; i += 3 {
		startTime, _ := time.Parse("2006-01-02T15:04:05.999Z07:00", strings.SplitN(lines[i], ":", 2)[1])
		durStr := strings.Split(lines[i+1], ":")[1]
		durStr = durStr[:len(durStr)-1] // remove trailing comma
		duration, _ := strconv.ParseFloat(durStr, 64)
		p.Segments = append(p.Segments, &Segment{
			ProgramDateTime: startTime,
			Duration:        duration,
			Filename:        lines[i+2],
		})
	}

	if lines[len(lines)-2] == "#EXT-X-ENDLIST" {
		p.Closed = true
	}

	return p, nil
}

================================================
FILE: test/stream.go
================================================
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build integration

package test

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
	"os"
	"path"
	"strings"
	"testing"
	"time"

	"github.com/gorilla/websocket"
	"github.com/stretchr/testify/require"

	"github.com/livekit/egress/pkg/config"
	"github.com/livekit/egress/pkg/types"
	"github.com/livekit/protocol/livekit"
	"github.com/livekit/protocol/logger"
	"github.com/livekit/protocol/utils"
)

const (
	// intentionally unreachable/rejected endpoints, used to verify that bad
	// stream urls are reported as FAILED without killing the egress
	badRtmpUrl1         = "rtmp://localhost:1936/wrong/stream"
	badRtmpUrl1Redacted = "rtmp://localhost:1936/wrong/{st...am}"
	badRtmpUrl2         = "rtmp://localhost:1936/live/stream"
	badRtmpUrl2Redacted = "rtmp://localhost:1936/live/{st...am}"
	badSrtUrl1          = "srt://localhost:8891?streamid=publish:wrongport&pkt_size=1316"
	badSrtUrl2          = "srt://localhost:8891?streamid=publish:badstream&pkt_size=1316"
)

var (
	streamKey1 = utils.NewGuid("")
	streamKey2 = utils.NewGuid("")
	streamKey3 = utils.NewGuid("")
	streamKey4 = utils.NewGuid("")

	rtmpUrl1 = fmt.Sprintf("rtmp://localhost:1935/live/%s", streamKey1)
	rtmpUrl2 = fmt.Sprintf("rtmp://localhost:1935/live/%s", streamKey2)
	rtmpUrl3 = fmt.Sprintf("rtmp://localhost:1935/live/%s", streamKey3)
	rtmpUrl4 = fmt.Sprintf("rtmp://localhost:1935/live/%s", streamKey4)

	rtmpUrl1Redacted, _ = utils.RedactStreamKey(rtmpUrl1)
	rtmpUrl2Redacted, _ = utils.RedactStreamKey(rtmpUrl2)
	rtmpUrl3Redacted, _ = utils.RedactStreamKey(rtmpUrl3)
	rtmpUrl4Redacted, _ = utils.RedactStreamKey(rtmpUrl4)

	srtPublishUrl1 = fmt.Sprintf("srt://localhost:8890?streamid=publish:%s&pkt_size=1316", streamKey1)
	srtReadUrl1    = fmt.Sprintf("srt://localhost:8890?streamid=read:%s", streamKey1)
	srtPublishUrl2 = fmt.Sprintf("srt://localhost:8890?streamid=publish:%s&pkt_size=1316", streamKey2)
	srtReadUrl2    = fmt.Sprintf("srt://localhost:8890?streamid=read:%s", streamKey2)
)

// [[publish, redacted, verification]]
var streamUrls = map[types.OutputType][][]string{
	types.OutputTypeRTMP: {
		{rtmpUrl1, rtmpUrl1Redacted, rtmpUrl1},
		{badRtmpUrl1, badRtmpUrl1Redacted, ""},
		{rtmpUrl2, rtmpUrl2Redacted, rtmpUrl2},
		{badRtmpUrl2, badRtmpUrl2Redacted, ""},
	},
	types.OutputTypeSRT: {
		{srtPublishUrl1, srtPublishUrl1, srtReadUrl1},
		{badSrtUrl1, badSrtUrl1, ""},
		{srtPublishUrl2, srtPublishUrl2, srtReadUrl2},
		{badSrtUrl2, badSrtUrl2, ""},
	},
}

// testStream runs the streaming output matrix (RTMP, SRT, raw websocket)
// across request types, including good/bad url mixes and live url updates.
func (r *Runner) testStream(t *testing.T) {
	if !r.should(runStream) {
		return
	}

	t.Run("Stream", func(t *testing.T) {
		for _, test := range []*testCase{
			// ---- Room Composite -----
			{
				name:        "RoomComposite",
				requestType: types.RequestTypeRoomComposite,
				publishOptions: publishOptions{
					audioCodec: types.MimeTypeOpus,
					videoCodec: types.MimeTypeVP8,
					layout:     "speaker",
				},
				streamOptions: &streamOptions{
					streamUrls: []string{rtmpUrl1, badRtmpUrl1},
					outputType: types.OutputTypeRTMP,
				},
			},
			{
				name:        "RoomCompositeFixedKeyframeInterval",
				requestType: types.RequestTypeRoomComposite,
				publishOptions: publishOptions{
					audioCodec: types.MimeTypeOpus,
					videoCodec: types.MimeTypeVP8,
					layout:     "speaker",
				},
				streamOptions: &streamOptions{
					streamUrls: []string{rtmpUrl1, badRtmpUrl1},
					outputType: types.OutputTypeRTMP,
				},
				encodingOptions: &livekit.EncodingOptions{
					KeyFrameInterval: 2,
				},
				contentCheck: r.streamKeyframeContentCheck(2),
			},

			// ---------- Web ----------
			{
				name:        "Web",
				requestType: types.RequestTypeWeb,
				streamOptions: &streamOptions{
					streamUrls: []string{srtPublishUrl1, badSrtUrl1},
					outputType: types.OutputTypeSRT,
				},
				encodingOptions: &livekit.EncodingOptions{
					KeyFrameInterval: 2,
				},
			},

			// ------ Participant ------
			{
				name:        "ParticipantComposite",
				requestType: types.RequestTypeParticipant,
				publishOptions: publishOptions{
					audioCodec: types.MimeTypeOpus,
					audioDelay: time.Second * 8,
					videoCodec: types.MimeTypeVP8,
				},
				streamOptions: &streamOptions{
					streamUrls: []string{rtmpUrl1, badRtmpUrl1},
					outputType: types.OutputTypeRTMP,
				},
			},

			// ---- Track Composite ----
			{
				name:        "TrackComposite",
				requestType: types.RequestTypeTrackComposite,
				publishOptions: publishOptions{
					audioCodec: types.MimeTypeOpus,
					videoCodec: types.MimeTypeVP8,
				},
				streamOptions: &streamOptions{
					streamUrls: []string{rtmpUrl1, badRtmpUrl1},
					outputType: types.OutputTypeRTMP,
				},
			},

			// --------- Track ---------
			{
				name:        "Track",
				requestType: types.RequestTypeTrack,
				publishOptions: publishOptions{
					audioCodec: types.MimeTypeOpus,
					audioOnly:  true,
				},
				streamOptions: &streamOptions{
					rawFileName: fmt.Sprintf("track-ws-%v.raw", time.Now().Unix()),
					outputType:  types.OutputTypeRaw,
				},
			},

			// -------- Template --------
			{
				name:        "Template",
				requestType: types.RequestTypeTemplate,
				publishOptions: publishOptions{
					audioCodec: types.MimeTypeOpus,
					videoCodec: types.MimeTypeVP8,
					layout:     "speaker",
				},
				streamOptions: &streamOptions{
					streamUrls: []string{rtmpUrl1, badRtmpUrl1},
					outputType: types.OutputTypeRTMP,
				},
			},

			// -------- Media ----------
			{
				name:        "Media/ParticipantVideoStream",
				requestType: types.RequestTypeMedia,
				publishOptions: publishOptions{
					audioCodec: types.MimeTypeOpus,
					videoCodec: types.MimeTypeVP8,
					mediaParticipantVideo: &livekit.ParticipantVideo{
						Identity: "set-at-runtime",
					},
					audioRoutes: []*livekit.AudioRoute{{
						Match: &livekit.AudioRoute_TrackId{TrackId: "set-at-runtime"},
					}},
				},
				streamOptions: &streamOptions{
					streamUrls: []string{rtmpUrl1, badRtmpUrl1},
					outputType: types.OutputTypeRTMP,
				},
			},
		} {
			if !r.run(t, test, r.runStreamTest) {
				return
			}
		}
	})
}

// runStreamTest exercises the full stream lifecycle: start with one good and
// one bad url, add another good/bad pair via UpdateStream, remove the first
// good url, then stop and verify the final per-url statuses and durations.
// Track requests are routed to the websocket variant instead.
func (r *Runner) runStreamTest(t *testing.T, test *testCase) {
	if test.requestType == types.RequestTypeTrack {
		r.runWebsocketTest(t, test)
		return
	}

	req := r.buildRequest(test)
	ctx := context.Background()
	urls := streamUrls[test.streamOptions.outputType]

	egressID := r.startEgress(t, req)

	p, err := config.GetValidatedPipelineConfig(r.ServiceConfig, req)
	require.NoError(t, err)
	if !test.audioOnly {
		require.True(t, p.VideoEncoding)
	}

	// verify
	time.Sleep(time.Second * 5)
	r.verifyStreams(t, test, p, urls[0][2])
	r.checkStreamUpdate(t, egressID, map[string]livekit.StreamInfo_Status{
		urls[0][1]: livekit.StreamInfo_ACTIVE,
		urls[1][1]: livekit.StreamInfo_FAILED,
	})

	// add one good stream url and one bad
	_, err = r.client.UpdateStream(ctx, egressID, &livekit.UpdateStreamRequest{
		EgressId:      egressID,
		AddOutputUrls: []string{urls[2][0], urls[3][0]},
	})
	require.NoError(t, err)
	time.Sleep(time.Second * 5)

	// verify
	r.verifyStreams(t, test, p, urls[0][2], urls[2][2])
	r.checkStreamUpdate(t, egressID, map[string]livekit.StreamInfo_Status{
		urls[0][1]: livekit.StreamInfo_ACTIVE,
		urls[1][1]: livekit.StreamInfo_FAILED,
		urls[2][1]: livekit.StreamInfo_ACTIVE,
		urls[3][1]: livekit.StreamInfo_FAILED,
	})

	// remove one of the stream urls
	_, err = r.client.UpdateStream(ctx, egressID, &livekit.UpdateStreamRequest{
		EgressId:         egressID,
		RemoveOutputUrls: []string{urls[0][0]},
	})
	require.NoError(t, err)
	time.Sleep(time.Second * 5)
	if r.Dotfiles {
		r.createDotFile(t, egressID)
	}

	// verify the remaining stream
	r.verifyStreams(t, test, p, urls[2][2])
	r.checkStreamUpdate(t, egressID, map[string]livekit.StreamInfo_Status{
		urls[0][1]: livekit.StreamInfo_FINISHED,
		urls[1][1]: livekit.StreamInfo_FAILED,
		urls[2][1]: livekit.StreamInfo_ACTIVE,
		urls[3][1]: livekit.StreamInfo_FAILED,
	})

	// stop
	time.Sleep(time.Second * 5)
	res := r.stopEgress(t, egressID)

	// verify egress info
	require.Empty(t, res.Error)
	require.NotZero(t, res.StartedAt)
	require.NotZero(t, res.EndedAt)

	// check stream info
	require.Len(t, res.StreamResults, 4)
	for _, info := range res.StreamResults {
		require.NotZero(t, info.StartedAt)
		require.NotZero(t, info.EndedAt)

		switch info.Url {
		case urls[0][1]:
			// first good url: removed mid-run after ~15s of streaming
			require.Equal(t, livekit.StreamInfo_FINISHED.String(), info.Status.String())
			require.Greater(t, float64(info.Duration)/1e9, 15.0)

		case urls[2][1]:
			// second good url: ran from the update until stop (~10s+)
			require.Equal(t, livekit.StreamInfo_FINISHED.String(), info.Status.String())
			require.Greater(t, float64(info.Duration)/1e9, 10.0)

		default:
			require.Equal(t, livekit.StreamInfo_FAILED.String(), info.Status.String())
		}
	}
}

// verifyStreams runs media verification against each readable stream url and
// applies the test case's content check, when present.
func (r *Runner) verifyStreams(t *testing.T, tc *testCase, p *config.PipelineConfig, urls ...string) {
	for _, url := range urls {
		info := verify(t, url, p, nil, types.EgressTypeStream, false, r.sourceFramerate, false)
		if tc != nil && tc.contentCheck != nil && info != nil {
			tc.contentCheck(t, url, info)
		}
	}
}

// runWebsocketTest starts a local websocket sink, runs a track egress that
// streams raw audio to it for 30s, then verifies the captured file.
func (r *Runner) runWebsocketTest(t *testing.T, test *testCase) {
	filepath := path.Join(r.FilePrefix, test.rawFileName)

	wss := newTestWebsocketServer(filepath)
	s := httptest.NewServer(http.HandlerFunc(wss.handleWebsocket))
	test.websocketUrl = "ws" + strings.TrimPrefix(s.URL, "http")
	defer func() {
		wss.close()
		s.Close()
	}()

	req := r.build(test)

	egressID := r.startEgress(t, req)

	p, err := config.GetValidatedPipelineConfig(r.ServiceConfig, req)
	require.NoError(t, err)

	time.Sleep(time.Second * 30)

	res := r.stopEgress(t, egressID)
	verify(t, filepath, p, res, types.EgressTypeWebsocket, r.Muting, r.sourceFramerate, false)
}

// websocketTestServer receives the raw egress output over a websocket and
// writes binary frames to a local file for later verification.
type websocketTestServer struct {
	path string
	file *os.File
	conn *websocket.Conn
	done chan struct{}
}

func newTestWebsocketServer(filepath string) *websocketTestServer {
	return &websocketTestServer{
		path: filepath,
		done: make(chan struct{}),
	}
}

// handleWebsocket upgrades the connection and copies binary messages to the
// output file until the connection closes or the server is shut down.
func (s *websocketTestServer) handleWebsocket(w http.ResponseWriter, r *http.Request) {
	var err error

	s.file, err = os.Create(s.path)
	if err != nil {
		logger.Errorw("could not create file", err)
		return
	}

	// accept ws connection
	upgrader := websocket.Upgrader{}
	s.conn, err = upgrader.Upgrade(w, r, nil)
	if err != nil {
		logger.Errorw("could not accept ws connection", err)
		return
	}

	go func() {
		defer func() {
			_ = s.file.Close()

			// close the connection only if it's not closed already
			// NOTE(review): the loop below declares its own err with :=, so
			// this deferred check reads the outer err (from Upgrade, nil on
			// this path), not the read error — confirm intent
			if !websocket.IsUnexpectedCloseError(err) {
				_ = s.conn.Close()
			}
		}()

		for {
			select {
			case <-s.done:
				return
			default:
				mt, msg, err := s.conn.ReadMessage()
				if err != nil {
					if !websocket.IsUnexpectedCloseError(err) {
						logger.Errorw("unexpected ws close", err)
					}
					return
				}

				switch mt {
				case websocket.BinaryMessage:
					_, err = s.file.Write(msg)
					if err != nil {
						logger.Errorw("could not write to file", err)
						return
					}
				}
			}
		}
	}()
}

func (s *websocketTestServer) close() {
	close(s.done)
}

================================================ FILE:
test/test_content.go
================================================
// Copyright 2025 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package test

import (
	"bufio"
	"bytes"
	"context"
	"errors"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"regexp"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

//----------------------------------------------------------------------
// Utilities for checking AV sync for the given audio/video test sample
// https://github.com/livekit/media-samples/avsync_minmotion_livekit*
//----------------------------------------------------------------------

const (
	// dB thresholds used when classifying the test sample's audio
	testSampleSilenceLevel = -38
	testSampleBeepLevel    = -30.0
)

var (
	// rePTS matches the pts_time printed on ffmpeg metadata/log lines
	rePTS = regexp.MustCompile(`pts_time:([0-9.]+)`)
	// reYAVG matches the per-frame average-luma stat from signalstats
	reYAVG = regexp.MustCompile(`lavfi\.signalstats\.YAVG[=:]\s*([0-9.]+)`)
	// reRMS matches RMS levels from astats; the character class also admits
	// "inf"/"nan" spellings, which callers filter out before parsing
	reRMS = regexp.MustCompile(`RMS_level[=:](-?[0-9.infINFNaN]+)`)
)

// ffmpegVideoStats runs ffmpeg signalstats over the top 8-pixel stripe of the
// first video stream and writes per-frame metadata to statsFile. Returns an
// error on failure or after a 15s timeout.
func ffmpegVideoStats(videoPath, statsFile string) error {
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()

	cmd := exec.CommandContext(ctx, "ffmpeg",
		"-hide_banner", "-nostats", "-loglevel", "repeat+info",
		"-i", videoPath,
		"-map", "0:v:0",
		"-vf", fmt.Sprintf("crop=w=iw:h=8:x=0:y=0,signalstats,metadata=print:file=%s", statsFile),
		"-f", "null", "-",
	)

	var outBuf, errBuf bytes.Buffer
	cmd.Stdout = &outBuf
	cmd.Stderr = &errBuf

	if err := cmd.Run(); err != nil {
		if errors.Is(ctx.Err(), context.DeadlineExceeded) {
			return fmt.Errorf("ffmpeg video stats timeout after 15s")
		}
		return fmt.Errorf("ffmpeg video stats extraction failed: %w\nstdout:\n%s\nstderr:\n%s", err, outBuf.String(), errBuf.String())
	}
	return nil
}

// ffmpegAudioStats downmixes the audio to mono, runs astats with per-frame
// reset, and writes Overall.RMS_level metadata to statsFile. Returns an error
// on failure or after a 15s timeout.
func ffmpegAudioStats(audioPath, statsFile string) error {
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()

	cmd := exec.CommandContext(ctx, "ffmpeg",
		"-hide_banner", "-nostats", "-loglevel", "repeat+info",
		"-i", audioPath,
		"-af", fmt.Sprintf("pan=mono|c0=0.5*c0+0.5*c1,astats=metadata=1:reset=1,ametadata=print:key=lavfi.astats.Overall.RMS_level:file=%s", statsFile),
		"-f", "null", "-",
	)

	var outBuf, errBuf bytes.Buffer
	cmd.Stdout = &outBuf
	cmd.Stderr = &errBuf

	if err := cmd.Run(); err != nil {
		if errors.Is(ctx.Err(), context.DeadlineExceeded) {
			return fmt.Errorf("ffmpeg audio stats timeout after 15s")
		}
		return fmt.Errorf("ffmpeg audio stats extraction failed: %w\nstdout:\n%s\nstderr:\n%s", err, outBuf.String(), errBuf.String())
	}
	return nil
}

// ffmpegSilenceStats runs ffmpeg silencedetect with the given noise floor
// (dB) and minimum duration (seconds) and returns the captured stderr, which
// contains the silencedetect report. Returns an error on failure or timeout.
func ffmpegSilenceStats(audioPath string, noiseLevel int, minDuration float64) (*bytes.Buffer, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()

	cmd := exec.CommandContext(ctx, "ffmpeg",
		"-hide_banner", "-nostats", "-loglevel", "info",
		"-i", audioPath,
		"-af", "silencedetect=noise="+fmt.Sprintf("%d", noiseLevel)+"dB:d="+strconv.FormatFloat(minDuration, 'f', -1, 64),
		"-f", "null", "-",
	)

	var stderr bytes.Buffer
	cmd.Stderr = &stderr

	if err := cmd.Run(); err != nil {
		if errors.Is(ctx.Err(), context.DeadlineExceeded) {
			return nil, fmt.Errorf("ffmpeg silence stats timeout after 15s")
		}
		return nil, fmt.Errorf("ffmpeg silence stats extraction failed: %w\nstderr:\n%s", err, stderr.String())
	}
	return &stderr, nil
}

// extractFlashTimestamps runs ffmpeg + signalstats on the top stripe
// and returns one timestamp per flash event (YAVG >= 130, spaced ≥0.2s).
func extractFlashTimestamps(videoPath, outPath string) ([]time.Duration, error) { logFile := filepath.Join(outPath, "video_flash.log") err := ffmpegVideoStats(videoPath, logFile) if err != nil { return nil, err } file, err := os.Open(logFile) if err != nil { return nil, fmt.Errorf("ffmpeg video stats failed to open log file: %w", err) } defer file.Close() const flashThreshold = 130.0 const minGap = 200 * time.Millisecond var ( flashes []time.Duration lastFlash = -999 * time.Second curPTS time.Duration ) scanner := bufio.NewScanner(file) for scanner.Scan() { line := scanner.Text() if m := rePTS.FindStringSubmatch(line); len(m) == 2 { // PTS is logged as seconds (float); convert to duration if d, perr := parsePTSSecondsToDuration(m[1]); perr == nil { curPTS = d } continue } if m := reYAVG.FindStringSubmatch(line); len(m) == 2 { y, _ := strconv.ParseFloat(m[1], 64) if y >= flashThreshold && curPTS-lastFlash > minGap { flashes = append(flashes, curPTS) lastFlash = curPTS } } } return flashes, scanner.Err() } // extractBeepTimestamps runs ffmpeg + astats to find beeps. // A beep is when RMS_level > beepThreshold, debounced by 0.2s. 
func extractBeepTimestamps(audioPath string, beepThreshold float64, outPath string) ([]time.Duration, error) { logFile := filepath.Join(outPath, "audio_beep.log") err := ffmpegAudioStats(audioPath, logFile) if err != nil { return nil, err } file, err := os.Open(logFile) if err != nil { return nil, fmt.Errorf("ffmpeg audio stats failed to open log file: %w", err) } defer file.Close() const minGap = 200 * time.Millisecond var ( beeps []time.Duration last = -999 * time.Second curPTS time.Duration ) scanner := bufio.NewScanner(file) for scanner.Scan() { line := scanner.Text() if m := rePTS.FindStringSubmatch(line); len(m) == 2 { if d, perr := parsePTSSecondsToDuration(m[1]); perr == nil { curPTS = d } continue } if m := reRMS.FindStringSubmatch(line); len(m) == 2 { val := m[1] if strings.Contains(val, "inf") || strings.Contains(val, "nan") { continue // skip silence or invalid } lvl, _ := strconv.ParseFloat(val, 64) if lvl > beepThreshold && curPTS-last > minGap { beeps = append(beeps, curPTS) last = curPTS } } } return beeps, scanner.Err() } // silenceRange represents one silence segment in durations. type silenceRange struct { start time.Duration end time.Duration duration time.Duration } // detectSilence runs ffmpeg silencedetect and returns all silence ranges. 
func detectSilence(audioPath string, noiseLevel int, minDuration time.Duration) ([]silenceRange, error) { stderr, err := ffmpegSilenceStats(audioPath, noiseLevel, minDuration.Seconds()) if err != nil { return nil, err } var ranges []silenceRange var current silenceRange inSilence := false reStart := regexp.MustCompile(`silence_start:\s*([0-9.]+)`) reEnd := regexp.MustCompile(`silence_end:\s*([0-9.]+)\s*\|\s*silence_duration:\s*([0-9.]+)`) scanner := bufio.NewScanner(stderr) for scanner.Scan() { line := scanner.Text() if m := reStart.FindStringSubmatch(line); len(m) == 2 { if start, perr := strconv.ParseFloat(m[1], 64); perr == nil { current = silenceRange{start: secondsToDuration(start)} inSilence = true } } if m := reEnd.FindStringSubmatch(line); len(m) == 3 { if inSilence { end, _ := strconv.ParseFloat(m[1], 64) dur, _ := strconv.ParseFloat(m[2], 64) current.end = secondsToDuration(end) current.duration = secondsToDuration(dur) ranges = append(ranges, current) inSilence = false } } } return ranges, scanner.Err() } func secondsToDuration(f float64) time.Duration { return time.Duration(f * float64(time.Second)) } func parsePTSSecondsToDuration(s string) (time.Duration, error) { f, err := strconv.ParseFloat(strings.TrimSpace(s), 64) if err != nil { return 0, err } return time.Duration(f * float64(time.Second)), nil } func averageSpacing(ts []time.Duration) (time.Duration, error) { if len(ts) < 2 { return 0, fmt.Errorf("need at least 2 timestamps (got %d)", len(ts)) } var sum time.Duration var gaps int for i := 1; i < len(ts); i++ { d := ts[i] - ts[i-1] if d <= 0 { // skip non-positive gaps (duplicates or out-of-order anomalies) continue } sum += d gaps++ } if gaps == 0 { return 0, fmt.Errorf("no positive gaps to compute spacing") } return time.Duration(int64(sum) / int64(gaps)), nil } func requireDurationInDelta(t *testing.T, expected, actual, delta time.Duration, msgAndArgs ...interface{}) { require.InDelta(t, expected.Nanoseconds(), actual.Nanoseconds(), 
float64(delta.Nanoseconds()), msgAndArgs...) } ================================================ FILE: version/version.go ================================================ // Copyright 2023 LiveKit, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package version const ( Version = "1.12.0" TemplateVersion = "sha-594b3b1" )